max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
map_vectorizer/config/vectorize_config_parser.py | mgiraldo/map-vectorizer | 148 | 11107591 | import csv
def parse(fp):
    '''
    :param fp: open file object for the vectorize config CSV
    :rtype: dict
    :returns: brightness/contrast/threshold values and the list of base colors
    '''
tempcolors = []
index = 0
configdata = csv.reader(fp, delimiter=',')
for row in configdata:
if index > 0:
tempcolors.append([int(row[0]), int(row[1]), int(row[2])])
else:
# brightness/contrast/threshold values
brightness = int(row[0])
contrast = int(row[1])
thresholdblack = int(row[2])
thresholdwhite = int(row[3])
index = index + 1
    basecolors = []
    if len(tempcolors) > 2:
        basecolors = tempcolors
return {
'basecolors': basecolors,
'brightness': brightness,
'contrast': contrast,
'thresholdblack': thresholdblack,
'thresholdwhite': thresholdwhite,
}
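# A minimal sketch of the CSV layout this parser assumes (filename and values are
# made up): the first row holds brightness, contrast, threshold-black and
# threshold-white, and each later row holds one R,G,B base color (at least three
# color rows are expected, otherwise basecolors stays empty):
#
#   20,5,100,200
#   255,255,255
#   0,0,0
#   120,80,40
#
# with open("vectorize_config.csv") as fp:
#     config = parse(fp)
#     # config["brightness"] -> 20, config["basecolors"] -> three RGB triplets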
|
src/web/reactionforms/mysql-execute-query/__init__.py | anderson-attilio/runbook | 155 | 11107598 | <reponame>anderson-attilio/runbook
"""Reactions form class for email notifications."""
from wtforms import SelectField, TextAreaField, TextField
from wtforms.validators import DataRequired, Optional
from ..base import BaseReactForm
class ReactForm(BaseReactForm): #pylint: disable=no-init
    ''' Class that creates a form for the reaction '''
title = "MySQL: Execute SQL Statement"
description = """
<p>This reaction will connect to a remote MySQL server and execute the specified SQL statement.</p>
"""
placeholders = BaseReactForm.placeholders
field_descriptions = BaseReactForm.descriptions
placeholders.update({
'server' : 'mysql.example.com',
'user' : 'dbuser',
'sql' : 'flush privileges',
})
server = TextField(
"MySQL Server Address",
description="""
Specify a hostname or IP address to connect to.
""",
validators=[DataRequired(message='Server is a required field')])
user = TextField(
"Username",
description="""
Specify the MySQL user to connect with.
""",
validators=[DataRequired(message="Username is a required field")])
password = TextField(
"Password",
description="""
Specify the above MySQL user's password.
""",
validators=[DataRequired(message="Password is a required field")])
sql = TextAreaField(
"SQL Statement",
description="""
Specify the SQL statement to execute
""",
validators=[DataRequired(message='SQL Statement is a required field')])
call_on = SelectField(
'Call On',
description=field_descriptions['callon'],
choices=[('false', 'False Monitors'), ('true', 'True Monitors')],
validators=[DataRequired(message='Call on is a required field.')])
if __name__ == '__main__':
pass
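# Rough usage sketch, not part of the original module: the form is normally
# instantiated by the surrounding Flask/WTForms machinery; the field names
# below simply mirror the fields defined above.
#
# form = ReactForm()
# if form.validate():
#     reaction = {
#         "server": form.server.data,
#         "user": form.user.data,
#         "password": form.password.data,
#         "sql": form.sql.data,
#         "call_on": form.call_on.data,
#     }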
|
045_ssd_mobilenet_v2_oid_v4/01_float32/03_float16_quantization.py | IgiArdiyanto/PINTO_model_zoo | 1,529 | 11107611 | ### tf-nightly==2.3.0-rc1
import tensorflow.compat.v1 as tf
import numpy as np
# Float16 Quantization - Input/Output=float32
graph_def_file="export/tflite_graph.pb"
input_arrays=["normalized_input_image_tensor"]
output_arrays=['TFLite_Detection_PostProcess','TFLite_Detection_PostProcess:1','TFLite_Detection_PostProcess:2','TFLite_Detection_PostProcess:3']
input_tensor={"normalized_input_image_tensor":[1,300,300,3]}
converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(graph_def_file, input_arrays, output_arrays,input_tensor)
converter.allow_custom_ops=True
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,tf.lite.OpsSet.SELECT_TF_OPS]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('ssd_mobilenet_v2_oid_v4_300x300_float16_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Float16 Quantization complete! - ssd_mobilenet_v2_oid_v4_300x300_float16_quant.tflite") |
spec/mock_patch_example_spec.py | michaelelin/mamba | 462 | 11107618 | from mamba import description, context, it
from unittest.mock import patch
from expects import expect, be
class ExampleClass(object):
def hello(self):
return 'Hello'
with description('Testing with unittest.mock'):
with context('when class method is mocked'):
with it('returns mocked value'):
with patch.object(ExampleClass, 'hello', return_value='World!') as mock_method:
expect(mock_method()).to(be('World!'))
|
python/akg/ops/nn/ascend/maxpool_grad_with_argmax.py | tianjiashuo/akg | 286 | 11107634 | <reponame>tianjiashuo/akg
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: maxpool_grad_with_argmax"""
import akg.tvm
import akg.topi
import akg
from akg.ops.nn.ascend.avgpool import cal_pad_shapes_by_strategy
import akg.utils as utils
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor,
(list, tuple), (list, tuple), (list, tuple), (str, list, tuple), (str, type(None)))
def MaxpoolGradWithArgmax(head, mask, shape, kernel, stride, pad, target=utils.CCE):
"""
    Automatic differentiation of maxpool with a manual schedule.
    Args:
        head (tvm.tensor.Tensor): Tensor, the gradient to be propagated.
        mask (tvm.tensor.Tensor): Tensor, the mask indicating where the gradient is propagated.
        shape (Union[list, tuple]): five int numbers for pooling input image's size.
        kernel (Union[list, tuple]): two int numbers for pooling window's size.
        stride (Union[list, tuple]): two int numbers for window's stride.
        pad (Union[str, list, tuple]): padding, should be 'VALID', 'SAME' or a
            list of four int numbers (the 'CONSTANTS' strategy).
            Supported **pad** strategies are the same as avgpool's.
Returns:
        tvm.tensor.Tensor, the gradient propagated back to the maxpooling input.
Supported Platforms:
'Ascend'
"""
dtype = head.dtype
kernel_h, kernel_w = kernel
stride_h, stride_w = stride
[ph_h, _, pw_h, _], [out_size_h, out_size_w] = \
cal_pad_shapes_by_strategy(shape, kernel, stride, pad)
batch_size, input_c1, input_h, input_w, input_c0 = shape
# tile size 14 by 14 is proved to be the most efficient one
tile_scale_h = 7
tile_scale_w = 7
tile_h = stride_h * tile_scale_h
if kernel_h == stride_h: # non-overlapping case
tile_h_pad_u = ph_h % stride_h
elif kernel_h % stride_h == 0:
tile_h_pad_u = kernel_h - stride_h - ph_h
else:
tile_h_pad_u = kernel_h - kernel_h % stride_h - ph_h
tile_h_pad_l = kernel_h - stride_h + ph_h
tile_input_h = tile_h + tile_h_pad_u + tile_h_pad_l
tile_h_out = (input_h - 1) // tile_h + 1
if ph_h % stride_h == 0:
pad_output_h = ph_h // stride_h
else:
pad_output_h = ph_h // stride_h + 1
if tile_h_pad_u % stride_h == 0:
pad_output_h -= tile_h_pad_u // stride_h
else:
pad_output_h -= tile_h_pad_u // stride_h + 1
tile_output_h = (tile_input_h - kernel_h) // stride_h + 1
tile_w = stride_w * tile_scale_w
if kernel_w == stride_w: # non-overlapping case
tile_w_pad_u = pw_h % stride_w
elif kernel_w % stride_w == 0:
tile_w_pad_u = kernel_w - stride_w - pw_h
else:
tile_w_pad_u = kernel_w - kernel_w % stride_w - pw_h
tile_w_pad_l = kernel_w - stride_w + pw_h
tile_input_w = tile_w + tile_w_pad_u + tile_w_pad_l
tile_w_out = (input_w - 1) // tile_w + 1
if pw_h % stride_w == 0:
pad_output_w = pw_h // stride_w
else:
pad_output_w = pw_h // stride_w + 1
if tile_w_pad_u % stride_w == 0:
pad_output_w -= tile_w_pad_u // stride_w
else:
pad_output_w -= tile_w_pad_u // stride_w + 1
tile_output_w = (tile_input_w - kernel_w) // stride_w + 1
cce_col2img = intrin_col2im((tile_h, tile_w),
(tile_output_h, tile_output_w),
kernel, stride,
(tile_h_pad_u, tile_h_pad_l, tile_h_pad_u, tile_h_pad_l),
"float32")
head_reshaped = akg.tvm.compute((batch_size, input_c1, tile_h_out, tile_w_out,
tile_output_h, tile_output_w, input_c0),
lambda b, c1, h_out, w_out, oh, ow, c0:
akg.tvm.expr.Select(
akg.tvm.any(h_out * tile_scale_h + pad_output_h + oh < 0,
h_out * tile_scale_h + pad_output_h + oh > out_size_h - 1,
w_out * tile_scale_w + pad_output_w + ow < 0,
w_out * tile_scale_w + pad_output_w + ow > out_size_w - 1),
akg.tvm.const(0.0, dtype=dtype),
head(b, c1,
h_out * tile_scale_h + pad_output_h + oh,
w_out * tile_scale_w + pad_output_w + ow,
c0)),
name="head_reshaped")
mask_reshaped = akg.tvm.compute((batch_size, input_c1, tile_h_out, tile_w_out, kernel_h, kernel_w,
tile_output_h, tile_output_w, input_c0),
lambda b, c1, h_out, w_out, kh, kw, oh, ow, c0:
akg.tvm.expr.Select(
akg.tvm.any(h_out * tile_scale_h + pad_output_h + oh < 0,
h_out * tile_scale_h + pad_output_h + oh > out_size_h - 1,
w_out * tile_scale_w + pad_output_w + ow < 0,
w_out * tile_scale_w + pad_output_w + ow > out_size_w - 1),
akg.tvm.const(0.0, dtype=dtype),
mask(b, c1, kh, kw,
h_out * tile_scale_h + pad_output_h + oh,
w_out * tile_scale_w + pad_output_w + ow,
c0)),
name="mask_reshaped")
d_data = akg.tvm.compute((batch_size, input_c1, tile_h_out, tile_w_out, kernel_h, kernel_w,
tile_output_h, tile_output_w, input_c0),
lambda b, c1, h_out, w_out, kh, kw, oh, ow, c0:
mask_reshaped(b, c1, h_out, w_out, kh, kw, oh, ow, c0)
* head_reshaped(b, c1, h_out, w_out, oh, ow, c0),
name="d_data")
d_data_cast = akg.tvm.compute(d_data.shape,
lambda *i: d_data(*i).astype("float32"),
name="d_data_cast.local.UB")
result_tile = akg.tvm.compute((batch_size, input_c1, tile_h_out, tile_w_out,
tile_h, tile_w, input_c0),
lambda b, c1, h_out, w_out:
cce_col2img(d_data_cast[b, c1, h_out, w_out,
0:kernel_h, 0:kernel_w,
0:tile_output_h, 0:tile_output_w,
0:input_c0]),
name="result_tile.local.UB")
result_cast = akg.topi.cast(result_tile, dtype)
result = akg.tvm.compute(shape,
lambda b, c1, h, w, c0:
result_cast(b, c1, h // tile_h, w // tile_w, h % tile_h, w % tile_w, c0),
name="result")
def comp_func(s):
data_ub = s.cache_read(mask, "local.UB", [mask_reshaped])
head_ub = s.cache_read(head, "local.UB", [head_reshaped])
result_ub = s.cache_write(result, "local.UB")
s[mask_reshaped].set_scope("local.UB")
s[head_reshaped].set_scope("local.UB")
s[d_data].set_scope("local.UB")
s[d_data_cast].set_scope("local.UB")
s[result_tile].set_scope("local.UB")
s[result_cast].set_scope("local.UB")
# inline output
s[result_ub].compute_inline()
# inline inputs
s[head_ub].compute_inline()
s[data_ub].compute_inline()
# result_tile dependencies
s[d_data_cast].compute_at(s[result_tile], result_tile.op.axis[3])
s[d_data].compute_at(s[result_tile], result_tile.op.axis[3])
s[mask_reshaped].compute_at(s[result_tile], result_tile.op.axis[3])
s[head_reshaped].compute_at(s[result_tile], result_tile.op.axis[3])
# tile result
b, c1, h, w, c0 = result.op.axis
h_out, h_in = s[result].split(h, tile_h)
w_out, w_in = s[result].split(w, tile_w)
s[result].reorder(b, c1, h_out, w_out, h_in, w_in, c0)
s[result_tile].compute_at(s[result], w_out)
s[result_cast].compute_at(s[result], w_out)
return result, comp_func
def intrin_col2im(input_shape, output_shape, kernel, stride, pad, dtype):
"""
Compute col2im via cce col2im intrin function call directly
Args:
input_shape: the shape of the image
output_shape: the shape of the result of im2col given the input image
kernel: kernel sizes for im2col
stride: stride sizes for im2col
        pad: padding sizes for im2col, in the order left, right, top, bottom
dtype: type of the data
Return:
cce intrin function call for col2im
"""
input_w, input_h = input_shape
output_w, output_h = output_shape
pad_left, pad_right, pad_top, pad_bottom = pad
w_idx_kernel = 0
h_idx_kernel = 0
w_idx = (-pad_left) & 0xffff
h_idx = (-pad_top) & 0xffff
c1_idx = 0
stride_w, stride_h = stride
kernel_w, kernel_h = kernel
dilation_w = dilation_h = 1
jump_offset = 0
repeat_mode = 0
repeat_time = (output_w * output_h + 15) // 16
input_b = 1
input_c1 = 1
input_h_tile = 1
input_w_tile = 1
input_c0 = 16
input_shape = (input_b, input_c1, input_h_tile, input_w_tile, kernel_w, kernel_h, output_w, output_h, input_c0)
input_data = akg.tvm.placeholder(input_shape, dtype=dtype)
result = akg.tvm.compute((input_w, input_h, input_c0),
lambda h, w, c0:
input_data[0, 0, 0, 0,
h // kernel_h,
w // kernel_w,
h % kernel_h, w % kernel_w,
c0],
name='col2im_intrinsic')
input_data_buff = akg.tvm.decl_buffer(input_data.shape, input_data.dtype,
name="input_data_buff",
offset_factor=1, scope="local.UB")
result_buff = akg.tvm.decl_buffer(result.shape, result.dtype,
name="result_buff",
offset_factor=1, scope="local.UB")
def pack_args(sp):
if len(sp) != 20:
raise RuntimeError("20 args are expected to pack but got {}"
"".format(len(sp)))
# fcol2img = (sp[0] & 0xffff) << 0 | (sp[1] & 0xffff) << 16
# | (sp[2] & 0xff) << 32 | (sp[3] & 0xff) << 40
# | (sp[4] & 0xff) << 48 | (sp[5] & 0xff) << 56
# Xm = (sp[6] & 0xff) << 16 | (sp[7] & 0xff) << 24
# | (sp[8] & 0xffff) << 32 | (sp[9] & 0xffff) << 48
# | (sp[10] & 0xfff) << 0
# Xt = (sp[11] & 63) << 0 | (sp[12] & 63) << 6
# | (sp[13] & 0xff) << 12 | (sp[14] & 0xff) << 20
# | (sp[15] & 0xff) << 28 | (sp[16] & 0xff) << 36
# | (sp[17] & 0xff) << 44 | (sp[18] & 1) << 52 | (sp[19] & 0xff) << 56
fcol2img = akg.tvm.const(sp[0], 'uint64') + akg.tvm.const(sp[1] * 2**16, 'uint64') \
+ akg.tvm.const(sp[2] * 2**32, 'uint64') + akg.tvm.const(sp[3] * 2**40, 'uint64') \
+ akg.tvm.const(sp[4] * 2**48, 'uint64') + akg.tvm.const(sp[5] * 2**56, 'uint64')
xm = akg.tvm.const(sp[6] * 2**16, 'uint64') + akg.tvm.const(sp[7] * 2**24, 'uint64') \
+ akg.tvm.const(sp[8] * 2**32, 'uint64') + akg.tvm.const(sp[9] * 2**48, 'uint64') \
+ akg.tvm.const(sp[10], 'uint64')
xt = akg.tvm.const(sp[11], 'uint64') + akg.tvm.const(sp[12] * 2**6, 'uint64') \
+ akg.tvm.const(sp[13] * 2**12, 'uint64') + akg.tvm.const(sp[14] * 2**20, 'uint64') \
+ akg.tvm.const(sp[15] * 2**28, 'uint64') + akg.tvm.const(sp[16] * 2**36, 'uint64') \
+ akg.tvm.const(sp[17] * 2**44, 'uint64') + akg.tvm.const(sp[18] * 2**52, 'uint64') \
+ akg.tvm.const(sp[19] * 2**56, 'uint64')
return (fcol2img, xm, xt)
def intrin_func(ins, outs):
sp = [input_w, input_h, pad_left, pad_right, pad_top, pad_bottom, # fmatrix
w_idx_kernel, h_idx_kernel, w_idx, h_idx, c1_idx, # xm
stride_w, stride_h, kernel_w, kernel_h, dilation_w, dilation_h, jump_offset, repeat_mode, repeat_time]
aa = ins[0]
bb = outs[0]
ib = akg.tvm.ir_builder.create()
fcol2img, xm, xt = pack_args(sp)
ib.emit(akg.tvm.call_extern(dtype, "set_fcol2img", fcol2img))
ib.emit(akg.tvm.call_extern(dtype, "vector_dup",
bb.access_ptr("w"), 0,
(input_w * input_h * 16 + 63) // 64, 1, 1, 8, 8))
for kh in range(kernel_h):
for kw in range(kernel_w):
sp[6] = kw
sp[7] = kh
_, xm, xt = pack_args(sp)
offset = (kh * kernel_h + kw) * output_h * output_w * 16
ib.emit(akg.tvm.call_extern(dtype, "col2img", bb.access_ptr("rw"),
aa.access_ptr("r", offset=offset), xm, xt))
return ib.get()
with akg.tvm.build_config(offset_factor=1):
return akg.tvm.decl_tensor_intrin(result.op,
intrin_func,
binds={input_data: input_data_buff, result: result_buff})
|
python-midonetclient/src/midonetclient/tenant.py | yantarou/midonet | 221 | 11107636 | <reponame>yantarou/midonet
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Midokura PTE LTD.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from midonetclient import bridge
from midonetclient import chain
from midonetclient import port_group
from midonetclient import resource_base
from midonetclient import router
from midonetclient import vendor_media_type
class Tenant(resource_base.ResourceBase):
media_type = vendor_media_type.APPLICATION_TENANT_JSON
def __init__(self, uri, dto, auth):
super(Tenant, self).__init__(uri, dto, auth)
def get_name(self):
return self.dto['name']
def get_id(self):
return self.dto['id']
def get_description(self):
return self.dto['description']
def get_enabled(self):
return self.dto['enabled']
def name(self, name):
self.dto['name'] = name
return self
def description(self, description):
self.dto['description'] = description
return self
def enabled(self, enabled):
self.dto['enabled'] = enabled
return self
def get_bridges(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_BRIDGE_COLLECTION_JSON}
return self.get_children(self.dto['bridges'], query, headers,
bridge.Bridge)
def get_chains(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_CHAIN_COLLECTION_JSON}
return self.get_children(self.dto['chains'], query, headers,
chain.Chain)
def get_port_groups(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_PORTGROUP_COLLECTION_JSON}
return self.get_children(self.dto['portGroups'], query, headers,
port_group.PortGroup)
def get_routers(self, query=None):
headers = {'Accept':
vendor_media_type.APPLICATION_ROUTER_COLLECTION_JSON}
return self.get_children(self.dto['routers'], query, headers,
router.Router)
|
igibson/utils/data_utils/ext_object/scripts_wip/query_object_placement_probs.py | suresh-guttikonda/iGibson | 360 | 11107649 | import argparse
import json
import os
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
def input_number_or_name(selection_name, options):
if len(options) == 1:
print("Only %s option is %s" % (selection_name, options[0]))
return options[0]
for i, option in enumerate(options):
print("%s option %d: %s" % (selection_name, i, option))
user_input = input("Input number or name to assign probability, or done to finish\n")
if user_input == "done":
return "done"
try:
obj_num = int(user_input)
choice = options[obj_num]
except:
if user_input.lower() not in [option.lower() for option in options]:
print("Input %s is not valid, try again" % user_input)
return False
choice = [option for option in options if option.lower() == user_input.lower()][0]
return choice
def main(args):
object_cat_dir = "data/ig_dataset/objects/%s" % (args.object_cat)
if not os.path.isdir(object_cat_dir):
print("%s is not a valid object" % (args.object_cat))
return
if args.object_id:
object_ids = [args.object_id]
else:
object_ids = os.listdir(object_cat_dir)
obj_json_paths = []
existing_placement_rules = {}
obj_probs = {}
total_prob = 0.0
for object_id in object_ids:
obj_dir = "%s/%s/misc/" % (object_cat_dir, object_id)
if not os.path.isdir(obj_dir):
print("%s %s is not a valid object" % (args.object_cat, object_id))
return
obj_json_path = "%s/placement_probs.json" % (obj_dir)
if os.path.isfile(obj_json_path) and not args.overwrite:
if args.add:
if total_prob == 0.0:
with open(obj_json_path, "r") as f:
obj_probs = json.load(f)
total_prob = 1.0
elif not args.overwrite:
print("%s exists and overwrite false, quitting" % obj_json_path)
return
obj_json_paths.append(obj_json_path)
scene_names = [
"Beechwood_1_int",
"Benevolence_1_int",
"Ihlen_0_int",
"Merom_0_int",
"Pomaria_0_int",
"Pomaria_2_int",
"Wainscott_0_int",
"Beechwood_0_int",
"Benevolence_0_int",
"Benevolence_2_int",
"Ihlen_1_int",
"Merom_1_int",
"Pomaria_1_int",
"Rs_int",
"Wainscott_1_int",
]
support_obj_dicts = []
for scene_name in scene_names:
support_objs_json = "data/ig_dataset/scenes/%s/misc/all_support_objs.json" % scene_name
if os.path.isfile(support_objs_json):
with open(support_objs_json, "r") as f:
support_obj_dicts += json.load(f)
else:
settings = MeshRendererSettings(enable_shadow=False, msaa=False, enable_pbr=False)
s = Simulator(mode="headless", image_width=800, image_height=800, rendering_settings=settings)
simulator = s
scene = InteractiveIndoorScene(scene_name, texture_randomization=False, object_randomization=False)
s.import_ig_scene(scene)
for obj_name in scene.objects_by_name:
obj = scene.objects_by_name[obj_name]
if not obj.supporting_surfaces:
continue
info_dict = {}
info_dict["name"] = obj_name
info_dict["category"] = obj.category
info_dict["room"] = obj.in_rooms[0]
info_dict["supporting_surface_types"] = list(obj.supporting_surfaces.keys())
support_obj_dicts.append(info_dict)
with open(support_objs_json, "w") as f:
json.dump(support_obj_dicts, f)
s.disconnect()
unique_categories = set()
unique_rooms = set()
room_category_support_types = {}
for support_obj_dict in support_obj_dicts:
obj_category = support_obj_dict["category"]
unique_categories.add(obj_category)
obj_room = support_obj_dict["room"][:-2]
unique_rooms.add(obj_room)
room_category_support_types[(obj_category, obj_room)] = support_obj_dict["supporting_surface_types"]
unique_categories = list(unique_categories)
unique_rooms = list(unique_rooms)
room_categories = {room: set() for room in unique_rooms}
for support_obj_dict in support_obj_dicts:
obj_category = support_obj_dict["category"]
obj_room = support_obj_dict["room"][:-2]
room_categories[obj_room].add(obj_category)
for room in room_categories:
room_categories[room] = list(room_categories[room])
done = False
while not done:
room = input_number_or_name("room", unique_rooms)
while not room:
room = input_number_or_name("room", unique_rooms)
if room == "done":
break
categories = room_categories[room]
obj_category = input_number_or_name("object category", categories)
while not obj_category:
obj_category = input_number_or_name("object category", categories)
if obj_category == "done":
break
support_types = room_category_support_types[(obj_category, room)]
support_type = input_number_or_name("support_type", support_types)
while not support_type:
support_type = input_number_or_name("support_type", support_types)
if support_type == "done":
break
prob = float(
input(
"Enter probability for object %s being %s %s in room %s\n"
% (args.object_cat, support_type, obj_category, room)
)
)
obj_probs["%s-%s-%s" % (obj_category, room, support_type)] = prob
total_prob += prob
for key in obj_probs:
obj_probs[key] /= total_prob
for obj_json_path in obj_json_paths:
with open(obj_json_path, "w") as f:
json.dump(obj_probs, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Configure which surfaces and containers in a scene an object might go in."
)
parser.add_argument("object_cat", type=str, default=None)
parser.add_argument("--object_id", type=str, default=None)
parser.add_argument("--overwrite", action="store_true", default=False)
parser.add_argument("--add", action="store_true", default=False)
args = parser.parse_args()
main(args)
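# Example invocation sketch (the category and id are placeholders for entries
# that exist under data/ig_dataset/objects):
#
#   python query_object_placement_probs.py apple --object_id 00001 --overwrite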
|
cxxtest/python/convert.py | coreyp1/graphlab | 333 | 11107691 | #
# Execute this script to copy the cxxtest/*.py files
# and run 2to3 to convert them to Python 3.
#
import glob
import subprocess
import os
import shutil
os.chdir('cxxtest')
for file in glob.glob('*.py'):
shutil.copyfile(file, '../python3/cxxtest/'+file)
#
os.chdir('../python3/cxxtest')
#
for file in glob.glob('*.py'):
subprocess.call('2to3 -w '+file, shell=True)
|
ntm/updates.py | snipsco/ntm-lasagne | 316 | 11107717 | import theano
import theano.tensor as T
import numpy as np
from lasagne.updates import get_or_compute_grads
from collections import OrderedDict
def graves_rmsprop(loss_or_grads, params, learning_rate=1e-4, chi=0.95, alpha=0.9, epsilon=1e-4):
r"""
<NAME>' RMSProp [1]_.
    .. math ::
        n_{i} &= \chi * n_{i-1} + (1 - \chi) * grad^{2}\\
        g_{i} &= \chi * g_{i-1} + (1 - \chi) * grad\\
        \Delta_{i} &= \alpha * \Delta_{i-1} - learning\_rate * grad /
            \sqrt{n_{i} - g_{i}^{2} + \epsilon}\\
        w_{i} &= w_{i-1} + \Delta_{i}
References
----------
.. [1] Graves, Alex.
"Generating Sequences With Recurrent Neural Networks", p.23
arXiv:1308.0850
"""
grads = get_or_compute_grads(loss_or_grads, params)
updates = OrderedDict()
for param, grad in zip(params, grads):
value = param.get_value(borrow=True)
n = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
g = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
delta = theano.shared(np.zeros(value.shape, dtype=value.dtype),
broadcastable=param.broadcastable)
n_ip1 = chi * n + (1. - chi) * grad ** 2
g_ip1 = chi * g + (1. - chi) * grad
        delta_ip1 = alpha * delta - learning_rate * grad / T.sqrt(n_ip1 - g_ip1 ** 2 + epsilon)
updates[n] = n_ip1
updates[g] = g_ip1
updates[delta] = delta_ip1
updates[param] = param + delta_ip1
return updates
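# Minimal usage sketch (the network, loss and input variables are assumed and
# not shown; only the call pattern of graves_rmsprop itself is illustrated):
#
# import lasagne
# params = lasagne.layers.get_all_params(network, trainable=True)
# updates = graves_rmsprop(loss, params, learning_rate=1e-4)
# train_fn = theano.function([input_var, target_var], loss, updates=updates)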
|
examples/callcontext/client.py | brubbel/Pyro4 | 638 | 11107727 | <gh_stars>100-1000
from __future__ import print_function
import sys
import Pyro4
import uuid
# example: set a single correlation id on the context that should be passed along
Pyro4.current_context.correlation_id = uuid.uuid4()
print("correlation id set to:", Pyro4.current_context.correlation_id)
if sys.version_info < (3, 0):
input = raw_input
# custom proxy needed to get to annotation data, before Pyro 4.56
class CustomAnnotationProxy(Pyro4.Proxy):
def __init__(self, uri):
super(CustomAnnotationProxy, self).__init__(uri)
self._pyroHmacKey = b"secr3t_k3y"
# override the method that adds annotations and add our own
def _pyroAnnotations(self):
return {"XYZZ": b"Hello, I am a custom annotation from the proxy!"}
def _pyroResponseAnnotations(self, annotations, msgtype):
print(" Got response (msgtype=%d). Annotations:" % msgtype)
for key in annotations:
if key == "CORR":
value = uuid.UUID(bytes=annotations[key])
elif key == "HMAC":
value = "[...]"
else:
value = annotations[key]
print(" {0} -> {1}".format(key, value))
uri = input("Enter the URI of the server object: ")
print("\n------- (older) method to get annotations via callback on custom proxy... -----\n")
with CustomAnnotationProxy(uri) as proxy:
print("Sending a few messages using one proxy...")
for i in range(4):
msg = proxy.echo("hello-%d" % i)
proxies = [CustomAnnotationProxy(uri) for _ in range(5)]
for p in proxies:
print("Sending one message from new proxy...")
msg = p.echo("hello-%d" % id(p))
p._pyroRelease()
with CustomAnnotationProxy(uri) as proxy:
# oneway
print("Sending a oneway message... (should only print a connection ok response)")
proxy.oneway("hello-ONEWAY-1")
print("Sending another oneway message... (should not print a response at all)")
proxy.oneway("hello-ONEWAY-2")
# asynchronous
print("Asynchronous proxy message...")
proxy._pyroAsync()
result = proxy.echo("hello-ASYNC")
_ = result.value
print("\n------- get annotations via normal proxy and the call context... -----\n")
input("press enter:")
# the code below works as of Pyro 4.56.
with Pyro4.Proxy(uri) as proxy:
proxy._pyroHmacKey = b"secr3t_k3y"
print("normal call")
Pyro4.current_context.annotations = {"XYZZ": b"custom annotation from client via new way(1)"}
result = proxy.echo("hi there - new method of annotation access in client")
print("Annotations in response were: ", Pyro4.current_context.response_annotations)
print("\noneway call")
Pyro4.current_context.annotations = {"XYZZ": b"custom annotation from client via new way(2)"}
proxy.oneway("hi there ONEWAY - new method of annotation access in client")
print("Annotations in response were: ", Pyro4.current_context.response_annotations)
print(" (should be empty because oneway!)")
print("\nSee the console output on the server for more results.")
|
tools/generate_intersection_routes.py | L-Net-1992/transfuser | 447 | 11107828 | <filename>tools/generate_intersection_routes.py
import glob
import os
import sys
import lxml.etree as ET
import argparse
import random
import time
import carla
SAMPLING_DISTANCE = [100]
def add_intersection(transform, root, route_id):
'''
    Sample (start wp, end wp) pairs along the canonical axes in a 100x100 grid
Args:
transform: carla transform of the grid center (position of traffic light)
root: root of the xml tree structure
route_id: route counter
'''
x, y, yaw = transform.location.x, transform.location.y, transform.rotation.yaw
req_yaw = yaw + 180.0 # the vehicle should be opposite the traffic light
for dist in SAMPLING_DISTANCE:
for mul in [-1, 1]:
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x+mul*dist), y='%f'%(y), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y+mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x+mul*dist), y='%f'%y, z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y-mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x-mul*dist), y='%f'%(y), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y+mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x+mul*dist), y='%f'%y, z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y-mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
return root, route_id
def add_intersection_subsample(transform, root, route_id):
'''
Same function as above but samples 75% fewer routes
Args:
transform: carla transform of the grid center (position of traffic light)
root: root of the xml tree structure
route_id: route counter
'''
x, y, yaw = transform.location.x, transform.location.y, transform.rotation.yaw
req_yaw = yaw + 180.0 # the vehicle should be opposite the traffic light
for dist in SAMPLING_DISTANCE:
for mul in [-1, 1]:
if random.randint(0,7) == 0:
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x+mul*dist), y='%f'%(y), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y+mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
if random.randint(0,7) == 0:
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x+mul*dist), y='%f'%y, z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y-mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
if random.randint(0,7) == 0:
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x-mul*dist), y='%f'%(y), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y+mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
if random.randint(0,7) == 0:
route = ET.SubElement(root, 'route', id='%d'%route_id, town=args.town)
ET.SubElement(route, 'waypoint', x='%f'%(x+mul*dist), y='%f'%y, z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
ET.SubElement(route, 'waypoint', x='%f'%(x), y='%f'%(y-mul*dist), z='0.0',
pitch='0.0', roll='0.0', yaw='%f'%req_yaw)
route_id += 1
return root, route_id
def main():
client = carla.Client('localhost', 2100)
client.set_timeout(200.0)
world = client.load_world(args.town)
print ('loaded world')
actors = world.get_actors()
traffic_lights_list = actors.filter('*traffic_light')
print ('got %d traffic lights'%len(traffic_lights_list))
# each traffic light group at an intersection counted once
count = 0
route_id = 0
root = ET.Element('routes')
traffic_light_visited = []
for traffic_light in traffic_lights_list:
if traffic_light.id not in traffic_light_visited:
traffic_light_visited.append(traffic_light.id)
count += 1
if not args.subsample:
root, route_id = add_intersection(traffic_light.get_transform(), root, route_id)
else:
root, route_id = add_intersection_subsample(traffic_light.get_transform(), root, route_id)
for adjacent_traffic_light in traffic_light.get_group_traffic_lights():
traffic_light_visited.append(adjacent_traffic_light.id)
print ('unique intersections: ', count)
tree = ET.ElementTree(root)
if args.save_file is not None:
tree.write(args.save_file, xml_declaration=True, encoding='utf-8', pretty_print=True)
len_tree = 0
for _ in tree.iter('route'):
len_tree += 1
print ('total routes: ', len_tree)
if __name__ == '__main__':
global args
parser = argparse.ArgumentParser()
parser.add_argument('--save_file', type=str, required=False, default=None, help='xml file path to save the route waypoints')
parser.add_argument('--town', type=str, default='Town05', help='town for generating routes')
    parser.add_argument('--subsample', action='store_true', default=False, help='sample 75%% fewer routes')
args = parser.parse_args()
main()
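# Example invocation sketch (assumes a CARLA server listening on port 2100, as
# hard-coded in main()):
#
#   python generate_intersection_routes.py --town Town05 --save_file routes.xml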
|
backend/lk/models/appstore_app.py | Purus/LaunchKitDocker | 2,341 | 11107832 | <filename>backend/lk/models/appstore_app.py
# encoding: utf-8
#
# Copyright 2016 Cluster Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from django.db import models
from djorm_pgarray.fields import TextArrayField
from backend.lk.models.apimodel import APIModel
class AppStoreApp(APIModel):
ENCRYPTED_ID_KEY_TOKEN = 'appstoreapp'
create_time = models.DateTimeField(auto_now_add=True)
update_time = models.DateTimeField(auto_now=True)
itunes_id = models.CharField(max_length=128, unique=True)
bundle_id = models.CharField(max_length=128, unique=True)
app_info_ingestion_time = models.DateTimeField(null=True)
app_info_countries = TextArrayField(null=True)
decorated_country = None
decorated_info = None
def __getattr__(self, attr):
return getattr(self.decorated_info, attr)
@property
def itunes_url(self):
return 'https://itunes.apple.com/us/app/id%s' % self.itunes_id
@property
def public_small_icon(self):
return self.icon_60
@property
def public_medium_icon(self):
return self.icon_512
def to_dict(self):
return {
'id': self.encrypted_id,
'country': self.country,
'version': self.version,
'names': {
'short': self.short_name,
'full': self.name,
},
'icon': {
'small': self.public_small_icon,
'medium': self.public_medium_icon,
},
'iTunesId': self.itunes_id,
'bundleId': self.bundle_id,
'developer': self.developer_name,
}
|
spokestack/nlu/parsers/__init__.py | jonra1993/WakeWord-Detection | 139 | 11107850 | import re
DIGIT_SPLIT_RE = re.compile("[-,()\\s]+")
|
single_view_mpi/libs/mpi.py | deepneuralmachine/google-research | 23,901 | 11107899 | <reponame>deepneuralmachine/google-research<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Utilities for working with Multiplane Images (MPIs).
A multiplane image is a set of RGB + alpha textures, positioned as fronto-
parallel planes at specific depths from a reference camera. It represents a
lightfield and can be used to render new views from nearby camera positions
by warping each texture according to its plane homography and combining the
results with an over operation. More detail at:
https://people.eecs.berkeley.edu/~tinghuiz/projects/mpi/
In this code, an MPI is represented by a tensor of layer textures and a tensor
of depths:
layers: [..., L, H, W, 4] -- L is the number of layers, last dimension is
typically RGBA but it can be any number of channels as long as the
last channel is alpha.
depths: [..., L] -- distances of the planes from the reference camera.
Layers and depths are stored back-to-front, i.e. farthest layer ("layer 0")
comes first. Typically the depths are chosen so that the corresponding
disparities (inverse depths) form an arithmetic sequence.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from single_view_mpi.libs import geometry
from single_view_mpi.libs import utils
@utils.name_scope
def layer_visibility(alphas):
"""Compute visibility for each pixel in each layer.
Visibility says how unoccluded each pixel is by the corresponding pixels in
front of it (i.e. those pixels with the same (x,y) position in subsequent
layers). The front layer has visibility 1 everywhere since nothing can occlude
it. Each other layer has visibility equal to the product of (1 - alpha) for
all the layers in front of it.
Args:
alphas: [..., L, H, W, 1] Alpha channels for L layers, back to front.
Returns:
[..., L, H, W, 1] visibilities.
"""
return tf.math.cumprod(
1.0 - alphas, axis=-4, exclusive=True, reverse=True)
@utils.name_scope
def layer_weights(alphas):
"""Compute contribution weights for each layer from a set of alpha channels.
The weights w_i for each layer are determined from the layer alphas so that
  to composite the layers we simply multiply each by its weight and add them
up. In other words, the weight says how much each layer contributes to the
final composed image.
For alpha-blending, the weight of a layer at a point is its visibility at that
point times its alpha at that point, i.e:
alpha_i * (1 - alpha_i+1) * (1 - alpha_i+2) * ... (1 - alpha_n-1)
If the first (i.e. the back) layer has alpha=1 everywhere, then the output
weights will sum to 1 at each point.
Args:
alphas: [..., L, H, W, 1] Alpha channels for L layers, back to front.
Returns:
[..., L, H, W, 1] The resulting layer weights.
"""
return alphas * layer_visibility(alphas)
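# Worked example (illustrative values, single pixel, layers back to front): for
# alphas [1.0, 0.5, 0.25], layer_visibility gives
# [(1-0.5)*(1-0.25), (1-0.25), 1.0] = [0.375, 0.75, 1.0], so layer_weights
# returns [0.375, 0.375, 0.25], which sums to 1 because the back layer is
# fully opaque.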
@utils.name_scope
def compose_back_to_front(images):
"""Compose a set of images (for example, RGBA), back to front.
Args:
images: [..., L, H, W, C+1] Set of L images, with alpha in the last channel.
Returns:
[..., H, W, C] Composed image.
"""
weights = layer_weights(images[Ellipsis, -1:])
return tf.reduce_sum(images[Ellipsis, :-1] * weights, axis=-4)
@utils.name_scope
def disparity_from_layers(layers, depths):
"""Compute disparity map from a set of MPI layers.
From reference view.
Args:
layers: [..., L, H, W, C+1] MPI layers, back to front.
depths: [..., L] depths for each layer.
Returns:
[..., H, W, 1] Single-channel disparity map from reference viewpoint.
"""
disparities = 1.0 / depths
# Add height, width and channel axes to disparities, so it can broadcast.
disparities = disparities[Ellipsis, tf.newaxis, tf.newaxis, tf.newaxis]
weights = layer_weights(layers[Ellipsis, -1:])
# Weighted sum of per-layer disparities:
return tf.reduce_sum(disparities * weights, axis=-4)
@utils.name_scope
def make_depths(front_depth, back_depth, num_planes):
"""Returns a list of MPI plane depths, back to front.
  The first element in the list will be back_depth, the last will be
  front_depth, and in between there will be num_planes - 2 intermediate
  depths, which are interpolated linearly in disparity.
Args:
front_depth: The depth of the front-most MPI plane.
back_depth: The depth of the back-most MPI plane.
num_planes: The total number of planes to create.
Returns:
[num_planes] A tensor of depths sorted in descending order (so furthest
first). This order is useful for back to front compositing.
"""
assert front_depth < back_depth
front_disparity = 1.0 / front_depth
back_disparity = 1.0 / back_depth
disparities = tf.linspace(back_disparity, front_disparity, num_planes)
return 1.0 / disparities
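# Example: make_depths(1.0, 100.0, 5) spaces disparities linearly between 1/100
# and 1/1, i.e. [0.01, 0.2575, 0.505, 0.7525, 1.0], so the returned depths are
# approximately [100.0, 3.88, 1.98, 1.33, 1.0] (farthest first).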
@utils.name_scope
def render_layers(layers,
depths,
pose,
intrinsics,
target_pose,
target_intrinsics,
height=None,
width=None,
clamp=True):
"""Render target layers from MPI representation.
Args:
layers: [..., L, H, W, C] MPI layers, back to front.
depths: [..., L] MPI plane depths, back to front.
pose: [..., 3, 4] reference camera pose.
intrinsics: [..., 4] reference intrinsics.
target_pose: [..., 3, 4] target camera pose.
target_intrinsics: [..., 4] target intrinsics.
height: height to render to in pixels (or None for input height).
width: width to render to in pixels (or None for input width).
clamp: whether to clamp image coordinates (see geometry.sample_image doc),
i.e. extending the image beyond its size or not.
Returns:
[..., L, height, width, C] The layers warped to the target view by applying
an appropriate homography to each one.
"""
source_to_target_pose = geometry.mat34_product(
target_pose, geometry.mat34_pose_inverse(pose))
# Add a dimension to correspond to L in the poses and intrinsics.
pose = pose[Ellipsis, tf.newaxis, :, :] # [..., 1, 3, 4]
target_pose = target_pose[Ellipsis, tf.newaxis, :, :] # [..., 1, 3, 4]
intrinsics = intrinsics[Ellipsis, tf.newaxis, :] # [..., 1, 4]
target_intrinsics = target_intrinsics[Ellipsis, tf.newaxis, :] # [..., 1, 4]
# Fronto-parallel plane equations at the given depths, in the reference
# camera's frame.
normals = tf.constant([0.0, 0.0, 1.0], shape=[1, 3])
depths = -depths[Ellipsis, tf.newaxis] # [..., L, 1]
normals, depths = utils.broadcast_to_match(normals, depths, ignore_axes=1)
planes = tf.concat([normals, depths], axis=-1) # [..., L, 4]
homographies = geometry.inverse_homography(pose, intrinsics, target_pose,
target_intrinsics,
planes) # [..., L, 3, 3]
# Each of the resulting [..., L] homographies knows how to inverse-warp one
# of the [..., (H,W), L] images into a new [... (H',W')] target images.
target_layers = geometry.homography_warp(
layers, homographies, height=height, width=width, clamp=clamp)
# The next few lines implement back-face culling.
#
# We don't want to render content that is behind the camera. (If we did, we
# might see upside-down images of the layers.) A typical graphics approach
# would be to test each pixel of each layer against a near-plane and discard
# those that are in front of it. Here we implement something cheaper:
# back-face culling. If the target camera sees the "back" of a layer then we
# set that layer's alpha to zero. This is simple and sufficient in practice
# to avoid nasty artefacts.
# Convert planes to target camera space. target_planes is [..., L, 4]
target_planes = geometry.mat34_transform_planes(source_to_target_pose, planes)
# Fourth coordinate of plane is negative distance in front of the camera.
# target_visible is [..., L]
target_visible = tf.cast(target_planes[Ellipsis, -1] < 0.0, dtype=tf.float32)
# per_layer_alpha is [..., L, 1, 1, 1]
per_layer_alpha = target_visible[Ellipsis, tf.newaxis, tf.newaxis, tf.newaxis]
# Multiply alpha channel by per_layer_alpha:
non_alpha_channels = target_layers[Ellipsis, :-1]
alpha = target_layers[Ellipsis, -1:] * per_layer_alpha
target_layers = tf.concat([non_alpha_channels, alpha], axis=-1)
return target_layers
@utils.name_scope
def render(layers,
depths,
pose,
intrinsics,
target_pose,
target_intrinsics,
height=None,
width=None,
clamp=True):
"""Render target image from MPI representation.
Args:
layers: [..., L, H, W, C+1] MPI layers back to front, alpha in last channel.
depths: [..., L] MPI plane depths, back to front
pose: [..., 3, 4] reference camera pose
intrinsics: [..., 4] reference intrinsics
target_pose: [..., 3, 4] target camera pose
target_intrinsics: [..., 4] target intrinsics
height: height to render to in pixels (or None for input height)
width: width to render to in pixels (or None for input width)
clamp: whether to clamp image coordinates (see geometry.sample_image doc).
i.e. extending the image beyond its size or not
Returns:
[...., height, width, C] Rendered image at the target view.
"""
target_layers = render_layers(
layers,
depths,
pose,
intrinsics,
target_pose,
target_intrinsics,
height=height,
width=width,
clamp=clamp)
return compose_back_to_front(target_layers)
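# End-to-end usage sketch (all tensors are assumed placeholders with the
# documented shapes; the exact intrinsics format is whatever geometry.* expects
# and is not spelled out here):
#
#   layers = tf.zeros([1, 32, 128, 128, 4])           # batch of one 32-layer MPI
#   depths = make_depths(1.0, 100.0, 32)[tf.newaxis]   # [1, 32]
#   image = render(layers, depths, pose, intrinsics,
#                  target_pose, target_intrinsics)     # -> [1, 128, 128, 3]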
|
project/game/bots_battle/replays/test_tenhou_encoder.py | MahjongRepository/tenhou-python-bot | 201 | 11108010 | <filename>project/game/bots_battle/replays/test_tenhou_encoder.py<gh_stars>100-1000
from game.bots_battle.replays.tenhou import TenhouReplay
from mahjong.meld import Meld
from utils.test_helpers import make_meld
def test_encode_called_chi():
meld = make_meld(Meld.CHI, tiles=[26, 29, 35])
meld.who = 3
meld.from_who = 2
meld.called_tile = 29
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "19895"
meld = make_meld(Meld.CHI, tiles=[4, 11, 13])
meld.who = 1
meld.from_who = 0
meld.called_tile = 4
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "3303"
def test_encode_called_pon():
meld = make_meld(Meld.PON, tiles=[104, 105, 107])
meld.who = 0
meld.from_who = 1
meld.called_tile = 105
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "40521"
meld = make_meld(Meld.PON, tiles=[124, 126, 127])
meld.who = 0
meld.from_who = 2
meld.called_tile = 124
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "47658"
def test_encode_called_daiminkan():
meld = make_meld(Meld.KAN, tiles=[100, 101, 102, 103])
meld.who = 2
meld.from_who = 3
meld.called_tile = 103
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "26369"
def test_encode_called_shouminkan():
meld = make_meld(Meld.SHOUMINKAN, tiles=[112, 113, 115, 114])
meld.who = 2
meld.from_who = 3
meld.called_tile = 114
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "44113"
def test_encode_called_ankan():
meld = make_meld(Meld.KAN, tiles=[72, 73, 74, 75])
meld.who = 2
meld.from_who = 2
meld.called_tile = 74
replay = TenhouReplay("", [], "")
result = replay._encode_meld(meld)
assert result == "18944"
|
insights/parsr/examples/httpd_conf.py | lhuett/insights-core | 121 | 11108016 | <gh_stars>100-1000
import string
from insights.parsr import (Char, EOF, EOL, EndTagName, Forward, FS, GT, LT,
Letters, Lift, LineEnd, Many, Number, OneLineComment, PosMarker,
QuotedString, skip_none, StartTagName, String, WS, WSChar)
from insights.parsr.query import Directive, Entry, Section
def loads(data):
return Entry(children=Top(data)[0])
def load(f):
return loads(f.read())
def simple_to_entry(name, attrs):
return Directive(name=name.value, attrs=attrs, lineno=name.lineno)
def complex_to_entry(tag, children):
name, attrs = tag
return Section(name=name.value, attrs=attrs, children=children, lineno=name.lineno)
Complex = Forward()
Num = Number & (WSChar | LineEnd)
Cont = Char("\\") + EOL
StartName = WS >> PosMarker(StartTagName(Letters)) << WS
EndName = WS >> EndTagName(Letters) << WS
Comment = (WS >> OneLineComment("#")).map(lambda x: None)
AttrStart = Many(WSChar)
AttrEnd = (Many(WSChar) + Cont) | Many(WSChar)
BareAttr = String(set(string.printable) - (set(string.whitespace) | set(";{}<>\\'\"")))
Attr = AttrStart >> (Num | BareAttr | QuotedString) << AttrEnd
Attrs = Many(Attr)
StartTag = (WS + LT) >> (StartName + Attrs) << (GT + WS)
EndTag = (WS + LT + FS) >> EndName << (GT + WS)
Simple = WS >> (Lift(simple_to_entry) * PosMarker(Letters) * Attrs) << WS
Stanza = Simple | Complex | Comment
Complex <= (Lift(complex_to_entry) * StartTag * Many(Stanza).map(skip_none)) << EndTag
Doc = Many(Stanza).map(skip_none)
Top = Doc + EOF
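# Usage sketch (hypothetical httpd snippet; the query access shown follows
# insights.parsr.query semantics and is only sketched here):
#
#   data = '<Directory "/var/www">\n    AllowOverride None\n</Directory>\n'
#   conf = loads(data)
#   conf["Directory"]["AllowOverride"]   # assumed Entry lookup by name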
|
astroNN/data/__init__.py | igomezv/astroNN | 156 | 11108018 | <reponame>igomezv/astroNN
# ---------------------------------------------------------#
# astroNN.data.__init__: tools for loading data
# ---------------------------------------------------------#
import os
import astroNN
def datapath():
"""
Get astroNN embedded data path
:return: full path to embedded data folder
:rtype: str
:History:
| 2018-May-16 - Written - <NAME> (University of Toronto)
| 2019-July-02 - Updated - <NAME> (University of Toronto)
"""
return os.path.join(os.path.dirname(astroNN.__path__[0]), 'astroNN', 'data')
def data_description():
"""
Print data description for astroNN embedded data
:History:
| 2018-May-16 - Written - <NAME> (University of Toronto)
| 2019-July-02 - Updated - <NAME> (University of Toronto)
"""
items = {
'anderson_2017_dr14_parallax.npz': 'Anderson 2017 improved Gaia TGAS parallax from Data-Driven Stellar Model',
'dr13_contmask.npz': "APOGEE DR13 continuum mask from Bovy's APOGEE tools",
'dr14_contmask.npz': "APOGEE DR14 continuum mask from Bovy's APOGEE tools",
'dr16_contmask.npz': "APOGEE DR16 continuum mask",
'gaiadr2_apogeedr14_parallax.npz': 'Gaia DR2 - APOGEE DR14 matches, indices corresponds '
'to APOGEE allstar DR14 file',
'aspcap_l31c_masks.npy': 'ASPCAP l31c (DR14) elements windows mask represented by bits on the 7514px spectrum',
'tf1_12.patch': 'Patch required to make astroNN fully functional with Tensorflow 1.12.x, also used in test suite',
'tf1_14.patch': 'Patch required to make astroNN fully functional with Tensorflow 1.14.x',
}
for item in items:
print(item, ': ', items[item])
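# Quick usage sketch:
#
#   from astroNN.data import datapath, data_description
#   print(datapath())     # absolute path of the embedded data folder
#   data_description()    # prints the item descriptions listed above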
|
scripts/rename_logged_data.py | mageofboy/pylot | 231 | 11108080 | <reponame>mageofboy/pylot
import glob
import os
def two_values_before_number(filebase, file_format='png'):
for index, filename in enumerate(glob.glob(filebase)):
vals = filename.split('-')
new_filename = ('test@' + vals[0] + '-' + vals[1] + '-' +
str(index) + '.' + file_format)
os.rename(filename, new_filename)
for filename in (glob.glob('test@*')):
vals = filename.split('@')
os.rename(filename, vals[1])
def one_value_before_number(filebase, file_format='png'):
for index, filename in enumerate(glob.glob(filebase)):
vals = filename.split('-')
new_filename = 'test@' + vals[0] + '-' + str(index) + '.' + file_format
os.rename(filename, new_filename)
for filename in (glob.glob('test@*')):
vals = filename.split('@')
os.rename(filename, vals[1])
def main():
two_values_before_number('annotated*')
one_value_before_number('signs*')
one_value_before_number('bboxes*', file_format='json')
two_values_before_number('center*')
two_values_before_number('left*')
two_values_before_number('right*')
two_values_before_number('lidar*', file_format='ply')
two_values_before_number('segmented*')
two_values_before_number('perfect-detector*')
two_values_before_number('depth*', file_format='pkl')
if __name__ == '__main__':
main()
|
tods/searcher/__init__.py | ZhuangweiKang/tods | 544 | 11108098 | from .brute_force_search import BruteForceSearch
|
saleor/graphql/app/tests/mutations/test_app_token_delete.py | fairhopeweb/saleor | 15,337 | 11108102 | <filename>saleor/graphql/app/tests/mutations/test_app_token_delete.py<gh_stars>1000+
import graphene
from .....app.error_codes import AppErrorCode
from .....app.models import App, AppToken
from ....tests.utils import assert_no_permission, get_graphql_content
APP_TOKEN_DELETE_MUTATION = """
mutation appTokenDelete($id: ID!){
appTokenDelete(id: $id){
errors{
field
message
code
}
appToken{
name
authToken
}
}
}
"""
def test_app_token_delete(
permission_manage_apps,
staff_api_client,
staff_user,
app,
permission_manage_products,
):
query = APP_TOKEN_DELETE_MUTATION
token = app.tokens.get()
staff_user.user_permissions.add(permission_manage_products)
app.permissions.add(permission_manage_products)
id = graphene.Node.to_global_id("AppToken", token.id)
staff_user.user_permissions.add(permission_manage_apps)
variables = {"id": id}
response = staff_api_client.post_graphql(query, variables=variables)
get_graphql_content(response)
assert not AppToken.objects.filter(id=token.id).first()
def test_app_token_delete_for_app(
permission_manage_apps,
app_api_client,
permission_manage_products,
):
app = App.objects.create(name="New_app", is_active=True)
token = AppToken.objects.create(app=app)
query = APP_TOKEN_DELETE_MUTATION
token = app.tokens.get()
requestor = app_api_client.app
requestor.permissions.add(permission_manage_products)
app.permissions.add(permission_manage_products)
id = graphene.Node.to_global_id("AppToken", token.id)
variables = {"id": id}
response = app_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
get_graphql_content(response)
assert not AppToken.objects.filter(id=token.id).first()
def test_app_token_delete_no_permissions(staff_api_client, staff_user, app):
query = APP_TOKEN_DELETE_MUTATION
token = app.tokens.get()
id = graphene.Node.to_global_id("AppToken", token.id)
variables = {"id": id}
response = staff_api_client.post_graphql(query, variables=variables)
assert_no_permission(response)
token.refresh_from_db()
def test_app_token_delete_out_of_scope_app(
permission_manage_apps,
staff_api_client,
staff_user,
app,
permission_manage_products,
):
"""Ensure user can't delete app token with wider scope of permissions."""
query = APP_TOKEN_DELETE_MUTATION
token = app.tokens.get()
app.permissions.add(permission_manage_products)
id = graphene.Node.to_global_id("AppToken", token.id)
variables = {"id": id}
# for staff user
response = staff_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
content = get_graphql_content(response)
data = content["data"]["appTokenDelete"]
errors = data["errors"]
assert not data["appToken"]
assert len(errors) == 1
error = errors[0]
assert error["code"] == AppErrorCode.OUT_OF_SCOPE_APP.name
assert error["field"] == "id"
assert AppToken.objects.filter(id=token.id).exists()
def test_app_token_delete_superuser_can_delete_token_for_any_app(
permission_manage_apps,
superuser_api_client,
app,
permission_manage_products,
):
"""Ensure superuser can delete app token for app with any scope of permissions."""
query = APP_TOKEN_DELETE_MUTATION
token = app.tokens.get()
app.permissions.add(permission_manage_products)
id = graphene.Node.to_global_id("AppToken", token.id)
variables = {"id": id}
response = superuser_api_client.post_graphql(query, variables=variables)
content = get_graphql_content(response)
data = content["data"]["appTokenDelete"]
errors = data["errors"]
assert data["appToken"]
assert not errors
assert not AppToken.objects.filter(id=token.id).exists()
def test_app_token_delete_for_app_out_of_scope_app(
permission_manage_apps,
app_api_client,
permission_manage_products,
):
app = App.objects.create(name="New_app", is_active=True)
token = AppToken.objects.create(app=app)
query = APP_TOKEN_DELETE_MUTATION
app.permissions.add(permission_manage_products)
id = graphene.Node.to_global_id("AppToken", token.id)
variables = {"id": id}
response = app_api_client.post_graphql(
query, variables=variables, permissions=(permission_manage_apps,)
)
content = get_graphql_content(response)
data = content["data"]["appTokenDelete"]
errors = data["errors"]
assert not data["appToken"]
assert len(errors) == 1
error = errors[0]
assert error["code"] == AppErrorCode.OUT_OF_SCOPE_APP.name
assert error["field"] == "id"
assert AppToken.objects.filter(id=token.id).exists()
|
drl_grasping/control/moveit2.py | Tiamat-Tech/drl_grasping | 126 | 11108103 | from moveit2 import MoveIt2Interface
from rclpy.executors import MultiThreadedExecutor
from threading import Thread
import rclpy
class MoveIt2(MoveIt2Interface):
def __init__(self, robot_model: str, separate_gripper_controller: bool = True, use_sim_time: bool = True, node_name: str = 'ign_moveit2_py'):
try:
rclpy.init()
except:
if not rclpy.ok():
import sys
sys.exit("ROS 2 could not be initialised")
super().__init__(robot_model=robot_model,
separate_gripper_controller=separate_gripper_controller,
use_sim_time=use_sim_time,
node_name=node_name)
self._moveit2_executor = MultiThreadedExecutor(1)
self._moveit2_executor.add_node(self)
thread = Thread(target=self._moveit2_executor.spin, args=())
thread.daemon = True
thread.start()
|
analysis/sys_id/make_random_waveform.py | leozz37/makani | 1,178 | 11108123 | <gh_stars>1000+
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write a low-pass-filtered Gaussian random timeseries to a text file."""
import sys
import gflags
import numpy as np
import scipy.signal
gflags.DEFINE_float('sample_rate', 100.0, 'Sample rate [Hz]')
gflags.DEFINE_integer('filter_order', 2, 'Order of low-pass filter')
gflags.DEFINE_float('mean', 130.0, 'Mean value of time series')
gflags.DEFINE_float('rms', 1.0, 'Standard deviation of time series')
gflags.DEFINE_float('ramp_time', 10.0, 'Ramp time [s]')
gflags.DEFINE_boolean('ramp_up', True, 'Apply ramp at beginning of waveform?')
gflags.DEFINE_boolean('ramp_down', True, 'Apply ramp at end of waveform?')
gflags.DEFINE_float('duration', 300.0, 'Time series duration [s]')
gflags.DEFINE_integer('columns', 8, 'Number of time series data columns')
gflags.DEFINE_float('cutoff_frequency', 2.0,
'Low-pass filter cutoff frequency [Hz]')
gflags.DEFINE_string('output_file', '', 'Output filename.')
gflags.DEFINE_boolean('use_motor_client_format', None,
'Produce data in a format suitable for the motor client.')
FLAGS = gflags.FLAGS
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '\nError: %s\n\nUsage: %s\n%s' % (e, sys.argv[0], FLAGS)
sys.exit(1)
nyquist_frequency = FLAGS.sample_rate / 2.0
b, a = scipy.signal.butter(FLAGS.filter_order,
FLAGS.cutoff_frequency / nyquist_frequency,
'low', analog=False)
settling_time = 5.0 * (1.0 / FLAGS.cutoff_frequency)
  x = np.random.randn(int(round((FLAGS.duration + settling_time) * FLAGS.sample_rate)))
y = scipy.signal.lfilter(b, a, x)
y = y[int(round(FLAGS.sample_rate * settling_time)):]
y *= FLAGS.rms / np.std(y)
y += FLAGS.mean
t = np.arange(len(y)) / FLAGS.sample_rate
# Apply ramp-up and/or ramp-down if desired.
ramp_samples = int(FLAGS.ramp_time * FLAGS.sample_rate)
if FLAGS.ramp_up:
y[0:ramp_samples] = np.linspace(0.0, y[ramp_samples], ramp_samples)
if FLAGS.ramp_down:
y[-ramp_samples:] = np.linspace(y[-ramp_samples], 0.0, ramp_samples)
filename = FLAGS.output_file
if not filename:
filename = 'blrms_%d_to_%d_mHz_rms_%.0f_mean_%.0f_order_%d.txt' % (
0.0,
FLAGS.cutoff_frequency * 1000,
FLAGS.rms,
FLAGS.mean,
FLAGS.filter_order)
print "Writing to '%s'..." % filename
output = np.tile(y, (FLAGS.columns, 1))
# Default "use_motor_client_format" to true if eight columns of data
# were requested.
if FLAGS.use_motor_client_format is None:
FLAGS.use_motor_client_format = (FLAGS.columns == 8)
# The motor client expects three blocks of eight columns each,
# giving, respectively, the commanded torque [Nm], the lower motor
# speed limit [rad/s], and the upper speed limit [rad/s]. To
# achieve a speed command, we set the upper and lower speed limits
# equal, and set the torque command (which does nothing) to zero.
if FLAGS.use_motor_client_format:
assert FLAGS.columns == 8
output = np.vstack((np.zeros(output.shape), output, output))
np.savetxt(filename, np.vstack((t, output)).T,
fmt='%0.02f' + ' %f' * output.shape[0])
if __name__ == '__main__':
main(sys.argv)
|
scripts/generate_spirv.py | samuelig/Vulkan-ValidationLayers | 432 | 11108136 | #!/usr/bin/env python3
#
# Copyright (c) 2016-2021 Valve Corporation
# Copyright (c) 2016-2021 LunarG, Inc.
# Copyright (c) 2016-2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compile GLSL to SPIR-V.
Depends on glslangValidator
"""
import os
import sys
import subprocess
import struct
import re
import argparse
SPIRV_MAGIC = 0x07230203
COLUMNS = 4
INDENT = 4
parser = argparse.ArgumentParser(description='Generate spirv code for this repository')
parser.add_argument('--outfilename', action='store', help='Output Filename')
parser.add_argument('infilename', action='store', type=str, help='Input Filename')
parser.add_argument('glslangvalidator', action='store', help='glslangvalidator')
args_in = parser.parse_args()
if not os.path.isfile(args_in.infilename):
sys.exit("Cannot find infilename " + args_in.infilename)
if not os.path.isfile(args_in.glslangvalidator):
sys.exit("Cannot find glslangvalidator " + args_in.glslangvalidator)
def identifierize(s):
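    # e.g. "inst_functions.comp" -> "inst_functions_comp" (example filename only)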
# translate invalid chars
s = re.sub("[^0-9a-zA-Z_]", "_", s)
# translate leading digits
return re.sub("^[^a-zA-Z_]+", "_", s)
def compile(filename, tmpfile):
# invoke glslangValidator
try:
args = [args_in.glslangvalidator, "-V", "-H", "-o", tmpfile, filename]
output = subprocess.check_output(args, universal_newlines=True)
    except subprocess.CalledProcessError as e:
        raise RuntimeError(e.output)
# read the temp file into a list of SPIR-V words
words = []
with open(tmpfile, "rb") as f:
data = f.read()
assert(len(data) and len(data) % 4 == 0)
# determine endianness
fmt = ("<" if data[0] == (SPIRV_MAGIC & 0xff) else ">") + "I"
for i in range(0, len(data), 4):
words.append(struct.unpack(fmt, data[i:(i + 4)])[0])
assert(words[0] == SPIRV_MAGIC)
# remove temp file
os.remove(tmpfile)
return (words, output.rstrip())
base = os.path.basename(args_in.infilename)
words, comments = compile(args_in.infilename, base + ".tmp")
literals = []
for i in range(0, len(words), COLUMNS):
columns = ["0x%08x" % word for word in words[i:(i + COLUMNS)]]
literals.append(" " * INDENT + ", ".join(columns) + ",")
header = """#include <stdint.h>
#pragma once
// This file is ***GENERATED***. Do Not Edit.
/* Copyright (c) 2021 The Khronos Group Inc.
* Copyright (c) 2021 Valve Corporation
* Copyright (c) 2021 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: <NAME> <<EMAIL>>
*/
#if 0
%s
#endif
static const uint32_t %s[%d] = {
%s
};
""" % (comments, identifierize(base), len(words), "\n".join(literals))
if args_in.outfilename:
with open(args_in.outfilename, "w") as f:
print(header, end="", file=f)
else:
print(header, end="")
|
data_structures/binary_trees/print_odd_level_nodes.py | FatiahBalo/python-ds | 1,723 | 11108139 | # Print all the nodes at odd levels of a binary tree
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def print_odd(root, is_odd=True):
    if not root:
        return
    if is_odd:
        print(root.val, end=' ')
    # Children are one level deeper, so the odd/even flag flips for them.
    print_odd(root.left, not is_odd)
    print_odd(root.right, not is_odd)
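# Example usage (illustrative, not part of the original snippet); the root is
# level 1, so levels 1 and 3 are printed:
#
#         1
#        / \
#       2   3
#      / \
#     4   5
if __name__ == '__main__':
    root = Node(1)
    root.left, root.right = Node(2), Node(3)
    root.left.left, root.left.right = Node(4), Node(5)
    print_odd(root)  # prints: 1 4 5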
|
build/chip/java/jar_runner.py | summercms/connectedhomeip | 3,495 | 11108148 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright (c) 2020 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to run javac command as an action with gn."""
import os
import subprocess
import sys
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
def IsExecutable(path):
"""Returns whether file at |path| exists and is executable.
Args:
path: absolute or relative path to test.
Returns:
True if the file at |path| exists, False otherwise.
"""
return os.path.isfile(path) and os.access(path, os.X_OK)
def FindCommand(command):
"""Looks up for |command| in PATH.
Args:
command: name of the command to lookup, if command is a relative or
absolute path (i.e. contains some path separator) then only that
path will be tested.
Returns:
Full path to command or None if the command was not found.
On Windows, this respects the PATHEXT environment variable when the
command name does not have an extension.
"""
fpath, _ = os.path.split(command)
if fpath:
if IsExecutable(command):
return command
if sys.platform == 'win32':
# On Windows, if the command does not have an extension, cmd.exe will
# try all extensions from PATHEXT when resolving the full path.
command, ext = os.path.splitext(command)
if not ext:
exts = os.environ['PATHEXT'].split(os.path.pathsep)
else:
exts = [ext]
else:
exts = ['']
for path in os.environ['PATH'].split(os.path.pathsep):
for ext in exts:
path = os.path.join(path, command) + ext
if IsExecutable(path):
return path
return None
def main():
java_path = FindCommand('jar')
if not java_path:
sys.stderr.write('jar: command not found\n')
sys.exit(EXIT_FAILURE)
args = sys.argv[1:]
if len(args) < 1:
sys.stderr.write('usage: %s [jar_args]...\n' % sys.argv[0])
sys.exit(EXIT_FAILURE)
return subprocess.check_call([java_path] + args)
if __name__ == '__main__':
sys.exit(main())
|
pytouch/handlers/__init__.py | Pandinosaurus/PyTouch | 149 | 11108157 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .image import ImageHandler
from .sensor import SensorHandler
from .video import VideoHandler
|
keras_extensions/models.py | xgenpanda/keras_extension | 225 | 11108177 | from keras.models import Model, standardize_X
from keras.layers import containers
from keras import optimizers, objectives
from keras import backend as K
import warnings
class SingleLayerUnsupervised(Model, containers.Sequential):
"""
Single layer unsupervised learning Model.
"""
# add Layer, adapted from keras.layers.containers.Sequential
def add(self, layer):
if len(self.layers) > 0:
warnings.warn('Cannot add more than one Layer to SingleLayerUnsupervised!')
return
super(SingleLayerUnsupervised, self).add(layer)
# compile theano graph, adapted from keras.models.Sequential
def compile(self, optimizer, loss):
self.optimizer = optimizers.get(optimizer)
self.loss = objectives.get(loss)
# input of model
self.X_train = self.get_input(train=True)
self.X_test = self.get_input(train=False)
train_loss = self.loss(self.X_train)
test_loss = self.loss(self.X_test)
train_loss.name = 'train_loss'
test_loss.name = 'test_loss'
for r in self.regularizers:
train_loss = r(train_loss)
updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)
updates += self.updates
if type(self.X_train) == list:
train_ins = self.X_train
test_ins = self.X_test
else:
train_ins = [self.X_train]
test_ins = [self.X_test]
self._train = K.function(train_ins, train_loss, updates=updates)
self._test = K.function(test_ins, test_loss)
# train model, adapted from keras.models.Sequential
def fit(self, X, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True, show_accuracy=False):
X = standardize_X(X)
val_f = None
val_ins = None
if validation_data or validation_split:
if show_accuracy:
val_f = self._test_with_acc
else:
val_f = self._test
if show_accuracy:
f = self._train_with_acc
out_labels = ['loss', 'acc']
else:
f = self._train
out_labels = ['loss']
ins = X# + [y, sample_weight]
metrics = ['loss', 'acc', 'val_loss', 'val_acc']
return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins,
shuffle=shuffle, metrics=metrics)
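# Usage sketch (illustrative; assumes a Keras 0.x-era layer and an unsupervised
# objective that only takes the model input -- the layer and loss names below
# are placeholders, not provided by this module):
#
#   model = SingleLayerUnsupervised()
#   model.add(SomeUnsupervisedLayer(input_dim=784, output_dim=64))
#   model.compile(optimizer='sgd', loss=reconstruction_loss)
#   model.fit(X, batch_size=128, nb_epoch=10)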
|
workspace/lesson1.py | hito0512/tensorRT_Pro | 537 | 11108182 |
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 3, stride=1, padding=1, bias=True)
self.conv.weight.data.fill_(0.3)
self.conv.bias.data.fill_(0.2)
def forward(self, x):
x = self.conv(x)
# return x.view(int(x.size(0)), -1)
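        # Flattening with -1 in the batch dimension (rather than x.size(0), as
        # in the commented-out line above) appears intended to keep the
        # exported ONNX reshape independent of a fixed batch size.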
return x.view(-1, int(x.numel() // x.size(0)))
model = Model().eval()
x = torch.full((1, 1, 3, 3), 1.0)
y = model(x)
print(y)
torch.onnx.export(
model, (x, ), "lesson1.onnx", verbose=True
)
|
technology/sky130/modules/sky130_row_cap.py | im-world/OpenRAM | 335 | 11108193 | <gh_stars>100-1000
#!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California
# All rights reserved.
#
import debug
import design
from tech import cell_properties as props
class sky130_row_cap(design.design):
def __init__(self, version, name=""):
if version == "rowend":
cell_name = "sky130_fd_bd_sram__sram_sp_rowend"
elif version == "rowenda":
cell_name = "sky130_fd_bd_sram__sram_sp_rowenda"
elif version == "rowend_replica":
cell_name = "sky130_fd_bd_sram__openram_sp_rowend_replica"
elif version == "rowenda_replica":
cell_name = "sky130_fd_bd_sram__openram_sp_rowenda_replica"
else:
debug.error("Invalid type for row_end", -1)
super().__init__(name=name, cell_name=cell_name, prop=props.row_cap_1port_cell)
|
esphome/components/anova/climate.py | OttoWinter/esphomeyaml | 249 | 11108196 | import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import climate, ble_client
from esphome.const import CONF_ID, CONF_UNIT_OF_MEASUREMENT
UNITS = {
"f": "f",
"c": "c",
}
CODEOWNERS = ["@buxtronix"]
DEPENDENCIES = ["ble_client"]
anova_ns = cg.esphome_ns.namespace("anova")
Anova = anova_ns.class_(
"Anova", climate.Climate, ble_client.BLEClientNode, cg.PollingComponent
)
CONFIG_SCHEMA = (
climate.CLIMATE_SCHEMA.extend(
{
cv.GenerateID(): cv.declare_id(Anova),
cv.Required(CONF_UNIT_OF_MEASUREMENT): cv.enum(UNITS),
}
)
.extend(ble_client.BLE_CLIENT_SCHEMA)
.extend(cv.polling_component_schema("60s"))
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
await climate.register_climate(var, config)
await ble_client.register_ble_node(var, config)
cg.add(var.set_unit_of_measurement(config[CONF_UNIT_OF_MEASUREMENT]))
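# Example ESPHome YAML (illustrative sketch; the entity name and BLE client id
# are assumptions -- only unit_of_measurement is required by the schema above):
#
# climate:
#   - platform: anova
#     name: "Anova Cooker"
#     ble_client_id: anova_ble
#     unit_of_measurement: c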
|
spyne/util/resource.py | edustaff/spyne | 786 | 11108203 | <reponame>edustaff/spyne
# encoding: utf8
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import os.path
import spyne.util.autorel
def get_resource_path(ns, fn):
try:
from spyne._deploymentinfo import resource_filename
except ImportError:
from pkg_resources import resource_filename
resfn = resource_filename(ns, fn)
spyne.util.autorel.AutoReloader.FILES.add(resfn)
path = os.path.abspath(resfn)
return path
def get_resource_file(ns, fn):
return open(get_resource_path(ns, fn), 'rb')
def get_resource_file_contents(ns, fn, enc=None):
resfn = get_resource_path(ns, fn)
if enc is None:
return open(resfn, 'rb').read()
else:
return open(resfn, 'rb').read().decode(enc)
def parse_xml_resource(ns, fn):
from lxml import etree
retval = etree.parse(get_resource_file(ns, fn))
return retval.getroot()
def parse_html_resource(ns, fn):
from lxml import html
retval = html.parse(get_resource_file(ns, fn))
return retval.getroot()
def parse_cloth_resource(ns, fn):
from lxml import html
retval = html.fragment_fromstring(get_resource_file_contents(ns, fn),
create_parent='spyne-root')
retval.attrib['spyne-tagbag'] = ''
return retval
|
tests/test_elements/test_ui_window.py | glipR/pygame_gui | 339 | 11108286 | import os
import pytest
import pygame
import pygame_gui
from tests.shared_fixtures import _init_pygame, default_ui_manager
from tests.shared_fixtures import default_display_surface, _display_surface_return_none
from tests.shared_comparators import compare_surfaces
from pygame_gui.elements.ui_window import UIWindow
from pygame_gui.elements.ui_button import UIButton
from pygame_gui.core.interfaces import IUIManagerInterface
from pygame_gui import UIManager
from pygame_gui.elements.ui_drop_down_menu import UIDropDownMenu
try:
# mouse button constants not defined in pygame 1.9.3
pygame.BUTTON_LEFT
pygame.BUTTON_MIDDLE
pygame.BUTTON_RIGHT
except AttributeError:
pygame.BUTTON_LEFT = 1
pygame.BUTTON_MIDDLE = 2
pygame.BUTTON_RIGHT = 3
class TestUIWindow:
def test_creation(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
UIWindow(pygame.Rect(100, 100, 200, 200), window_display_title="Test Window",
manager=default_ui_manager, element_id='window')
def test_set_blocking(self, _init_pygame, default_ui_manager: IUIManagerInterface,
_display_surface_return_none):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager, element_id='window')
button = UIButton(relative_rect=pygame.Rect(10, 10, 150, 30),
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
allow_double_clicks=True)
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': pygame.BUTTON_LEFT,
'pos': button.rect.center}))
assert button.held
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONUP,
{'button': pygame.BUTTON_LEFT,
'pos': button.rect.center}))
window.set_blocking(True)
default_ui_manager.process_events(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': pygame.BUTTON_LEFT,
'pos': button.rect.center}))
assert not button.held
def test_set_minimum_dimensions(self, _init_pygame, default_ui_manager: IUIManagerInterface,
_display_surface_return_none):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
window.set_minimum_dimensions((200, 200))
window.set_dimensions((100, 100))
assert window.rect.size == (200, 200)
window.set_minimum_dimensions((250, 250))
assert window.rect.size == (250, 250)
def test_set_dimensions(self, _init_pygame, default_ui_manager: IUIManagerInterface,
_display_surface_return_none):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
assert button.rect.topright == (window.get_container().rect.right - 10,
window.get_container().rect.top + 10)
assert button.rect.topright == (374, 253)
window.set_dimensions((300, 400))
assert button.rect.topright == (window.get_container().rect.right - 10,
window.get_container().rect.top + 10)
assert button.rect.topright == (474, 253)
def test_set_relative_position(self, _init_pygame, default_ui_manager: IUIManagerInterface,
_display_surface_return_none):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
assert button.rect.topright == (374, 253)
window.set_relative_position((100, 100))
assert button.rect.topright == (274, 153)
def test_set_position(self, _init_pygame, default_ui_manager: IUIManagerInterface,
_display_surface_return_none):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
assert button.rect.topright == (374, 253)
window.set_position((100, 100))
assert button.rect.topright == (274, 153)
def test_process_event(self, _init_pygame, default_ui_manager,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window,
object_id='#specific_id_test',
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
button.process_event(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': pygame.BUTTON_LEFT,
'pos': button.rect.center}))
button.process_event(pygame.event.Event(pygame.MOUSEBUTTONUP,
{'button': pygame.BUTTON_LEFT,
'pos': button.rect.center}))
confirm_event_fired = False
event_object_id = None
for event in pygame.event.get():
if (event.type == pygame.USEREVENT and
event.user_type == pygame_gui.UI_BUTTON_PRESSED and
event.ui_element == button):
confirm_event_fired = True
event_object_id = event.ui_object_id
assert confirm_event_fired
assert event_object_id == 'window.#specific_id_test'
consumed_event = window.process_event(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': pygame.BUTTON_RIGHT,
'pos': window.rect.center}))
assert consumed_event is True
window.edge_hovering[0] = True
consumed_event = window.process_event(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': pygame.BUTTON_LEFT,
'pos': window.rect.topleft}))
assert consumed_event and window.resizing_mode_active
consumed_event = window.process_event(pygame.event.Event(pygame.MOUSEBUTTONUP,
{'button': pygame.BUTTON_LEFT,
'pos': (500, 500)}))
assert not (consumed_event or window.resizing_mode_active)
consumed_event = window.process_event(pygame.event.Event(pygame.USEREVENT,
{'user_type': pygame_gui.UI_BUTTON_PRESSED,
'ui_element': window.close_window_button}))
assert not (consumed_event or window.alive())
def test_check_clicked_inside(self, _init_pygame,
default_ui_manager,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
clicked_inside = window.check_clicked_inside_or_blocking(pygame.event.Event(pygame.MOUSEBUTTONDOWN,
{'button': 1,
'pos': (100, 100)}))
assert clicked_inside is True
def test_update(self, _init_pygame, default_ui_manager,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
window.update(time_delta=0.05)
window.title_bar.held = True
window.update(time_delta=0.05)
window.update(time_delta=0.05)
window.title_bar.held = False
window.update(time_delta=0.05)
window.resizing_mode_active = True
window.start_resize_rect = pygame.Rect(0, 0, 190, 190)
window.edge_hovering[0] = True
window.edge_hovering[1] = True
window.update(time_delta=0.05)
window.edge_hovering[0] = False
window.edge_hovering[1] = False
window.edge_hovering[2] = True
window.edge_hovering[3] = True
window.update(time_delta=0.05)
def test_check_hover(self, _init_pygame, default_ui_manager: UIManager,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(100, 100, 200, 200), window_display_title="Test Window",
manager=default_ui_manager,
resizable=True)
default_ui_manager.mouse_position = (window.rect.left + window.shadow_width,
window.rect.centery)
window.resizing_mode_active = False
window.check_hover(0.05, False)
assert window.edge_hovering[0]
default_ui_manager.mouse_position = (window.rect.right - window.shadow_width,
window.rect.centery)
window.resizing_mode_active = False
window.check_hover(0.05, False)
assert window.edge_hovering[2]
default_ui_manager.mouse_position = (window.rect.centerx,
window.rect.top + window.shadow_width)
window.resizing_mode_active = False
window.check_hover(0.05, False)
assert window.edge_hovering[1]
default_ui_manager.mouse_position = (window.rect.centerx,
window.rect.bottom - window.shadow_width)
window.resizing_mode_active = False
window.check_hover(0.05, False)
assert window.edge_hovering[3]
default_ui_manager.mouse_position = (800, 800)
window.resizing_mode_active = True
assert window.check_hover(0.05, False)
window.resizing_mode_active = False
window.is_blocking = True
assert window.check_hover(0.05, False)
window.is_blocking = False
assert not window.check_hover(0.05, False)
def test_get_top_layer(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
window = UIWindow(pygame.Rect(0, 0, 400, 300), window_display_title="Test Window",
manager=default_ui_manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
menu = UIDropDownMenu(options_list=['eggs', 'flour', 'sugar'],
starting_option='eggs',
relative_rect=pygame.Rect(10, 10, 150, 30),
manager=default_ui_manager,
container=window)
assert window.get_top_layer() == 4
window.update(0.05)
assert window.get_top_layer() == 6
def test_change_layer(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
assert window.get_top_layer() == 4
window.change_layer(10)
assert window.get_top_layer() == 12
window.update(0.05)
assert window.get_top_layer() == 12
def test_kill(self, _init_pygame,
default_ui_manager: IUIManagerInterface,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
assert len(default_ui_manager.get_root_container().elements) == 2
assert len(default_ui_manager.get_sprite_group().sprites()) == 6
assert default_ui_manager.get_sprite_group().sprites() == [default_ui_manager.get_root_container(),
window,
window._window_root_container,
window.window_element_container,
window.title_bar,
window.close_window_button
]
window.kill()
confirm_event_fired = False
event_object_id = None
for event in pygame.event.get():
if (event.type == pygame.USEREVENT and event.user_type == pygame_gui.UI_WINDOW_CLOSE and
event.ui_element == window):
confirm_event_fired = True
event_object_id = event.ui_object_id
assert confirm_event_fired
assert event_object_id == 'window'
assert len(default_ui_manager.get_root_container().elements) == 0
assert len(default_ui_manager.get_sprite_group().sprites()) == 1
assert default_ui_manager.get_sprite_group().sprites() == [default_ui_manager.get_root_container()]
def test_rebuild_from_changed_theme_data_non_default(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes", "ui_window_non_default.json"))
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=manager)
assert window.image is not None
def test_using_theme_prototype(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes", "ui_window_prototype.json"))
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=manager)
button_rect = pygame.Rect(0, 0, 150, 30)
button_rect.topright = (-10, 10)
button = UIButton(relative_rect=button_rect,
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=manager,
container=window,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'})
assert window.image is not None
assert window.shadow_width == 1
assert window.border_width == 2
assert window.shape_corner_radius == 10
assert button.shadow_width == 1
assert button.border_width == 2
assert button.shape_corner_radius == 4
def test_rebuild_from_changed_theme_data_no_title_bar(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes", "ui_window_no_title_bar.json"))
window = UIWindow(pygame.Rect(0, 0, 200, 200), window_display_title="Test Window",
manager=manager)
assert window.title_bar is None
assert window.close_window_button is None
assert window.image is not None
@pytest.mark.filterwarnings("ignore:Invalid value")
@pytest.mark.filterwarnings("ignore:Colour hex code")
@pytest.mark.filterwarnings("ignore:Invalid gradient")
@pytest.mark.filterwarnings("ignore:Unable to load")
def test_rebuild_from_changed_theme_data_bad_values(self, _init_pygame,
_display_surface_return_none):
manager = UIManager((800, 600), os.path.join("tests", "data",
"themes", "ui_window_bad_values.json"))
window = UIWindow(pygame.Rect(0, 0, 200, 200),
window_display_title="Test Window",
manager=manager)
assert window.image is not None
def test_stub_methods(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
window = UIWindow(pygame.Rect(100, 100, 200, 200),
window_display_title="Test Window",
manager=default_ui_manager)
window.focus()
window.unfocus()
def test_disable(self, _init_pygame: None, default_ui_manager: UIManager,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_1 = UIButton(relative_rect=pygame.Rect(10, 10, 150, 30),
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window)
button_2 = UIButton(relative_rect=pygame.Rect(10, 50, 150, 30),
text="Test Button 2",
manager=default_ui_manager,
container=window)
window.disable()
assert window.is_enabled is False
assert window.title_bar.is_enabled is False
assert button_1.is_enabled is False
assert button_2.is_enabled is False
# process a mouse button down event
button_1.process_event(
pygame.event.Event(pygame.MOUSEBUTTONDOWN, {'button': 1, 'pos': button_1.rect.center}))
# process a mouse button up event
button_1.process_event(
pygame.event.Event(pygame.MOUSEBUTTONUP, {'button': 1, 'pos': button_1.rect.center}))
button_1.update(0.01)
assert button_1.check_pressed() is False
def test_enable(self, _init_pygame: None, default_ui_manager: UIManager,
_display_surface_return_none: None):
window = UIWindow(pygame.Rect(200, 200, 200, 200), window_display_title="Test Window",
manager=default_ui_manager)
button_1 = UIButton(relative_rect=pygame.Rect(10, 10, 150, 30),
text="Test Button",
tool_tip_text="This is a test of the button's tool tip functionality.",
manager=default_ui_manager,
container=window)
button_2 = UIButton(relative_rect=pygame.Rect(10, 50, 150, 30),
text="Test Button 2",
manager=default_ui_manager,
container=window)
window.disable()
window.enable()
assert window.is_enabled is True
assert window.title_bar.is_enabled is True
assert button_1.is_enabled is True
assert button_2.is_enabled is True
# process a mouse button down event
button_1.process_event(
pygame.event.Event(pygame.MOUSEBUTTONDOWN, {'button': 1, 'pos': button_1.rect.center}))
# process a mouse button up event
button_1.process_event(
pygame.event.Event(pygame.MOUSEBUTTONUP, {'button': 1, 'pos': button_1.rect.center}))
button_1.update(0.01)
assert button_1.check_pressed() is True
def test_show(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
window = UIWindow(pygame.Rect(100, 100, 200, 200),
window_display_title="Test Window",
manager=default_ui_manager,
visible=0)
assert window.visible == 0
assert window._window_root_container.visible == 0
assert window.title_bar.visible == 0
assert window.window_element_container.visible == 0
assert window.close_window_button.visible == 0
window.show()
assert window.visible == 1
assert window._window_root_container.visible == 1
assert window.title_bar.visible == 1
assert window.window_element_container.visible == 1
assert window.close_window_button.visible == 1
def test_hide(self, _init_pygame, default_ui_manager,
_display_surface_return_none):
window = UIWindow(pygame.Rect(100, 100, 200, 200),
window_display_title="Test Window",
manager=default_ui_manager)
assert window.visible == 1
assert window._window_root_container.visible == 1
assert window.title_bar.visible == 1
assert window.window_element_container.visible == 1
assert window.close_window_button.visible == 1
window.hide()
assert window.visible == 0
assert window._window_root_container.visible == 0
assert window.title_bar.visible == 0
assert window.window_element_container.visible == 0
assert window.close_window_button.visible == 0
def test_show_hide_rendering(self, _init_pygame, default_ui_manager, _display_surface_return_none):
resolution = (400, 400)
empty_surface = pygame.Surface(resolution)
empty_surface.fill(pygame.Color(0, 0, 0))
surface = empty_surface.copy()
manager = UIManager(resolution)
window = UIWindow(pygame.Rect(100, 100, 400, 400),
window_display_title="Test Window",
manager=manager,
visible=0)
manager.update(0.01)
manager.draw_ui(surface)
assert compare_surfaces(empty_surface, surface)
surface.fill(pygame.Color(0, 0, 0))
window.show()
manager.update(0.01)
manager.draw_ui(surface)
assert not compare_surfaces(empty_surface, surface)
surface.fill(pygame.Color(0, 0, 0))
window.hide()
manager.update(0.01)
manager.draw_ui(surface)
assert compare_surfaces(empty_surface, surface)
|
retrieval_pt/predict.py | baidu/knowledge-driven-dialogue | 299 | 11108308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: predict.py
"""
import torch
import torch.nn.functional as F
import source.inputters.data_provider as reader
from source.models.retrieval_model import RetrievalModel
from args import base_parser
from args import print_arguments
def build_data(args):
"""
build test data
"""
task_name = args.task_name.lower()
processor = reader.MatchProcessor(data_dir=args.data_dir,
task_name=task_name,
vocab_path=args.vocab_path,
max_seq_len=args.max_seq_len,
do_lower_case=args.do_lower_case)
test_data_generator = processor.data_generator(
batch_size=args.batch_size,
phase='test',
epoch=1,
shuffle=False,
device=args.gpu)
num_test_examples = processor.get_num_examples(phase='test')
test_data = [test_data_generator, num_test_examples]
return processor, test_data
def build_model(args, num_labels):
"""
build retrieval model
"""
retrieval_model = RetrievalModel(emb_size=args.emb_size,
n_layer=args.layers,
n_head=args.heads,
voc_size=args.voc_size,
sent_types=2,
num_labels=num_labels,
dropout=args.dropout,
use_knowledge=args.use_knowledge,
share_embedding=args.share_embedding,
padding_idx=0,
use_gpu=args.use_gpu)
checkpoint = torch.load(args.init_checkpoint,
map_location=lambda storage, loc: storage)
retrieval_model.load_state_dict(checkpoint['model'])
return retrieval_model
def main(args):
"""
main
"""
args.use_gpu = torch.cuda.is_available() and args.use_gpu and args.gpu >= 0
args.gpu = args.gpu if args.use_gpu else -1
torch.cuda.set_device(args.gpu)
if 'kn' in args.task_name:
args.use_knowledge = True
else:
args.use_knowledge = False
processor, test_data = build_data(args)
args.voc_size = len(open(args.vocab_path, 'r').readlines())
num_labels = len(processor.get_labels())
retrieval_model = build_model(args, num_labels)
out_scores = open(args.output, 'w')
test_data_generator, num_test_examples = test_data
for batch_id, data in enumerate(test_data_generator()):
inputs, _ = data["inputs"]
positions, _ = data["positions"]
senttypes, _ = data["senttypes"]
labels, _ = data["labels"]
knowledge, _ = data["knowledge"] if args.use_knowledge and "knowledge" in data else [None, None]
outputs = retrieval_model.score(inputs, positions, senttypes, knowledge)
outputs = F.softmax(outputs, dim=1)
scores = outputs.tolist()
for i, score in enumerate(scores):
out_scores.write("%.4f\n" % (score[1]))
out_scores.flush()
out_scores.close()
if __name__ == '__main__':
args = base_parser()
print_arguments(args)
main(args)
|
py/phl/phlsys_multiprocessing__t.py | aevri/phabricator-tools | 150 | 11108334 | """Test suite for phlsys_multiprocessing."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# TODO
# -----------------------------------------------------------------------------
# Tests:
# TODO
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import multiprocessing
import unittest
import phlsys_multiprocessing
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_logging_context_breathing(self):
def logger_config():
logging.basicConfig()
with phlsys_multiprocessing.logging_context(logger_config):
logging.debug("logging test")
def test_multiresource_breathing(self):
def factory():
return "resource"
# make sure that we can get a resource in the main process
multi_resource = phlsys_multiprocessing.MultiResource(1, factory)
with multi_resource.resource_context() as resource:
self.assertEqual("resource", resource)
with multi_resource.resource_context() as resource:
self.assertEqual("resource", resource)
def test_multiresource_changes_propagate(self):
def worker(resource):
with resource.resource_context() as r:
r.append("worker process")
def factory():
return ["main process"]
multi_resource = phlsys_multiprocessing.MultiResource(1, factory)
worker_list = []
num_workers = 5
        for _ in range(num_workers):
worker_list.append(
multiprocessing.Process(target=worker, args=(multi_resource,)))
worker_list[-1].start()
for w in worker_list:
w.join()
with multi_resource.resource_context() as r:
self.assertEqual(len(r), num_workers + 1)
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
mantraml/core/management/commands/ui.py | cclauss/mantra | 330 | 11108368 | import os
import shutil
import uuid
import subprocess
import mantraml
from mantraml.core.management.commands.BaseCommand import BaseCommand
class UICmd(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("project_path", default=".", type=str, nargs="?")
return parser
def handle(self, args, unknown):
path = args.project_path
path = os.path.abspath(path)
project_root = None
# check if the path is to an mantra.yml file or contains it
if path.endswith("mantra.yml"):
project_root = os.path.dirname(path)
else:
if os.path.isfile(os.path.join(path, "mantra.yml")):
project_root = path
# run the Django server if we found the project root
if project_root:
cmd = ["python", "manage.py", "runserver"]
os.environ["MANTRA_PROJECT_ROOT"] = project_root
cwd = os.path.join(os.path.dirname(mantraml.__file__), "ui")
subprocess.run(cmd, cwd=cwd)
else:
if path == os.path.abspath("."):
print("ERROR: Cannot find mantra.yml in the current directory")
else:
print("ERROR: Path '%s' does not contain an mantra project" % path)
|
example/blog_gcdatastore.py | ice1x/Flask-Blogging | 767 | 11108384 | from flask import Flask, render_template_string, redirect
from flask_login import UserMixin, LoginManager, login_user, logout_user
from flask_blogging import SQLAStorage, BloggingEngine
from flask_blogging.gcdatastore import GoogleCloudDatastore
from flask_fileupload.storage.gcstorage import GoogleCloudStorage
from flask_fileupload import FlaskFileUpload
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/path/to/credentials.json"
app = Flask(__name__)
app.config["SECRET_KEY"] = "secret" # for WTF-forms and login
app.config["BLOGGING_URL_PREFIX"] = "/blog"
app.config["BLOGGING_DISQUS_SITENAME"] = "test"
app.config["BLOGGING_SITEURL"] = "http://localhost:8000"
app.config["BLOGGING_SITENAME"] = "My Site"
app.config["BLOGGING_TWITTER_USERNAME"] = "@me"
app.config["BLOGGING_ALLOW_FILEUPLOAD"] = True
app.config["FILEUPLOAD_LOCALSTORAGE_IMG_FOLDER"] = "fileupload"
app.config["FILEUPLOAD_PREFIX"] = "/fileupload"
app.config["FILEUPLOAD_ALLOWED_EXTENSIONS"] = ["png", "jpg", "jpeg", "gif"]
# extensions
"""Google Cloud Storage configuration docs:
https://github.com/Speedy1991/Flask-FileUpload/tree/master/doc/google_cloud_storage.md
"""
gcstorage = GoogleCloudStorage(app)
file_upload = FlaskFileUpload(app, storage=gcstorage)
gc_datastore = GoogleCloudDatastore()
blog_engine = BloggingEngine(app, gc_datastore, file_upload=file_upload)
login_manager = LoginManager(app)
class User(UserMixin):
def __init__(self, user_id):
self.id = user_id
def get_name(self):
return "<NAME>" # typically the user's name
@login_manager.user_loader
@blog_engine.user_loader
def load_user(user_id):
return User(user_id)
index_template = """
<!DOCTYPE html>
<html>
<head> </head>
<body>
{% if current_user.is_authenticated %}
<a href="/logout/"> Logout </a>
{% else %}
<a href="/login/"> Login </a>
{% endif %}
  <a href="/blog/"> Blog </a>
  <a href="/blog/sitemap.xml">Sitemap</a>
  <a href="/blog/feeds/all.atom.xml">ATOM</a>
  <a href="/fileupload/">FileUpload</a>
</body>
</html>
"""
@app.route("/")
def index():
return render_template_string(index_template)
@app.route("/login/")
def login():
user = User("testuser")
login_user(user)
return redirect("/blog")
@app.route("/logout/")
def logout():
logout_user()
return redirect("/")
if __name__ == "__main__":
app.run(debug=True, port=8000, use_reloader=True)
|
one-day-build-discord-bot/discord-bot.py | dapopov-st/python-youtube-code | 262 | 11108399 | <reponame>dapopov-st/python-youtube-code
import os
import discord
import asyncio
import random
import datetime
token = os.getenv("DISCORD_API_TOKEN")
bot = discord.Client()
@bot.event
async def on_member_join(member):
    if member.id == bot.user.id:
return
channel = discord.utils.get(bot.guilds[0].channels, name="general")
response = f"Welcome to Kite Fitness, {member.name}."
await channel.send(response)
@bot.event
async def on_message(message):
print(vars(bot))
if message.author == bot.user:
return
channel = message.channel
keywords = ["work", "workout", "push", "push up", "up", "fitness", "health", "muscle", "pull"]
for keyword in keywords:
if keyword.lower() in message.content.lower():
response = f"Did someone say {keyword.lower()}? Drop and give me 10 <@{message.author.id}>!"
await channel.send(response)
@bot.event
async def pushup_reminder():
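    # Roughly once an hour, pick a random online (non-bot) member and post a
    # push-up reminder in the #general channel.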
while(True):
await bot.wait_until_ready()
online_members = []
for member in bot.get_all_members():
if member.status != discord.Status.offline and member.id != bot.user.id:
online_members.append(member.id)
if len(online_members) > 0:
user = random.choice(online_members)
current_time = int(datetime.datetime.now().strftime("%I"))
channel = discord.utils.get(bot.guilds[0].channels, name="general")
message = f"It's {current_time} o'clock! Time for some push ups <@{user}>"
await channel.send(message)
await asyncio.sleep(3600)
bot.loop.create_task(pushup_reminder())
bot.run(token)
|
utils/plot.py | magicly/sample-factory | 320 | 11108413 | <filename>utils/plot.py
MAP_FIGURE_ID = 2
HEATMAP_FIGURE_ID = 3
|
examples/obtain_statistics_lord_of_rings.py | alifaraz14/a-nice-mc | 118 | 11108426 | <gh_stars>100-1000
import os
import sys
import numpy as np
sys.path.append(os.getcwd())
def prior(bs):
return np.random.normal(0.0, 1.0, [bs, 2])
if __name__ == '__main__':
from a_nice_mc.objectives.expression.lord_of_rings import LordOfRings
from a_nice_mc.utils.hmc import HamiltonianMonteCarloSampler
from a_nice_mc.utils.statistics import obtain_statistics
os.environ['CUDA_VISIBLE_DEVICES'] = ''
energy_fn = LordOfRings(display=False)
sampler = HamiltonianMonteCarloSampler(energy_fn, prior)
obtain_statistics(sampler, steps=5000, burn_in=1000, batch_size=32)
|
Exec/radiation_tests/RadShestakovBolstad/python/paper.py | MargotF/Castro | 178 | 11108534 | #!/usr/bin/env python
from pylab import *
from read_gnu import *
import cPickle as pickle
n = '0200'
ew, x, t = read_gnu_file('../run-paper/eint_'+n+'.gnu')
Ew, x, t = read_gnu_file('../run-paper/Er_'+n+'.gnu')
fid = open('SBunits.p', 'rb')
units = pickle.load(fid)
fid.close()
Tw = (ew/units['cv']) / units['Temp']
Ew = Ew / units['Eg']
x = x / units['L']
t = t / units['time']
print 't = ', t
ex = array([0.0, 0.2, 0.4, 0.46, 0.47, 0.48, 0.49, 0.50, 0.51, 0.52, 0.53, 0.54, 0.6, 0.8, 1.0])
eT = array([9.9373253e-1, 9.9339523e-1, 9.8969664e-1, 9.8060848e-1, 9.7609654e-1, 9.6819424e-1,
9.5044751e-1, 4.9704000e-1, 4.3632445e-2, 2.5885608e-2, 1.7983134e-2, 1.3470947e-2,
4.3797848e-3, 6.4654865e-4, 1.9181546e-4])
eE = array([5.6401674e-3, 5.5646351e-3, 5.1047352e-3, 4.5542134e-3, 4.3744933e-3, 4.1294850e-3,
3.7570008e-3, 2.9096931e-3, 2.0623647e-3, 1.6898183e-3, 1.4447063e-3, 1.2648409e-3,
7.1255738e-4, 2.3412650e-4, 1.0934921e-4])
figure(1, figsize=(7,6.5))
subplots_adjust(left=0.15, bottom=0.08, right=0.97, top=0.97, wspace=0,hspace=0)
ax1 = subplot(211)
plot(ex, eE, 'o', markeredgecolor='k', markerfacecolor='w', markeredgewidth=2, markersize=8)
plot(x,Ew,'k')
xticks(visible=False)
xlim(0,1.05)
#ylim(0.0,0.14)
ylabel('$E_r$')
minorticks_on()
text(0.9,0.86,'(a)', transform = ax1.transAxes)
ax2 = subplot(212)
plot(ex,eT,'o', markeredgecolor='k', markerfacecolor='w', markeredgewidth=2, markersize=8)
plot(x,Tw,'k')
legend(['Exact', 'Numerical'], loc='lower left')
xlabel('x')
xlim(0,1.05)
ylim(-0.04,1.14)
ylabel('$T$')
minorticks_on()
text(0.9,0.86,'(b)', transform = ax2.transAxes)
draw()
show()
|
node/rules.bzl | bowlofstew/rules_protobuf | 252 | 11108548 | load("@org_pubref_rules_node//node:rules.bzl", "node_module")
load("//cpp:rules.bzl", "cpp_proto_repositories")
load("//node:deps.bzl", "DEPS")
load("//protobuf:rules.bzl",
"proto_compile",
"proto_language_deps",
"proto_repositories")
def node_proto_repositories(
omit_cpp_repositories = False,
lang_deps = DEPS,
lang_requires = [
# "npm_protobuf_stack",
# "npm_grpc",
],
**kwargs):
if not omit_cpp_repositories:
cpp_proto_repositories(**kwargs)
rem = proto_repositories(lang_deps = lang_deps,
lang_requires = lang_requires,
**kwargs)
# Load remaining (special) deps
for dep in rem:
rule = dep.pop("rule")
if "npm_repository" == rule:
fail("Unknown loading rule %s for %s" % (rule, dep))
#npm_repository(**dep)
else:
fail("Unknown loading rule %s for %s" % (rule, dep))
def _get_js_variable_name(file):
    # Strip a trailing ".js" extension explicitly; a character-set rstrip(".js")
    # would also remove trailing '.', 'j' or 's' characters from the stem itself.
    name = file.basename
    if name.endswith(".js"):
        name = name[:-len(".js")]
    # Deal with special characters here?
    return name
def _node_proto_module_impl(ctx):
compilation = ctx.attr.compilation.proto_compile_result
index_js = ctx.new_file("%s/index.js" % ctx.label.name)
exports = {}
for unit in compilation.transitive_units:
for file in unit.outputs:
if file.path.endswith("_pb.js"):
name = _get_js_variable_name(file)
exports[name] = file.short_path
elif file.path.endswith("_grpc_pb.js"):
name = _get_js_variable_name(file)
exports[name] = file.short_path
content = []
content.append("module.exports = {")
for name, path in exports.items():
content.append(" '%s': require('./%s')," % (name, path))
content.append("}")
ctx.file_action(
output = index_js,
content = "\n".join(content)
)
return struct(
files = depset([index_js]),
)
_node_proto_module = rule(
implementation = _node_proto_module_impl,
attrs = {
"compilation": attr.label(
providers = ["proto_compile_result"],
mandatory = True,
)
}
)
def node_proto_compile(langs = [str(Label("//node"))], **kwargs):
proto_compile(langs = langs, **kwargs)
def node_proto_library(
name,
langs = [str(Label("//node"))],
protos = [],
imports = [],
inputs = [],
output_to_workspace = False,
proto_deps = [
],
protoc = None,
pb_plugin = None,
pb_options = [],
proto_compile_args = {},
srcs = [],
deps = [
"@yarn_modules//:google-protobuf",
],
data = [],
verbose = 0,
with_grpc = False,
**kwargs):
proto_compile_args += {
"name": name + ".pb",
"protos": protos,
"deps": [dep + ".pb" for dep in proto_deps],
"langs": langs,
"imports": imports,
"inputs": inputs,
"pb_options": pb_options,
"output_to_workspace": output_to_workspace,
"verbose": verbose,
"with_grpc": with_grpc,
}
if protoc:
proto_compile_args["protoc"] = protoc
if pb_plugin:
proto_compile_args["pb_plugin"] = pb_plugin
proto_compile(**proto_compile_args)
_node_proto_module(
name = name + "_index",
compilation = name + ".pb",
)
node_module(
name = name,
index = name + "_index",
layout = "workspace",
srcs = srcs + [name + ".pb"],
data = data + [dep + ".pb" for dep in proto_deps],
deps = depset(deps + proto_deps).to_list(),
**kwargs)
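# Example BUILD usage (illustrative; the target and .proto file names are
# assumptions, not defined in this repository):
#
# node_proto_library(
#     name = "greeter_node_proto",
#     protos = ["greeter.proto"],
#     with_grpc = True,
# )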
|
leet/trees/flatten.py | monishshah18/python-cp-cheatsheet | 140 | 11108555 | <reponame>monishshah18/python-cp-cheatsheet
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
"""
Morris Traversal
5
/ \
2 1
\ / \
6 10 11
/
44
\
23
5
\
2
\
6
/ \
44 1
\ / \
23 10 11
5
\
2
\
6
\
44
\
23
\
1
/ \
10 11
1
/ \
2 5
/ \ \
3 4 6
1
\
2
/ \
3 4
\
5
\
6
"""
class Solution:
def flatten(self, root: TreeNode) -> None:
"""
Do not return anything, modify root in-place instead.
"""
# Get rightmostgrandchild
# change left child to right child
# attach former right child to rightmostgrandchild
if not root:
return None
node = root
while node:
if node.left:
rightmost = node.left
while rightmost.right:
rightmost = rightmost.right
rightmost.right = node.right
node.right = node.left
node.left = None
node = node.right
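# Example (illustrative; TreeNode is supplied by the judge -- see the commented
# definition above):
#
#   root = TreeNode(1, TreeNode(2, TreeNode(3), TreeNode(4)), TreeNode(5, None, TreeNode(6)))
#   Solution().flatten(root)
#   # root's right-pointer chain is now 1 -> 2 -> 3 -> 4 -> 5 -> 6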
|
tools/device_file_generator/dfg/avr/avr_mcu.py | roboterclubaachen/xpcc | 161 | 11108598 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
mcu = \
[
{"device": "at32uc3a0512", "mcu": "uc3a0512"},
{"device": "at90can128", "mcu": "c128"},
{"device": "at90can32", "mcu": "c32"},
{"device": "at90can64", "mcu": "c64"},
{"device": "at90pwm2", "mcu": "pwm2"},
{"device": "at90pwm2b", "mcu": "pwm2b"},
{"device": "at90pwm3", "mcu": "pwm3"},
{"device": "at90pwm316", "mcu": "pwm316"},
{"device": "at90pwm3b", "mcu": "pwm3b"},
{"device": "at90s1200", "mcu": "1200"},
{"device": "at90s2313", "mcu": "2313"},
{"device": "at90s2333", "mcu": "2333"},
{"device": "at90s2343", "mcu": "2343"},
{"device": "at90s4414", "mcu": "4414"},
{"device": "at90s4433", "mcu": "4433"},
{"device": "at90s4434", "mcu": "4434"},
{"device": "at90s8515", "mcu": "8515"},
{"device": "at90s8535", "mcu": "8535"},
{"device": "at90usb1286", "mcu": "usb1286"},
{"device": "at90usb1287", "mcu": "usb1287"},
{"device": "at90usb162", "mcu": "usb162"},
{"device": "at90usb646", "mcu": "usb646"},
{"device": "at90usb647", "mcu": "usb647"},
{"device": "at90usb82", "mcu": "usb82"},
{"device": "atmega103", "mcu": "m103"},
{"device": "atmega128", "mcu": "m128"},
{"device": "atmega1280", "mcu": "m1280"},
{"device": "atmega1281", "mcu": "m1281"},
{"device": "atmega1284p", "mcu": "m1284p"},
{"device": "atmega1284rfr2", "mcu": "m1284rfr2"},
{"device": "atmega128rfa1", "mcu": "m128rfa1"},
{"device": "atmega128rfr2", "mcu": "m128rfr2"},
{"device": "atmega16", "mcu": "m16"},
{"device": "atmega161", "mcu": "m161"},
{"device": "atmega162", "mcu": "m162"},
{"device": "atmega163", "mcu": "m163"},
{"device": "atmega164p", "mcu": "m164p"},
{"device": "atmega168", "mcu": "m168"},
{"device": "atmega168p", "mcu": "m168p"},
{"device": "atmega169", "mcu": "m169"},
{"device": "atmega16u2", "mcu": "m16u2"},
{"device": "atmega2560", "mcu": "m2560"},
{"device": "atmega2561", "mcu": "m2561"},
{"device": "atmega2564rfr2", "mcu": "m2564rfr2"},
{"device": "atmega256rfr2", "mcu": "m256rfr2"},
{"device": "atmega32", "mcu": "m32"},
{"device": "atmega324p", "mcu": "m324p"},
{"device": "atmega324pa", "mcu": "m324pa"},
{"device": "atmega325", "mcu": "m325"},
{"device": "atmega3250", "mcu": "m3250"},
{"device": "atmega328", "mcu": "m328"},
{"device": "atmega328p", "mcu": "m328p"},
{"device": "atmega329", "mcu": "m329"},
{"device": "atmega3290", "mcu": "m3290"},
{"device": "atmega3290p", "mcu": "m3290p"},
{"device": "atmega329p", "mcu": "m329p"},
{"device": "atmega32u2", "mcu": "m32u2"},
{"device": "atmega32u4", "mcu": "m32u4"},
{"device": "atmega406", "mcu": "m406"},
{"device": "atmega48", "mcu": "m48"},
{"device": "atmega48p", "mcu": "m48p"},
{"device": "atmega64", "mcu": "m64"},
{"device": "atmega640", "mcu": "m640"},
{"device": "atmega644", "mcu": "m644"},
{"device": "atmega644p", "mcu": "m644p"},
{"device": "atmega644rfr2", "mcu": "m644rfr2"},
{"device": "atmega645", "mcu": "m645"},
{"device": "atmega6450", "mcu": "m6450"},
{"device": "atmega649", "mcu": "m649"},
{"device": "atmega6490", "mcu": "m6490"},
{"device": "atmega64rfr2", "mcu": "m64rfr2"},
{"device": "atmega8", "mcu": "m8"},
{"device": "atmega8515", "mcu": "m8515"},
{"device": "atmega8535", "mcu": "m8535"},
{"device": "atmega88", "mcu": "m88"},
{"device": "atmega88p", "mcu": "m88p"},
{"device": "atmega8u2", "mcu": "m8u2"},
{"device": "attiny10", "mcu": "t10"},
{"device": "attiny11", "mcu": "t11"},
{"device": "attiny12", "mcu": "t12"},
{"device": "attiny13", "mcu": "t13"},
{"device": "attiny15", "mcu": "t15"},
{"device": "attiny1634", "mcu": "t1634"},
{"device": "attiny20", "mcu": "t20"},
{"device": "attiny2313", "mcu": "t2313"},
{"device": "attiny24", "mcu": "t24"},
{"device": "attiny25", "mcu": "t25"},
{"device": "attiny26", "mcu": "t26"},
{"device": "attiny261", "mcu": "t261"},
{"device": "attiny4", "mcu": "t4"},
{"device": "attiny40", "mcu": "t40"},
{"device": "attiny4313", "mcu": "t4313"},
{"device": "attiny43u", "mcu": "t43u"},
{"device": "attiny44", "mcu": "t44"},
{"device": "attiny45", "mcu": "t45"},
{"device": "attiny461", "mcu": "t461"},
{"device": "attiny5", "mcu": "t5"},
{"device": "attiny84", "mcu": "t84"},
{"device": "attiny85", "mcu": "t85"},
{"device": "attiny861", "mcu": "t861"},
{"device": "attiny88", "mcu": "t88"},
{"device": "attiny9", "mcu": "t9"},
{"device": "atxmega128a1", "mcu": "x128a1"},
{"device": "atxmega128a1revd", "mcu": "x128a1d"},
{"device": "atxmega128a1u", "mcu": "128a1u"},
{"device": "atxmega128a3", "mcu": "x128a3"},
{"device": "atxmega128a3u", "mcu": "x128a3u"},
{"device": "atxmega128a4", "mcu": "x128a4"},
{"device": "atxmega128a4u", "mcu": "x128a4u"},
{"device": "atxmega128b1", "mcu": "x128b1"},
{"device": "atxmega128b3", "mcu": "x128b3"},
{"device": "atxmega128c3", "mcu": "x128c3"},
{"device": "atxmega128d3", "mcu": "x128d3"},
{"device": "atxmega128d4", "mcu": "x128d4"},
{"device": "atxmega16a4", "mcu": "x16a4"},
{"device": "atxmega16a4u", "mcu": "x16a4u"},
{"device": "atxmega16c4", "mcu": "x16c4"},
{"device": "atxmega16d4", "mcu": "x16d4"},
{"device": "atxmega16e5", "mcu": "x16e5"},
{"device": "atxmega192a1", "mcu": "x192a1"},
{"device": "atxmega192a3", "mcu": "x192a3"},
{"device": "atxmega192a3u", "mcu": "x192a3u"},
{"device": "atxmega192c3", "mcu": "x192c3"},
{"device": "atxmega192d3", "mcu": "x192d3"},
{"device": "atxmega256a1", "mcu": "x256a1"},
{"device": "atxmega256a3", "mcu": "x256a3"},
{"device": "atxmega256a3b", "mcu": "x256a3b"},
{"device": "atxmega256a3bu", "mcu": "x256a3bu"},
{"device": "atxmega256a3u", "mcu": "x256a3u"},
{"device": "atxmega256c3", "mcu": "x256c3"},
{"device": "atxmega256d3", "mcu": "x256d3"},
{"device": "atxmega32a4", "mcu": "x32a4"},
{"device": "atxmega32a4u", "mcu": "x32a4u"},
{"device": "atxmega32c4", "mcu": "x32c4"},
{"device": "atxmega32d4", "mcu": "x32d4"},
{"device": "atxmega32e5", "mcu": "x32e5"},
{"device": "atxmega384c3", "mcu": "x384c3"},
{"device": "atxmega384d3", "mcu": "x384d3"},
{"device": "atxmega64a1", "mcu": "x64a1"},
{"device": "atxmega64a1u", "mcu": "x64a1u"},
{"device": "atxmega64a3", "mcu": "x64a3"},
{"device": "atxmega64a3u", "mcu": "x64a3u"},
{"device": "atxmega64a4", "mcu": "x64a4"},
{"device": "atxmega64a4u", "mcu": "x64a4u"},
{"device": "atxmega64b1", "mcu": "x64b1"},
{"device": "atxmega64b3", "mcu": "x64b3"},
{"device": "atxmega64c3", "mcu": "x64c3"},
{"device": "atxmega64d3", "mcu": "x64d3"},
{"device": "atxmega64d4", "mcu": "x64d4"},
{"device": "atxmega8e5", "mcu": "x8e5"}
]
|
ros_ws/src/crazyswarm/scripts/test_ros.py | marios-stam/crazyswarm | 219 | 11108600 | <reponame>marios-stam/crazyswarm
import pytest
@pytest.mark.ros
def test_ros_import():
from pycrazyswarm.crazyflie import TimeHelper, CrazyflieServer
|
fairseq/models/mcbert/model.py | motefly/MC-BERT | 104 | 11108631 | <reponame>motefly/MC-BERT
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
MC-BERT: Efficient Language Pre-Training via a Meta Controller
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
TransformerSentenceEncoder,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import McbertHubInterface
@register_model('mcbert')
class McbertModel(FairseqLanguageModel):
@classmethod
def hub_models(cls):
return {
}
def __init__(self, args, mc_encoder, gen_encoder, neither_idx):
super().__init__(gen_encoder)
self.mc = mc_encoder
self.args = args
self.neither_idx = neither_idx
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--encoder-layers', type=int, metavar='L',
help='num encoder layers')
parser.add_argument('--encoder-embed-dim', type=int, metavar='H',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='F',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-attention-heads', type=int, metavar='A',
help='num encoder attention heads')
parser.add_argument('--mc-size-divider', type=int,
help='divider for mc: layer size, FFN size and attention heads')
parser.add_argument('--class-num', type=int, default=10,
help='total number of classes')
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--pooler-activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use for pooler layer')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--embedding-normalize', action='store_true',
help='add layernorm after the embedding layer')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN')
parser.add_argument('--pooler-dropout', type=float, metavar='D',
help='dropout probability in the masked_lm pooler layers')
parser.add_argument('--max-positions', type=int,
help='number of positional embeddings to learn')
parser.add_argument('--load-checkpoint-heads', action='store_true',
help='(re-)register and load heads when loading checkpoints')
parser.add_argument('--debug', action='store_true',
help='trigger for pdb')
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
args.vocab_num = len(task.source_dictionary)
args.vocab_nspecial = task.source_dictionary.nspecial
if not hasattr(args, 'max_positions'):
args.max_positions = args.tokens_per_sample
gen_encoder = GenEncoder(args, task.source_dictionary)
if args.task == 'mcbert':
mc_encoder = MCEncoder(args, task.source_dictionary, gen_encoder.sentence_encoder.embed_tokens, gen_encoder.lm_head.bias.weight)
else:
mc_encoder = None
return cls(args, mc_encoder, gen_encoder, task.source_dictionary.index('<neither>'))
def forward(self, src_tokens, features_only=False, return_all_hiddens=False, classification_head_name=None, masked_tokens=None, targets=None, **kwargs):
if classification_head_name is not None:
features_only = True
if self.args.task == 'mcbert':
mc_x_mask, features, _ = self.mc(
src_tokens,
features_only=False,
return_all_hiddens=False,
masked_tokens=masked_tokens,
return_top_features=True,
**kwargs
) # Float[num_masked, vocab]
with torch.no_grad():
mc_x_all = self.mc.lm_head(features, masked_tokens=None)
sample_probs = torch.softmax(mc_x_all.detach(), -1, dtype=torch.float32) # Float[bs, seq_len, vocab]
sample_probs = sample_probs.view(-1, sample_probs.size(-1)) # Float[bs * seq_len, vocab]
sampled_tokens = torch.multinomial(
sample_probs, self.args.class_num, replacement=True
).view(src_tokens.size(0), src_tokens.size(1), self.args.class_num) # Float[bs, seq_len, class_num]
src_tokens = src_tokens.clone()
if masked_tokens is not None: # masked_tokens : Float[bs, seq_len]
src_tokens[masked_tokens] = sampled_tokens[:, :, 0][masked_tokens]
# for replaced tokens, the correct label is 1-th element (the target)
replace_tokens = src_tokens.ne(targets)
sampled_tokens[:, :, 1][replace_tokens] = targets[replace_tokens]
# for non-replaced tokens, the correct label is 0-th element (the <neither> token)
sampled_tokens[:, :, 0] = self.neither_idx
                # generate targets according to the rule mentioned above
gen_target = replace_tokens.long()
gen_x, extra = self.decoder(
src_tokens,
features_only=features_only,
return_all_hiddens=return_all_hiddens,
masked_tokens=None, # for generator, predict all of the tokens
out_helper=sampled_tokens if self.args.task == 'mcbert' else None,
**kwargs
)
if classification_head_name is not None:
gen_x = self.classification_heads[classification_head_name](gen_x)
if self.args.task == 'mcbert':
return mc_x_mask, gen_x, src_tokens, gen_target, extra
else:
return gen_x, extra
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
print(
'WARNING: re-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = McbertClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
@property
def supported_targets(self):
return {'self'}
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='gpt2', **kwargs):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return McbertHubInterface(x['args'], x['task'], x['models'][0])
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + '.' if name != '' else ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0)
inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0)
if getattr(self.args, 'load_checkpoint_heads', False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
print(
'WARNING: deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes != self.classification_heads[head_name].out_proj.out_features
or inner_dim != self.classification_heads[head_name].dense.out_features
):
print(
'WARNING: deleting classification head ({}) from checkpoint '
'with different dimensions than current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
print('Overwriting', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
class MCLMHead(nn.Module):
"""Head for Meta Controller"""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None, share_emb_pro=None, bias=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
        # make sure add_one_linear is always defined before forward() uses it
        if weight is None:
            weight = nn.Linear(embed_dim, output_dim, bias=False).weight
            self.add_one_linear = False
        elif share_emb_pro is not None:
            self.add_one_linear = True
            self.share_emb_pro = share_emb_pro
        else:
            self.add_one_linear = False
        self.weight = weight
        # keep the provided bias parameter when one is passed in
        if bias is None:
            self.bias = nn.Parameter(torch.zeros(output_dim))
        else:
            self.bias = bias
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the unmasked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
if self.add_one_linear:
weight = self.share_emb_pro(self.weight)
else:
weight = self.weight
# project back to size of vocabulary with bias
x = F.linear(x, weight) + self.bias.view(-1)
return x
class McbertClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class MCEncoder(FairseqDecoder):
"""MC encoder.
Implements the :class:`~fairseq.models.FairseqDecoder` interface required
by :class:`~fairseq.models.FairseqLanguageModel`.
"""
def __init__(self, args, dictionary, share_embed_tokens, lmhead_bias_helper):
super().__init__(dictionary)
self.args = args
self.sentence_encoder = TransformerSentenceEncoder(
padding_idx=dictionary.pad(),
vocab_size=len(dictionary),
num_encoder_layers=args.encoder_layers,
embedding_dim=int(args.encoder_embed_dim / args.mc_size_divider),
ffn_embedding_dim=int(args.encoder_ffn_embed_dim / args.mc_size_divider),
num_attention_heads=int(args.encoder_attention_heads / args.mc_size_divider),
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
max_seq_len=args.max_positions,
num_segments=0,
encoder_normalize_before=args.encoder_normalize_before,
embedding_normalize=args.embedding_normalize,
apply_bert_init=True,
activation_fn=args.activation_fn,
share_embed_tokens=share_embed_tokens,
shared_embedding_dim=args.encoder_embed_dim,
)
self.lm_head = MCLMHead(
embed_dim=int(args.encoder_embed_dim / args.mc_size_divider),
output_dim=len(dictionary),
activation_fn=args.activation_fn,
weight=self.sentence_encoder.embed_tokens.weight,
share_emb_pro=self.sentence_encoder.embed_linear,
bias=lmhead_bias_helper,
)
def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, return_top_features=False, **unused):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states.
"""
x, extra = self.extract_features(src_tokens, return_all_hiddens)
if return_top_features:
features = x
if not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
if return_top_features:
return x, features, extra
return x, extra
def extract_features(self, src_tokens, return_all_hiddens=False, **unused):
inner_states, _ = self.sentence_encoder(
src_tokens,
last_state_only=not return_all_hiddens,
)
features = inner_states[-1]
return features, {'inner_states': inner_states if return_all_hiddens else None}
def output_layer(self, features, masked_tokens=None, **unused):
return self.lm_head(features, masked_tokens)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
class GenEncoder(FairseqDecoder):
"""McbertModel generator encoder.
Implements the :class:`~fairseq.models.FairseqDecoder` interface required
by :class:`~fairseq.models.FairseqLanguageModel`.
"""
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
self.sentence_encoder = TransformerSentenceEncoder(
padding_idx=dictionary.pad(),
vocab_size=len(dictionary),
num_encoder_layers=args.encoder_layers,
embedding_dim=args.encoder_embed_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
max_seq_len=args.max_positions,
num_segments=0,
encoder_normalize_before=args.encoder_normalize_before,
embedding_normalize=args.embedding_normalize,
apply_bert_init=True,
activation_fn=args.activation_fn,
)
self.lm_head = GenLMHead(
embed_dim=int(args.encoder_embed_dim),
output_dim=args.class_num,
activation_fn=args.activation_fn,
embed_tokens=self.sentence_encoder.embed_tokens,
)
def forward(self, src_tokens, features_only=False, return_all_hiddens=False, masked_tokens=None, out_helper=None, **unused):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states.
"""
x, extra = self.extract_features(src_tokens, return_all_hiddens)
if not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens, out_helper=out_helper)
return x, extra
def extract_features(self, src_tokens, return_all_hiddens=False, **unused):
inner_states, _ = self.sentence_encoder(
src_tokens,
last_state_only=not return_all_hiddens,
)
features = inner_states[-1]
return features, {'inner_states': inner_states if return_all_hiddens else None}
def output_layer(self, features, masked_tokens=None, out_helper=None, **unused):
return self.lm_head(features, masked_tokens, out_helper)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
class GenLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, embed_tokens):
super().__init__()
self.embed_dim = embed_dim
self.output_dim = output_dim
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
self.embed_tokens = embed_tokens
self.bias = nn.Embedding(embed_tokens.num_embeddings, 1)
def forward(self, features, masked_tokens=None, out_helper=None, **kwargs):
# Only project the unmasked tokens while training,
# saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
if out_helper is not None:
# x: Float[bs, seq_len, dim]
weight = self.embed_tokens(out_helper).transpose(2, 3) # Float[bs, seq_len, dim, num_class]
bias = self.bias(out_helper).transpose(2, 3) # Float[bs, seq_len, 1, num_class]
bs = x.size(0)
x = torch.baddbmm(
input=bias.view(-1, bias.size(-2), bias.size(-1)),
batch1=x.view(-1, 1, x.size(-1)),
batch2=weight.view(-1, weight.size(-2), weight.size(-1))
) # Float[bs * seq_len, 1, num_class]
return x.view(bs, -1, x.size(-1)) # Float[bs, seq_len, num_class]
else:
return x
@register_model_architecture('mcbert', 'mcbert')
def base_architecture(args):
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
args.mc_size_divider = getattr(args, 'mc_size_divider', 3)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.embedding_normalize = getattr(args, 'embedding_normalize', False)
@register_model_architecture('mcbert', 'mcbert_base')
def mcbert_base_architecture(args):
base_architecture(args)
@register_model_architecture('mcbert', 'mcbert_small')
def mcbert_small_architecture(args):
args.encoder_layers = getattr(args, 'encoder_layers', 12)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.mc_size_divider = getattr(args, 'mc_size_divider', 2)
base_architecture(args)
@register_model_architecture('mcbert', 'mcbert_large')
def mcbert_large_architecture(args):
args.encoder_layers = getattr(args, 'encoder_layers', 24)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.mc_size_divider = getattr(args, 'mc_size_divider', 4)
base_architecture(args)
|
recipes/Python/440627_Encoding_A_String/recipe-440627.py | tdiprima/code | 2,023 | 11108635 | <filename>recipes/Python/440627_Encoding_A_String/recipe-440627.py
'''code_module.py
The purpose of this module
is to provide functions
for the coding of strings.
This is a level 1 module.'''
#==================================
# Level 1 Functions: String To Code
#==================================
def string_to_number(string):
'''string_to_number(string)
Create a starting number.
    Translate the string into the number.
Return the number.'''
number = 1
for character in string:
number *= 256
number += ord(character)
return number
def number_to_code(number):
'''number_to_code(long)
Create a starting string.
Translate the number into the code.
Return the string.'''
code = str()
while number != 0:
code = chr(number % 255 + 1) + code
number /= 255
return code
#==================================
# Level 1 Functions: Code To String
#==================================
def code_to_number(code):
'''code_to_number(string)
Create a starting number.
    Translate the code into the number.
Return the number.'''
number = 0
for character in code:
number *= 255
number += ord(character) - 1
return number
def number_to_string(number):
'''number_to_string(long)
Create a starting string.
Translate the number into the string.
Return the string.'''
string = str()
while number > 1:
string = chr(number % 256) + string
number /= 256
return string
#===============================
# Level 2 Functions: To And From
#===============================
def string_to_code(string):
'''string_to_code(string)
Returns a string converted to code.'''
return number_to_code(string_to_number(string))
def code_to_string(code):
'''code_to_string(string)
Returns code converted to a string.'''
return number_to_string(code_to_number(code))
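# Round-trip sketch (not part of the original recipe): under Python 2, where the
# integer division in number_to_code/number_to_string applies, encoding a byte
# string and decoding it again returns the original value. 'spam' is only an
# illustrative input.
#
#     >>> code = string_to_code('spam')
#     >>> code_to_string(code) == 'spam'
#     True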
#================
# CGI: Print File
#================
if __name__ == '__main__':
from sys import argv
print 'Content-type: text/plain'
print
print file(argv[0]).read()
|
4_dend_airflow_data_pipelines/exercises/dags/3_ex4_full_dag.py | AAbdul12/Data-engineering-nanodegree | 253 | 11108670 | import datetime
from airflow import DAG
from airflow.operators import (
FactsCalculatorOperator,
HasRowsOperator,
S3ToRedshiftOperator
)
# This DAG performs the following functions
# 1. Loads Trip data from S3 to RedShift
# 2. Performs a data quality check on the Trips table in RedShift
# 3. Uses the FactsCalculatorOperator to create a Facts table in Redshift
# a. **NOTE**: to complete this step you must complete the FactsCalculatorOperator
#    skeleton defined in plugins/operators/facts_calculator.py (a rough sketch of
#    one possible execute() is outlined below)
#
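# Hypothetical sketch of what FactsCalculatorOperator.execute() could do. The
# real skeleton lives in plugins/operators/facts_calculator.py; the attribute
# names, hook usage and SQL template below are assumptions, not the graded
# solution:
#
#     from airflow.hooks.postgres_hook import PostgresHook
#
#     facts_sql = """
#         DROP TABLE IF EXISTS {destination_table};
#         CREATE TABLE {destination_table} AS
#         SELECT {groupby_column},
#                MAX({fact_column}) AS max_{fact_column},
#                MIN({fact_column}) AS min_{fact_column},
#                AVG({fact_column}) AS average_{fact_column}
#         FROM {origin_table}
#         GROUP BY {groupby_column}
#     """
#     redshift = PostgresHook(postgres_conn_id=self._postgres_conn_id)
#     redshift.run(facts_sql.format(
#         origin_table=self._origin_table,
#         destination_table=self._destination_table,
#         fact_column=self._fact_column,
#         groupby_column=self._groupby_column,
#     ))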
dag = DAG("lesson3.exercise4", start_date=datetime.datetime.utcnow())
copy_trips_task = S3ToRedshiftOperator(
task_id="load_trips_from_s3_to_redshift",
dag=dag,
table="trips",
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udacity-dend",
s3_key="data-pipelines/divvy/unpartitioned/divvy_trips_2018.csv"
)
check_trips = HasRowsOperator(
task_id="trips_has_rows",
dag=dag,
redshift_conn_id="redshift",
table="trips",
provide_context=True
)
calculate_facts = FactsCalculatorOperator(
task_id="calculate_facts",
dag=dag,
postgres_conn_id="redshift",
origin_table="trips",
destination_table="trips_facts",
fact_column="tripduration",
groupbycolumn="bikeid"
)
copy_trips_task >> check_trips
check_trips >> calculate_facts
|
moya/dbexpression.py | moyaproject/moya | 129 | 11108693 | from __future__ import unicode_literals
from __future__ import print_function
from .context import dataindex
from .context.tools import to_expression
from .compat import implements_to_string, text_type
from .context.missing import is_missing
from .interface import unproxy
from pyparsing import (
Word,
WordEnd,
nums,
alphas,
Combine,
oneOf,
opAssoc,
operatorPrecedence,
QuotedString,
Literal,
ParserElement,
ParseException,
Forward,
Group,
Suppress,
Optional,
Regex,
)
from sqlalchemy import and_, or_, func, not_
import operator
import re
import threading
def dbobject(obj):
return getattr(obj, "__moyadbobject__", lambda: obj)()
@implements_to_string
class DBExpressionError(Exception):
hide_py_traceback = True
error_type = "Database expression error"
def __init__(self, exp, msg=None, col=None):
self.exp = exp
self.msg = msg or ""
self.col = col
def __str__(self):
return self.msg
def __moyaconsole__(self, console):
indent = ""
console(indent + self.exp, bold=True, fg="magenta").nl()
if self.col:
console(indent)(" " * (self.col - 1) + "^", bold=True, fg="red").nl()
class DBEvalError(Exception):
pass
def pairs(tokenlist):
"""Converts a list in to a sequence of paired values"""
return zip(tokenlist[::2], tokenlist[1::2])
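# Illustrative example (not in the original source): pairs() zips a token list
# with itself offset by one, so the evaluator classes below can walk
# (operator, operand) couples taken from a parsed expression.
#
#     assert list(pairs(["+", "b", "-", "c"])) == [("+", "b"), ("-", "c")]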
class ExpressionContext(object):
def __init__(self, exp):
self.exp = exp
self._joins = []
super(ExpressionContext, self).__init__()
def __repr__(self):
return "<expressioncontext '{}'>".format(self.exp)
def add_joins(self, joins):
self._joins.append(joins)
def process_qs(self, qs):
# TODO: Is this deprecated now?
for j in self._joins:
if isinstance(j, (tuple, list)):
qs = qs.join(*j)
else:
qs = qs.join(j)
return qs
class ExpressionModifiers(object):
def abs(self, context, v):
return func.abs(v)
def count(self, context, v):
return func.count(v)
def sum(self, context, v):
return func.sum(v)
def min(self, context, v):
return func.min(v)
def max(self, context, v):
return func.max(v)
def lower(self, context, v):
return func.lower(v)
class EvalModifierOp(object):
modifiers = ExpressionModifiers()
def __init__(self, tokens):
filter, value = tokens[0]
self.value = value
self._eval = value.eval
try:
self.filter_func = getattr(self.modifiers, filter[:-1])
except AttributeError:
raise DBEvalError("unknown filter type '%s'" % filter)
def eval(self, archive, context, app, exp_context):
return self.filter_func(context, self._eval(archive, context, app, exp_context))
class EvalMultOp(object):
"Class to evaluate multiplication and division expressions"
ops = {
"*": operator.imul,
"/": operator.itruediv,
"//": operator.ifloordiv,
"%": operator.imod,
}
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
ops = self.ops
self.operator_eval = [(ops[op], val.eval) for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
        prod = self._eval(archive, context, app, exp_context)
for op_func, _eval in self.operator_eval:
prod = op_func(prod, _eval(archive, context, app, exp_context))
return prod
class EvalAddOp(object):
"Class to evaluate addition and subtraction expressions"
ops = {"+": operator.add, "-": operator.sub}
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
ops = self.ops
self.operator_eval = [(ops[op], val.eval) for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
sum = self._eval(archive, context, app, exp_context)
for op_func, _eval in self.operator_eval:
sum = op_func(sum, _eval(archive, context, app, exp_context))
return sum
class EvalConstant(object):
"""Evaluates a constant"""
constants = {"None": None, "True": True, "False": False, "yes": True, "no": False}
def __init__(self, tokens):
self.key = tokens[0]
self.value = self.constants[self.key]
def eval(self, archive, context, app, exp_context):
return self.value
class EvalInteger(object):
"Class to evaluate an integer value"
def __init__(self, tokens):
self.value = int(tokens[0])
def eval(self, archive, context, app, exp_context):
return self.value
class EvalReal(object):
"Class to evaluate a real number value"
def __init__(self, tokens):
self.value = float(tokens[0])
def eval(self, archive, context, app, exp_context):
return self.value
class EvalString(object):
"Class to evaluate a string"
def __init__(self, tokens):
self.value = tokens[0]
def eval(self, archive, context, app, exp_context):
return self.value
def qs(value):
if hasattr(value, "__moyadbobject__"):
value = value.__moyadbobject__()
if hasattr(value, "_get_query_set"):
value = value._get_query_set()
if isinstance(value, list):
return [getattr(v, "id", v) for v in value]
return value
class EvalVariable(object):
"Class to evaluate a parsed variable"
def __init__(self, tokens):
key = tokens[0]
self.index = dataindex.parse(key)
def eval(self, archive, context, app, exp_context):
value = context[self.index]
if is_missing(value):
raise DBEvalError(
"Database expression value '{}' is missing from the context".format(
self.index
)
)
return dbobject(unproxy(value))
class EvalModelReference(object):
"""Gets a model reference"""
    _ref_model_ref = re.compile(r"^(.*?#.*?)(?:\.(.*?))?$")
def __init__(self, tokens):
self.index = tokens[0]
def eval(self, archive, context, app, exp_context):
model_ref, index = self._ref_model_ref.match(self.index).groups()
app = app or context.get(".app", None)
if app is None:
raise DBEvalError("unable to get app from '{}'".format(self.index))
if index is None:
app, model_element = app.get_element(model_ref)
try:
table_class = model_element.get_table_class(app)
except Exception as e:
raise DBEvalError(str(e))
return table_class
index = list(dataindex.parse(index))
app, model_element = app.get_element(model_ref)
try:
table_class = model_element.get_table_class(app)
except Exception as e:
raise DBEvalError(str(e))
try:
model_reference_result = table_class._get_index(
archive, context, app, exp_context, index
)
except (KeyError, AttributeError):
raise DBEvalError('no column or object called "{}"'.format(self.index))
else:
return model_reference_result
class EvalComparisonOp(object):
"Class to evaluate comparison expressions"
@classmethod
def match_re(cls, a, b):
return bool(b.match(a))
@classmethod
def escape_like(cls, like, _should_escape="\\%_".__contains__):
"""escape LIKE comparisons"""
if not isinstance(like, text_type):
return like
return "".join("\\" + c if _should_escape(c) else c for c in like)
def in_(context, a, b):
if hasattr(b, "__moyadbsubselect__"):
sub_b = b.__moyadbsubselect__(context)
if sub_b is not None:
b = sub_b
a = qs(a)
try:
return a.in_(qs(b))
except:
raise DBEvalError(
"db expression 'in' operator works on columns only (did you mean .id)?"
)
def notin_(context, a, b):
if hasattr(b, "__moyadbsubselect__"):
sub_b = b.__moyadbsubselect__(context)
if sub_b is not None:
b = sub_b
a = qs(a)
try:
return a.notin_(qs(b))
except:
raise DBEvalError(
"db expression 'not in' operator works on columns only (did you mean .id)?"
)
def contains_(context, a, b):
try:
return qs(a).contains(qs(b))
except:
raise DBEvalError(
"value {} is an invalid operand for the 'contains' operator".format(
to_expression(context, b)
)
)
def icontains_(context, a, b):
if not isinstance(b, text_type):
raise DBEvalError(
"icontains right hand side should be a string, not {}".format(
context.to_expr(b)
)
)
b = "%{}%".format(EvalComparisonOp.escape_like(b))
try:
return qs(a).like(b)
except:
raise DBEvalError(
"{} may not be used with 'icontains' operator".format(
context.to_expr(a)
)
)
def ieq(context, a, b):
if not isinstance(b, text_type):
raise DBEvalError(
"case insensitive equality operator (~=) right hand side should be a string, not {}".format(
context.to_expr(b)
)
)
return qs(a).ilike(EvalComparisonOp.escape_like(b), escape="\\")
opMap = {
"<": lambda c, a, b: qs(a) < qs(b),
"lt": lambda c, a, b: qs(a) < qs(b),
"<=": lambda c, a, b: qs(a) <= qs(b),
"lte": lambda c, a, b: qs(a) <= qs(b),
">": lambda c, a, b: qs(a) > qs(b),
"gt": lambda c, a, b: qs(a) > qs(b),
">=": lambda c, a, b: qs(a) >= qs(b),
"gte": lambda c, a, b: qs(a) >= qs(b),
"!=": lambda c, a, b: qs(a) != qs(b),
"==": lambda c, a, b: qs(a) == qs(b),
"like": lambda c, a, b: qs(a).like(qs(b)),
"ilike": lambda c, a, b: qs(a).ilike(qs(b)),
# "~=": lambda c, a, b: qs(a).ilike(qs(EvalComparisonOp.escape_like(b)), escape='\\'),
"~=": ieq,
"^=": lambda c, a, b: qs(a).startswith(qs(b)),
"$=": lambda c, a, b: qs(a).endswith(qs(b)),
"in": in_,
"not in": notin_,
"contains": contains_,
"icontains": icontains_,
# "icontains": lambda c, a, b: qs(a).like('%' + EvalComparisonOp.escape_like(b) + '%', escape='\\')
}
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [
(self.opMap[op], val.eval) for op, val in pairs(self.value[1:])
]
def eval(self, archive, context, app, exp_context):
val1 = self._eval(archive, context, app, exp_context)
for op_func, _eval in self.operator_eval:
val2 = _eval(archive, context, app, exp_context)
val1 = op_func(context, val1, val2)
return val1
class EvalLogicOpAND(object):
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
val1 = self._eval(archive, context, app, exp_context)
for _eval in self.operator_eval:
val2 = _eval(archive, context, app, exp_context)
val1 = and_(val1, val2)
return val1
class EvalLogicOpOR(object):
def __init__(self, tokens):
self.value = tokens[0]
self._eval = self.value[0].eval
self.operator_eval = [val.eval for op, val in pairs(self.value[1:])]
def eval(self, archive, context, app, exp_context):
val1 = self._eval(archive, context, app, exp_context)
for _eval in self.operator_eval:
val2 = _eval(archive, context, app, exp_context)
val1 = or_(val1, val2)
return val1
class EvalGroupOp(object):
def __init__(self, tokens):
self._evals = [t.eval for t in tokens[0][0::2]]
def eval(self, archive, context, app, exp_context):
val = [eval(archive, context, app, exp_context) for eval in self._evals]
return val
class EvalNotOp(object):
"""Class to evaluate expressions with logical NOT"""
def __init__(self, tokens):
self._eval = tokens[0][1].eval
def eval(self, archive, context, app, exp_context):
return not_(self._eval(archive, context, app, exp_context))
integer = Word(nums)
real = Combine(Word(nums) + "." + Word(nums))
constant = (
Literal("True")
| Literal("False")
| Literal("None")
| Literal("yes")
| Literal("no")
) + WordEnd()
model_reference = Regex(r"([\w\.]*#[\w\.]+)")
variable = Regex(r"([a-zA-Z0-9\._]+)")
string = QuotedString('"', escChar="\\") | QuotedString("'", escChar="\\")
operand = model_reference | real | integer | constant | string | variable
plusop = oneOf("+ -")
multop = oneOf("* / // %")
groupop = Literal(",")
expr = Forward()
notop = Literal("not") + WordEnd()
modifier = Combine(Word(alphas + nums) + ":")
integer.setParseAction(EvalInteger)
real.setParseAction(EvalReal)
string.setParseAction(EvalString)
constant.setParseAction(EvalConstant)
variable.setParseAction(EvalVariable)
model_reference.setParseAction(EvalModelReference)
comparisonop = (
oneOf("< <= > >= != == ~= ^= $=")
| (Literal("not in") + WordEnd())
| (oneOf("in lt lte gt gte matches contains icontains like ilike") + WordEnd())
)
logicopOR = Literal("or") + WordEnd()
logicopAND = Literal("and") + WordEnd()
expr << operatorPrecedence(
operand,
[
(notop, 1, opAssoc.RIGHT, EvalNotOp),
(modifier, 1, opAssoc.RIGHT, EvalModifierOp),
(multop, 2, opAssoc.LEFT, EvalMultOp),
(plusop, 2, opAssoc.LEFT, EvalAddOp),
(comparisonop, 2, opAssoc.LEFT, EvalComparisonOp),
(logicopAND, 2, opAssoc.LEFT, EvalLogicOpAND),
(logicopOR, 2, opAssoc.LEFT, EvalLogicOpOR),
(groupop, 2, opAssoc.LEFT, EvalGroupOp),
],
)
@implements_to_string
class DBExpression(object):
exp_cache = {}
_lock = threading.Lock()
def __init__(self, exp):
self.exp = exp
def __repr__(self):
return '<DBExpression "%s">' % self.exp
def __str__(self):
return self.exp
def eval(self, archive, context, app=None):
exp_context = ExpressionContext(self.exp)
try:
eval = self.compile_cache(self.exp)
result = eval(archive, context, app, exp_context)
except DBEvalError as e:
raise DBExpressionError(self.exp, text_type(e))
return result
def eval2(self, archive, context, app=None):
exp_context = ExpressionContext(self.exp)
try:
eval = self.compile_cache(self.exp)
result = eval(archive, context, app, exp_context)
except DBEvalError as e:
raise DBExpressionError(self.exp, text_type(e))
return result, exp_context
def compile(self):
return self.compile_cache(self.exp)
def compile_cache(self, exp):
with self._lock:
try:
return self.exp_cache[exp]
except KeyError:
try:
compiled_exp = expr.parseString(exp, parseAll=True)
except ParseException as e:
raise DBExpressionError(exp, text_type(e), col=e.col)
eval = self.exp_cache[exp] = compiled_exp[0].eval
return eval
if __name__ == "__main__":
"""
<db:filter model="#TagDB">#TagDB.name==name and #TagDB.company.pk==company_pk</db:filter>
"""
exp = DBExpression("moya.auth#User.username=='will'")
print(exp.compile())
exp = DBExpression("auth#User.username=='will'")
print(exp.compile())
exp = DBExpression(
"comments#Comment.namespace == app.name and comments#Comment.object in comment_keys"
)
print(exp.compile())
exp = DBExpression("#CommentObject.count + 1")
    print(exp.compile())
|
examples/models/templates/ModelName.py | jsreid13/seldon-core | 3,049 | 11108694 | from <your_loading_library> import <your_loading_function>
class ModelName(object):
def __init__(self):
self.model = <your_loading_function>(<your_model_file>)
def predict(self,X,features_names):
return self.model.predict(X)
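# A hypothetical filled-in version of the template above, assuming a
# scikit-learn model serialized with joblib (the class name, file name and use
# of predict_proba are illustrative assumptions, not part of the template):
#
#     from joblib import load
#
#     class IrisClassifier(object):
#         def __init__(self):
#             self.model = load("IrisClassifier.joblib")
#
#         def predict(self, X, features_names):
#             return self.model.predict_proba(X)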
|
samples/add_user_group.py | kevinsigwart/ArcREST | 208 | 11108711 | """
This sample shows how to add a user to a group
Python 2/3
ArcREST 3.5.1
"""
from __future__ import print_function
import arcrest
from arcresthelper import securityhandlerhelper
from arcresthelper import common
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback, inspect, sys
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
filename = inspect.getfile(inspect.currentframe())
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, filename, synerror
def main():
proxy_port = None
proxy_url = None
securityinfo = {}
securityinfo['security_type'] = 'Portal'#LDAP, NTLM, OAuth, Portal, PKI
securityinfo['username'] = ""#<UserName>
securityinfo['password'] = ""#<Password>
securityinfo['org_url'] = "http://www.arcgis.com"
securityinfo['proxy_url'] = proxy_url
securityinfo['proxy_port'] = proxy_port
securityinfo['referer_url'] = None
securityinfo['token_url'] = None
securityinfo['certificatefile'] = None
securityinfo['keyfile'] = None
securityinfo['client_id'] = None
securityinfo['secret_id'] = None
username = "" #Username to add
groupname = "" #Group Name
try:
shh = securityhandlerhelper.securityhandlerhelper(securityinfo=securityinfo)
if shh.valid == False:
print (shh.message)
else:
admin = arcrest.manageorg.Administration(securityHandler=shh.securityhandler)
community = admin.community
groups = community.groups
groupIds = community.getGroupIDs(groupNames=groupname)
            if groupIds is None:
                print ("Group not found")
                return
            if len(groupIds) == 0:
                print ("Group not found")
                return
            groupId = groupIds[0]
            group = groups.group(groupId=groupId)
            if group is None:
                print ("Group not found")
                return
res = group.addUsersToGroups(users=username)
print (res)
    except common.ArcRestHelperError as e:
print ("error in function: %s" % e[0]['function'])
print ("error on line: %s" % e[0]['line'])
print ("error in file name: %s" % e[0]['filename'])
print ("with error message: %s" % e[0]['synerror'])
if 'arcpyError' in e[0]:
print ("with arcpy message: %s" % e[0]['arcpyError'])
except:
line, filename, synerror = trace()
print ("error on line: %s" % line)
print ("error in file name: %s" % filename)
print ("with error message: %s" % synerror)
if __name__ == "__main__":
main() |
mmtbx/validation/mean_devs_VAL_THR_ILE_phi_psi.py | dperl-sol/cctbx_project | 155 | 11108779 | <gh_stars>100-1000
from __future__ import division
# Phi/Psi corrections in Angstrom and Radians
mean_devs = {
(-170, -180) : (0.039100, -1.177605), # 20
(-170, -170) : (0.035297, -1.183224), # 12
(-170, 140) : (0.010351, -0.992488), # 39
(-170, 150) : (0.019600, 0.905143), # 84
(-170, 160) : (0.039774, 0.475851), # 72
(-170, 170) : (0.039804, 0.537838), # 32
(-170, 180) : (0.034055, 0.228127), # 22
(-160, -180) : (0.055875, -1.071045), # 71
(-160, -170) : (0.066824, -0.922656), # 61
(-160, -160) : (0.069252, -0.904102), # 14
(-160, 130) : (0.018519, -1.690495), # 48
(-160, 140) : (0.035125, -1.193746), # 212
(-160, 150) : (0.027591, -0.348835), # 538
(-160, 160) : (0.028775, -0.078730), # 329
(-160, 170) : (0.030259, -0.494728), # 125
(-160, 180) : (0.045031, -0.859731), # 72
(-150, -180) : (0.053651, -1.126372), # 92
(-150, -170) : (0.085264, -1.234147), # 142
(-150, -160) : (0.090567, -1.113362), # 31
(-150, 100) : (0.038911, 1.637471), # 13
(-150, 110) : (0.039372, 1.041075), # 23
(-150, 120) : (0.003001, -1.219585), # 54
(-150, 130) : (0.020729, -1.128077), # 259
(-150, 140) : (0.029106, -0.956691), # 863
(-150, 150) : (0.029243, -0.810366), # 937
(-150, 160) : (0.018249, 0.916283), # 601
(-150, 170) : (0.020163, 1.255210), # 388
(-150, 180) : (0.020676, -0.853797), # 134
(-140, -180) : (0.018951, 0.298835), # 141
(-140, -170) : (0.040871, -1.080730), # 117
(-140, -160) : (0.104336, -1.239540), # 32
(-140, -150) : (0.118324, -1.314583), # 10
(-140, -60) : (0.033567, -1.726091), # 10
(-140, -50) : (0.004572, 2.864564), # 12
(-140, -40) : (0.048146, -2.037771), # 10
(-140, -30) : (0.035968, -2.056307), # 25
(-140, -20) : (0.044028, -2.459679), # 19
(-140, -10) : (0.028699, 2.496739), # 40
(-140, 0) : (0.012065, 3.128299), # 42
(-140, 10) : (0.005250, 2.917852), # 32
(-140, 20) : (0.003626, -2.880002), # 46
(-140, 30) : (0.038224, -1.292172), # 24
(-140, 40) : (0.039848, -1.458059), # 26
(-140, 50) : (0.045414, -1.691334), # 23
(-140, 60) : (0.028321, -1.383843), # 31
(-140, 70) : (0.020617, -0.776091), # 52
(-140, 80) : (0.019672, -0.966172), # 49
(-140, 90) : (0.027733, 0.451979), # 60
(-140, 100) : (0.031654, 1.119220), # 95
(-140, 110) : (0.031791, 1.263151), # 216
(-140, 120) : (0.033046, 1.348114), # 742
(-140, 130) : (0.031191, 1.116316), # 1970
(-140, 140) : (0.020804, 0.392644), # 2293
(-140, 150) : (0.002702, 0.908554), # 1826
(-140, 160) : (0.025906, 1.927208), # 2502
(-140, 170) : (0.035307, 1.559917), # 1439
(-140, 180) : (0.028871, 1.357359), # 303
(-130, -180) : (0.011292, -0.022455), # 178
(-130, -170) : (0.025431, -0.841925), # 121
(-130, -160) : (0.092040, -1.256506), # 36
(-130, -100) : (0.083186, -1.702218), # 15
(-130, -90) : (0.086638, -1.547633), # 24
(-130, -80) : (0.061876, -1.617552), # 23
(-130, -70) : (0.042582, -1.329022), # 22
(-130, -60) : (0.032320, -1.638576), # 72
(-130, -50) : (0.032905, -1.463480), # 62
(-130, -40) : (0.038181, -1.928063), # 54
(-130, -30) : (0.042138, -2.388421), # 93
(-130, -20) : (0.032274, -2.650007), # 252
(-130, -10) : (0.025416, -2.690438), # 293
(-130, 0) : (0.031060, -2.265391), # 279
(-130, 10) : (0.026805, -2.026934), # 266
(-130, 20) : (0.033225, -1.752752), # 231
(-130, 30) : (0.043239, -1.663039), # 154
(-130, 40) : (0.049720, -1.673735), # 97
(-130, 50) : (0.061324, -1.606206), # 60
(-130, 60) : (0.054534, -1.520821), # 109
(-130, 70) : (0.031602, -0.951458), # 149
(-130, 80) : (0.021598, -0.828470), # 177
(-130, 90) : (0.015097, -0.291520), # 220
(-130, 100) : (0.020191, 0.558579), # 375
(-130, 110) : (0.020975, 0.723288), # 1076
(-130, 120) : (0.024324, 1.009148), # 3515
(-130, 130) : (0.027783, 0.887899), # 6274
(-130, 140) : (0.024640, 0.511681), # 3901
(-130, 150) : (0.013799, -2.199653), # 2879
(-130, 160) : (0.009897, 3.135324), # 3748
(-130, 170) : (0.015384, 1.493375), # 1917
(-130, 180) : (0.016643, 1.145796), # 373
(-120, -180) : (0.015445, -0.152133), # 105
(-120, -170) : (0.039681, -1.175070), # 70
(-120, -160) : (0.067194, -1.325840), # 11
(-120, -150) : (0.087968, -1.366773), # 11
(-120, -140) : (0.152628, -1.450107), # 10
(-120, -100) : (0.129841, -1.541614), # 14
(-120, -90) : (0.082933, -1.369499), # 31
(-120, -80) : (0.073783, -1.548882), # 40
(-120, -70) : (0.064228, -1.505918), # 65
(-120, -60) : (0.044445, -1.590806), # 186
(-120, -50) : (0.035180, -1.270478), # 210
(-120, -40) : (0.027087, -1.166147), # 128
(-120, -30) : (0.047747, -2.124127), # 203
(-120, -20) : (0.037015, -2.374941), # 556
(-120, -10) : (0.042211, -2.195318), # 773
(-120, 0) : (0.037122, -2.030320), # 677
(-120, 10) : (0.042402, -1.921795), # 608
(-120, 20) : (0.051830, -1.797003), # 527
(-120, 30) : (0.065015, -1.717984), # 217
(-120, 40) : (0.069011, -1.610637), # 65
(-120, 60) : (0.033718, -1.615376), # 14
(-120, 70) : (0.040346, -1.353159), # 37
(-120, 80) : (0.025695, -0.647170), # 99
(-120, 90) : (0.023101, -0.187190), # 203
(-120, 100) : (0.022700, 0.113396), # 588
(-120, 110) : (0.018771, 0.369761), # 1808
(-120, 120) : (0.024071, 0.664790), # 5812
(-120, 130) : (0.028615, 0.635577), # 7908
(-120, 140) : (0.025823, 0.295269), # 3286
(-120, 150) : (0.023733, -1.918635), # 1811
(-120, 160) : (0.013737, -2.271646), # 1917
(-120, 170) : (0.006331, 1.333296), # 909
(-120, 180) : (0.017084, 1.021596), # 205
(-110, -180) : (0.020220, -0.834912), # 68
(-110, -170) : (0.020710, -0.422097), # 53
(-110, -160) : (0.072020, -1.362856), # 12
(-110, -100) : (0.095726, -1.431237), # 11
(-110, -90) : (0.114713, -1.551160), # 23
(-110, -80) : (0.082703, -1.425735), # 29
(-110, -70) : (0.066952, -1.566962), # 51
(-110, -60) : (0.044522, -1.501933), # 169
(-110, -50) : (0.026507, -1.183272), # 312
(-110, -40) : (0.031472, -1.295729), # 251
(-110, -30) : (0.041576, -1.867514), # 258
(-110, -20) : (0.036150, -2.202371), # 745
(-110, -10) : (0.040648, -2.027974), # 873
(-110, 0) : (0.038531, -2.025919), # 819
(-110, 10) : (0.050562, -1.896469), # 764
(-110, 20) : (0.072009, -1.783872), # 476
(-110, 30) : (0.065275, -1.779605), # 141
(-110, 40) : (0.048289, -1.438638), # 17
(-110, 50) : (0.102641, -1.754653), # 12
(-110, 80) : (0.038746, -0.872601), # 18
(-110, 90) : (0.026924, -0.488421), # 102
(-110, 100) : (0.027291, -0.062153), # 572
(-110, 110) : (0.022013, 0.217559), # 2044
(-110, 120) : (0.025312, 0.529715), # 5518
(-110, 130) : (0.029844, 0.528276), # 6161
(-110, 140) : (0.026977, 0.182348), # 2112
(-110, 150) : (0.023594, -1.845726), # 853
(-110, 160) : (0.005900, -2.088980), # 907
(-110, 170) : (0.018649, 1.201660), # 517
(-110, 180) : (0.015396, 1.027305), # 120
(-100, -180) : (0.020408, 0.185498), # 46
(-100, -170) : (0.026144, -0.757265), # 48
(-100, -160) : (0.090657, -1.046887), # 13
(-100, -90) : (0.102330, -1.521455), # 14
(-100, -80) : (0.072720, -1.310019), # 16
(-100, -70) : (0.062296, -1.319038), # 39
(-100, -60) : (0.046745, -1.544039), # 160
(-100, -50) : (0.025759, -1.110632), # 443
(-100, -40) : (0.026827, -0.896894), # 412
(-100, -30) : (0.034651, -1.838309), # 318
(-100, -20) : (0.031750, -2.194610), # 690
(-100, -10) : (0.034680, -2.197765), # 840
(-100, 0) : (0.038343, -2.030933), # 998
(-100, 10) : (0.049948, -1.890953), # 738
(-100, 20) : (0.062976, -1.829923), # 258
(-100, 30) : (0.071842, -1.682743), # 42
(-100, 40) : (0.096815, -1.623131), # 11
(-100, 60) : (0.049525, -1.335983), # 10
(-100, 70) : (0.041289, -0.772956), # 16
(-100, 80) : (0.034276, -0.976998), # 30
(-100, 90) : (0.037794, -0.713208), # 90
(-100, 100) : (0.024078, -0.209898), # 444
(-100, 110) : (0.023245, 0.140584), # 1782
(-100, 120) : (0.025831, 0.393736), # 3931
(-100, 130) : (0.029760, 0.479753), # 3940
(-100, 140) : (0.030358, 0.169726), # 1295
(-100, 150) : (0.021572, -1.575677), # 492
(-100, 160) : (0.003847, 2.058461), # 578
(-100, 170) : (0.024890, 1.159575), # 430
(-100, 180) : (0.020909, 1.053170), # 102
(-90, -180) : (0.025057, 0.627467), # 45
(-90, -170) : (0.036556, -1.208946), # 22
(-90, -160) : (0.032532, -0.693381), # 11
(-90, -70) : (0.075865, -1.576000), # 35
(-90, -60) : (0.046183, -1.550310), # 141
(-90, -50) : (0.024688, -1.053965), # 606
(-90, -40) : (0.030710, -1.008599), # 589
(-90, -30) : (0.039961, -1.689632), # 416
(-90, -20) : (0.031583, -2.157844), # 792
(-90, -10) : (0.033368, -2.227301), # 1074
(-90, 0) : (0.035012, -2.095990), # 1028
(-90, 10) : (0.044279, -1.880319), # 394
(-90, 20) : (0.051895, -1.832240), # 44
(-90, 40) : (0.064170, -1.647912), # 10
(-90, 50) : (0.042887, -1.243863), # 12
(-90, 60) : (0.051084, -1.355028), # 23
(-90, 70) : (0.078866, -1.421970), # 45
(-90, 80) : (0.050852, -1.155882), # 125
(-90, 90) : (0.039571, -0.971753), # 179
(-90, 100) : (0.025085, -0.450660), # 433
(-90, 110) : (0.021645, -0.071709), # 1360
(-90, 120) : (0.023916, 0.239667), # 2775
(-90, 130) : (0.029990, 0.297050), # 2804
(-90, 140) : (0.033344, 0.135823), # 998
(-90, 150) : (0.016757, -1.104586), # 379
(-90, 160) : (0.014991, 1.466174), # 666
(-90, 170) : (0.039526, 1.252086), # 533
(-90, 180) : (0.024542, 0.954524), # 94
(-80, -180) : (0.038022, 0.986759), # 40
(-80, -170) : (0.038421, 1.010631), # 28
(-80, -60) : (0.047030, -1.550948), # 156
(-80, -50) : (0.031225, -1.211185), # 1091
(-80, -40) : (0.038368, -1.151238), # 1342
(-80, -30) : (0.060821, -1.728169), # 809
(-80, -20) : (0.048417, -2.095827), # 1019
(-80, -10) : (0.043192, -2.206976), # 1178
(-80, 0) : (0.054472, -1.953496), # 474
(-80, 10) : (0.048692, -1.811687), # 82
(-80, 60) : (0.088120, -1.489534), # 18
(-80, 70) : (0.051681, -1.155341), # 26
(-80, 80) : (0.063863, -1.322189), # 67
(-80, 90) : (0.048564, -1.152976), # 82
(-80, 100) : (0.028063, -0.843964), # 224
(-80, 110) : (0.020497, -0.449375), # 775
(-80, 120) : (0.021419, 0.102185), # 2273
(-80, 130) : (0.026538, 0.122153), # 2638
(-80, 140) : (0.035253, -0.015866), # 1079
(-80, 150) : (0.015586, -1.340575), # 487
(-80, 160) : (0.016404, 1.651580), # 935
(-80, 170) : (0.041047, 1.269616), # 818
(-80, 180) : (0.039924, 1.268462), # 109
(-70, -60) : (0.059024, -1.616031), # 229
(-70, -50) : (0.038942, -1.251738), # 6470
(-70, -40) : (0.050463, -1.321426), # 13825
(-70, -30) : (0.083713, -1.767829), # 3619
(-70, -20) : (0.065105, -1.977661), # 2028
(-70, -10) : (0.064499, -2.065330), # 1160
(-70, 0) : (0.069219, -1.966177), # 157
(-70, 100) : (0.023202, -0.341432), # 24
(-70, 110) : (0.022787, -0.805295), # 176
(-70, 120) : (0.018220, -0.282864), # 1247
(-70, 130) : (0.025526, -0.117578), # 2656
(-70, 140) : (0.031534, -0.272924), # 1353
(-70, 150) : (0.019928, -1.494329), # 766
(-70, 160) : (0.009805, 1.790617), # 1090
(-70, 170) : (0.033224, 1.206697), # 492
(-70, 180) : (0.015042, 0.333699), # 32
(-60, -60) : (0.068141, -1.649754), # 330
(-60, -50) : (0.039577, -1.281559), # 19101
(-60, -40) : (0.047504, -1.289895), # 19952
(-60, -30) : (0.070756, -1.644570), # 3416
(-60, -20) : (0.063359, -1.873275), # 1241
(-60, -10) : (0.077656, -2.023494), # 124
(-60, 110) : (0.016187, -0.490848), # 19
(-60, 120) : (0.020170, -0.302879), # 463
(-60, 130) : (0.025008, -0.380850), # 1938
(-60, 140) : (0.030405, -0.621625), # 1316
(-60, 150) : (0.025029, -1.592350), # 522
(-60, 160) : (0.003992, 2.751709), # 224
(-60, 170) : (0.020786, 0.485073), # 38
(-50, -60) : (0.075194, -1.706880), # 129
(-50, -50) : (0.046158, -1.449030), # 2092
(-50, -40) : (0.057016, -1.443656), # 1517
(-50, -30) : (0.068207, -1.601243), # 325
(-50, -20) : (0.047212, -1.586467), # 23
(-50, 120) : (0.019259, -0.337252), # 147
(-50, 130) : (0.026765, -0.385795), # 648
(-50, 140) : (0.035590, -0.750828), # 314
(-50, 150) : (0.027311, -1.520417), # 43
(-40, -60) : (0.070546, -1.619691), # 25
(-40, -50) : (0.054911, -1.512325), # 65
(-40, -40) : (0.041599, -1.153152), # 20
(-40, 120) : (0.017717, 0.142113), # 21
(-40, 130) : (0.022994, -0.572740), # 36
(40, 40) : (0.199025, -1.892311), # 13
(40, 50) : (0.129214, -1.808620), # 20
(40, 60) : (0.109516, -1.887967), # 17
(50, 40) : (0.150394, -1.777165), # 25
(50, 50) : (0.102268, -1.849139), # 29
(50, 60) : (0.078204, -1.638902), # 21
(60, 40) : (0.099065, -1.771405), # 17
(60, 50) : (0.078828, -1.817775), # 16
(60, 60) : (0.113703, -1.792203), # 22
(60, 70) : (0.112526, -1.761346), # 10
(70, -60) : (0.118713, -2.020618), # 29
(70, -50) : (0.097268, -2.024971), # 23
(70, 0) : (0.141350, -2.247120), # 12
(70, 160) : (0.128734, -2.394952), # 13
(70, 170) : (0.081413, -2.838786), # 14
(80, -10) : (0.173921, -2.422485), # 20
(80, 0) : (0.186558, -2.434710), # 19
}
if __name__ == '__main__':
for phi_psi in [(0,0), (-60,-60)]:
print(phi_psi, mean_devs.get(phi_psi, None))
|
pyvgdlmaster/examples/gridphysics/frogs_video.py | LRHammond/pv4dsrl | 111 | 11108798 | '''
VGDL example: Same as frogs.py, but uploads video on youtube
@author: <NAME>, <NAME>
'''
from frogs import frog_level, frog_game
if __name__ == "__main__":
from vgdl.core import VGDLParser
from vgdl.youtube import upload
game = VGDLParser.playGame(frog_game, frog_level, persist_movie=True)
upload(game.video_file)
|
tests/unit/ops/test_normalize.py | NVIDIA/NVTabular | 543 | 11108833 | <reponame>NVIDIA/NVTabular
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import numpy as np
import pandas as pd
import pytest
import nvtabular as nvt
import nvtabular.io
from nvtabular import ColumnSelector, dispatch, ops
from nvtabular.dispatch import HAS_GPU
if HAS_GPU:
_CPU = [True, False]
_HAS_GPU = True
else:
_CPU = [True]
_HAS_GPU = False
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1] if _HAS_GPU else [None])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
# TODO: dask workflow doesn't support min/max on string columns, so won't work
# with op_columns=None
@pytest.mark.parametrize("op_columns", [["x"], ["x", "y"]])
@pytest.mark.parametrize("cpu", _CPU)
def test_normalize_minmax(tmpdir, dataset, gpu_memory_frac, engine, op_columns, cpu):
df = dataset.to_ddf().compute()
cont_features = op_columns >> ops.NormalizeMinMax()
processor = nvtabular.Workflow(cont_features)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
new_gdf.index = df.index # Make sure index is aligned for checks
for col in op_columns:
col_min = df[col].min()
assert col_min == pytest.approx(processor.output_node.op.mins[col], 1e-2)
col_max = df[col].max()
assert col_max == pytest.approx(processor.output_node.op.maxs[col], 1e-2)
df[col] = (df[col] - processor.output_node.op.mins[col]) / (
processor.output_node.op.maxs[col] - processor.output_node.op.mins[col]
)
assert np.all((df[col] - new_gdf[col]).abs().values <= 1e-2)
@pytest.mark.parametrize("gpu_memory_frac", [0.01, 0.1])
@pytest.mark.parametrize("engine", ["parquet", "csv", "csv-no-header"])
@pytest.mark.parametrize("op_columns", [["x"], ["x", "y"]])
def test_normalize(tmpdir, df, dataset, gpu_memory_frac, engine, op_columns):
cont_features = op_columns >> ops.Normalize()
processor = nvtabular.Workflow(cont_features)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
new_gdf.index = df.index # Make sure index is aligned for checks
for col in op_columns:
assert math.isclose(df[col].mean(), processor.output_node.op.means[col], rel_tol=1e-4)
assert math.isclose(df[col].std(), processor.output_node.op.stds[col], rel_tol=1e-4)
df[col] = (df[col] - processor.output_node.op.means[col]) / processor.output_node.op.stds[
col
]
assert np.all((df[col] - new_gdf[col]).abs().values <= 1e-2)
# our normalize op also works on dicts of cupy/numpy tensors. make sure this works like we'd
# expect
df = dataset.compute()
cupy_inputs = {col: df[col].values for col in op_columns}
cupy_outputs = cont_features.op.transform(ColumnSelector(op_columns), cupy_inputs)
for col in op_columns:
assert np.allclose(cupy_outputs[col], new_gdf[col].values)
@pytest.mark.parametrize("cpu", _CPU)
def test_normalize_std_zero(cpu):
df = pd.DataFrame({"a": 7 * [10]})
dataset = nvt.Dataset(df, cpu=cpu)
processor = nvtabular.Workflow(["a"] >> ops.Normalize())
processor.fit(dataset)
result = processor.transform(dataset).compute()["a"]
assert (result == 0).all()
@pytest.mark.parametrize("gpu_memory_frac", [0.1])
@pytest.mark.parametrize("engine", ["parquet"])
@pytest.mark.parametrize("op_columns", [["x"]])
def test_normalize_upcastfloat64(tmpdir, dataset, gpu_memory_frac, engine, op_columns):
df = dispatch._make_df({"x": [1.9e10, 2.3e16, 3.4e18, 1.6e19], "label": [1.0, 0.0, 1.0, 0.0]})
cont_features = op_columns >> ops.Normalize()
processor = nvtabular.Workflow(cont_features)
dataset = nvt.Dataset(df)
processor.fit(dataset)
new_gdf = processor.transform(dataset).to_ddf().compute()
for col in op_columns:
assert math.isclose(df[col].mean(), processor.output_node.op.means[col], rel_tol=1e-4)
assert math.isclose(df[col].std(), processor.output_node.op.stds[col], rel_tol=1e-4)
df[col] = (df[col] - processor.output_node.op.means[col]) / processor.output_node.op.stds[
col
]
assert np.all((df[col] - new_gdf[col]).abs().values <= 1e-2)
|
spirit/core/storage.py | Ke-xueting/Spirit | 974 | 11108847 | # -*- coding: utf-8 -*-
import django
from django.core.files.storage import (
FileSystemStorage, default_storage, get_storage_class)
from .conf import settings
__all__ = [
'spirit_storage',
'spirit_storage_or_none',
'OverwriteFileSystemStorage']
class OverwriteFileSystemStorage(FileSystemStorage):
def get_available_name(self, name, **kwargs):
self.delete(name)
return name
def select_storage(default=default_storage):
"""returns ``None`` if there is no custom storage"""
if settings.ST_STORAGE is None:
return default
if settings.ST_STORAGE == 'spirit.core.storage.OverwriteFileSystemStorage':
return OverwriteFileSystemStorage()
return get_storage_class(settings.ST_STORAGE)()
# In Django 3.1+ the callable can be passed as the FileField
# storage, and it won't create a migration
if django.VERSION[:2] >= (3, 1):
spirit_storage_or_none = select_storage
else:
spirit_storage_or_none = select_storage(default=None)
spirit_storage = select_storage()
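# Example (hypothetical model) of how the selectors above are intended to be used:
# on Django 3.1+ ``spirit_storage_or_none`` is the ``select_storage`` callable itself,
# so passing it as ``FileField(storage=...)`` keeps the concrete storage class out of
# migrations; on older versions it is an already-built storage instance, or ``None``
# when no custom storage is configured.
#
#     class CommentFile(models.Model):
#         file = models.FileField(upload_to='spirit/files', storage=spirit_storage_or_none)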
|
addition_module/DMUE/config.py | weihaoxie/FaceX-Zoo | 1,329 | 11108894 | import os
import getpass
def parse_lb_txt(filename):
lines = open(filename, 'r').readlines()
train_dataset, test_dataset = [], []
for line in lines:
key, label = line.split(' ')[0], line[-2]
label = int(label)
mode, img_path = key.split('_') #
if mode == 'train':
train_dataset.append([key, label])
elif mode == 'test':
test_dataset.append([key, label])
return train_dataset, test_dataset
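# Assumed label-file line format for parse_lb_txt (illustrative example only):
#   "<mode>_<image_name> <label>", e.g. "train_00001.jpg 3"
#   -> key="train_00001.jpg", mode="train", label=3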
class Config:
num_classes = 8
ori_shape = (256, 256)
label_path = "/path/to/your/lb2.txt"
lmdb_f = "/path/to/your/AffectNet_lmdb/"
train_dataset, val_dataset = parse_lb_txt(label_path)
w, T = 0.5, 1.2
gamma = 1000
ramp_a = 6 # affectnet 4/6; ferplus 10/12/14; raf 9/10
batch_size = 72
test_minibatch=16
num_workers = 4
lr1 = [[6, 0.0001], [12, 0.00005], [20, 0.00001], [22, 0.00001], [25, 0.00005], [60, 0.00001]]
lr2 = [[4, 0.001], [8, 0.0005], [14, 0.0001], [22, 0.00001], [25, 0.00005], [60, 0.00001]]
bnneck = True # False for resnet50_ibn
use_dropout = True
BiasInCls = False
fc_num = 2
train_mode = 'sp_confidence'
second_order_statics = 'mean' # all, mean, var
# -----------saving dirs-------#
ckpt_root_dir = './checkpoints'
output_dir = 'AffectNet_res18'
# ---------------------------------------------------------------------------- #
# Input
# ---------------------------------------------------------------------------- #
image_crop_size = (224, 224)
padding = 0
image_channel = 3
horizontal_flip_p = 0.5
normalize_mean = [0.5, 0.5, 0.5]
normalize_std = [0.5, 0.5, 0.5]
# ---------------------------------------------------------------------------- #
# Model
# ---------------------------------------------------------------------------- #
num_branches = num_classes + 1
assert num_branches == (num_classes + 1)
backbone = 'resnet18'
pretrained = './pretrain/checkpoints/out_dir_res18/mv_epoch_17.pt'
pretrained_choice = 'msra' # '' or 'msra'
last_stride = 2
frozen_stages = -1
pooling_method = 'GAP'
# ---------------------------------------------------------------------------- #
# Optimizer
# ---------------------------------------------------------------------------- #
start_epoch = 0
max_epoch = 36
weight_decay = 1e-4
# set different lr to the backbone and the classifier
def get_lr(self, epoch):
for it_lr in self.lr1:
if epoch < it_lr[0]:
_lr1 = it_lr[1]
break
for it_lr in self.lr2:
if epoch < it_lr[0]:
_lr2 = it_lr[1]
break
return _lr1, _lr2
config = Config()
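# Example of how the piecewise schedule above resolves (values taken from lr1/lr2):
#   config.get_lr(0)   -> (0.0001, 0.001)
#   config.get_lr(10)  -> (5e-05, 0.0001)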
|
update_version.py | xarala221/django-erp | 345 | 11108924 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import os
import sys
from datetime import datetime
def replace(root, replace_dict):
if root[-1] == "/":
root = root[0:-1]
for x in os.listdir(root):
path = root + "/" + x
if os.path.isdir(path):
replace(path, replace_dict)
else:
p, sep, ext = path.partition('.')
if ext in ("py", "py.tmpl"):
replaced = False
infile = open(path, "r+")
text = infile.read()
infile.seek(0)
for old_string, new_string in replace_dict.items():
if text.find(old_string) != -1:
text = text.replace(old_string, new_string)
replaced = True
infile.seek(0)
infile.truncate()
infile.write(text)
if replaced:
print "Replaced in " + path
infile.close()
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s <new_version>" % sys.argv[0]
sys.exit(1)
ref_file = open("djangoerp/__init__.py")
text = ref_file.read()
ref_file.close()
replace_dict = {}
for line in text.split("\n"):
if line.startswith("__copyright__"):
replace_dict[line] = "__copyright__ = 'Copyright (c) 2013-%d, django ERP Team'" % datetime.now().year
if line.startswith("__version__"):
replace_dict[line] = "__version__ = '%s'" % sys.argv[1]
replace("djangoerp", replace_dict)
|
benedict/__init__.py | fabiocaccamo/python-benedict | 365 | 11108944 | # -*- coding: utf-8 -*-
from benedict.dicts import benedict
from benedict.metadata import (
__author__,
__copyright__,
__description__,
__license__,
__title__,
__version__,
)
|
pages/extensions/amp_dev/markdown_extras/block_tip.py | sss55b/amp.dev | 300 | 11108973 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
TIP_TRIGGER = '[tip'
TIP_START_TAG_PATTERN = re.compile(r'(\[tip( type=\"(.*?)\")?\])',
re.MULTILINE)
TIP_END_TAG_PATTERN = '[/tip]'
ALLOWED_TYPES = ['default', 'important', 'read-on']
def trigger(original_body, content):
if TIP_TRIGGER in original_body:
return _transform(content)
return content
def _transform(content):
for match in TIP_START_TAG_PATTERN.findall(content):
# For tips without a type
if not match[1]:
content = content.replace(
match[0],
'{% call tip(\'\', type=\'note\') %}')
if match[2]:
type = 'default' if not match[2] in ALLOWED_TYPES else match[2]
content = content.replace(match[0], '{% call tip(\'\', type=\'' + type + '\') %}')
# Then also replace end tags
content = content.replace(TIP_END_TAG_PATTERN, '{% endcall %}')
return content
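# Example of the rewrite performed by _transform (illustrative input/output):
#   '[tip type="important"]Check this.[/tip]'
#     -> "{% call tip('', type='important') %}Check this.{% endcall %}"
#   '[tip]Plain note.[/tip]'
#     -> "{% call tip('', type='note') %}Plain note.{% endcall %}"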
|
extensions/RichardHe.you-complete-me-1.0.36/ycmd/ycmd/tests/cs/subcommands_test.py | Sunshengjin/RoboWare-Studio | 239 | 11108986 | #!/usr/bin/env python
#
# Copyright (C) 2015 ycmd contributors.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from webtest import TestApp, AppError
from nose.tools import eq_, ok_
from ... import handlers
from .cs_handlers_test import Cs_Handlers_test
import re
import os.path
class Cs_Subcommands_test( Cs_Handlers_test ):
def GoTo_Basic_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GoTo' ],
line_num = 9,
column_num = 15,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
'filepath': self._PathToTestFile( 'testy', 'Program.cs' ),
'line_num': 7,
'column_num': 3
}, self._app.post_json( '/run_completer_command', goto_data ).json )
self._StopOmniSharpServer( filepath )
def GoToImplementation_Basic_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest(
completer_target = 'filetype_default',
command_arguments = [ 'GoToImplementation' ],
line_num = 13,
column_num = 13,
contents = contents,
filetype = 'cs',
filepath = filepath
)
eq_( {
'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
'line_num': 30,
'column_num': 3
}, self._app.post_json( '/run_completer_command', goto_data ).json )
self._StopOmniSharpServer( filepath )
def GoToImplementation_NoImplementation_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest(
completer_target = 'filetype_default',
command_arguments = [ 'GoToImplementation' ],
line_num = 17,
column_num = 13,
contents = contents,
filetype = 'cs',
filepath = filepath
)
try:
self._app.post_json( '/run_completer_command', goto_data ).json
raise Exception("Expected a 'No implementations found' error")
except AppError as e:
if 'No implementations found' in str(e):
pass
else:
raise
finally:
self._StopOmniSharpServer( filepath )
def CsCompleter_InvalidLocation_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest(
completer_target = 'filetype_default',
command_arguments = [ 'GoToImplementation' ],
line_num = 2,
column_num = 1,
contents = contents,
filetype = 'cs',
filepath = filepath
)
try:
self._app.post_json( '/run_completer_command', goto_data ).json
raise Exception( 'Expected a "Can\\\'t jump to implementation" error' )
except AppError as e:
if 'Can\\\'t jump to implementation' in str(e):
pass
else:
raise
finally:
self._StopOmniSharpServer( filepath )
def GoToImplementationElseDeclaration_NoImplementation_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest(
completer_target = 'filetype_default',
command_arguments = [ 'GoToImplementationElseDeclaration' ],
line_num = 17,
column_num = 13,
contents = contents,
filetype = 'cs',
filepath = filepath
)
eq_( {
'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
'line_num': 35,
'column_num': 3
}, self._app.post_json( '/run_completer_command', goto_data ).json )
self._StopOmniSharpServer( filepath )
def GoToImplementationElseDeclaration_SingleImplementation_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest(
completer_target = 'filetype_default',
command_arguments = [ 'GoToImplementationElseDeclaration' ],
line_num = 13,
column_num = 13,
contents = contents,
filetype = 'cs',
filepath = filepath
)
eq_( {
'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
'line_num': 30,
'column_num': 3
}, self._app.post_json( '/run_completer_command', goto_data ).json )
self._StopOmniSharpServer( filepath )
def GoToImplementationElseDeclaration_MultipleImplementations_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
goto_data = self._BuildRequest(
completer_target = 'filetype_default',
command_arguments = [ 'GoToImplementationElseDeclaration' ],
line_num = 21,
column_num = 13,
contents = contents,
filetype = 'cs',
filepath = filepath
)
eq_( [ {
'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
'line_num': 43,
'column_num': 3
}, {
'filepath': self._PathToTestFile( 'testy', 'GotoTestCase.cs' ),
'line_num': 48,
'column_num': 3
} ], self._app.post_json( '/run_completer_command', goto_data ).json )
self._StopOmniSharpServer( filepath )
def GetType_EmptyMessage_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
gettype_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetType' ],
line_num = 1,
column_num = 1,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
u'message': u""
}, self._app.post_json( '/run_completer_command', gettype_data ).json )
self._StopOmniSharpServer( filepath )
def GetType_VariableDeclaration_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
gettype_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetType' ],
line_num = 4,
column_num = 5,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
u'message': u"string"
}, self._app.post_json( '/run_completer_command', gettype_data ).json )
self._StopOmniSharpServer( filepath )
def GetType_VariableUsage_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
gettype_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetType' ],
line_num = 5,
column_num = 5,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
u'message': u"string str"
}, self._app.post_json( '/run_completer_command', gettype_data ).json )
self._StopOmniSharpServer( filepath )
def GetType_Constant_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
gettype_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetType' ],
line_num = 4,
column_num = 14,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
u'message': u"System.String"
}, self._app.post_json( '/run_completer_command', gettype_data ).json )
self._StopOmniSharpServer( filepath )
def GetType_DocsIgnored_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetTypeTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
gettype_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetType' ],
line_num = 9,
column_num = 34,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
u'message': u"int GetTypeTestCase.an_int_with_docs;",
}, self._app.post_json( '/run_completer_command', gettype_data ).json )
self._StopOmniSharpServer( filepath )
def GetDoc_Variable_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetDocTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
getdoc_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetDoc' ],
line_num = 13,
column_num = 28,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( {
'detailed_info': 'int GetDocTestCase.an_int;\n'
'an integer, or something',
}, self._app.post_json( '/run_completer_command', getdoc_data ).json )
self._StopOmniSharpServer( filepath )
def GetDoc_Function_test( self ):
filepath = self._PathToTestFile( 'testy', 'GetDocTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
getdoc_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'GetDoc' ],
line_num = 33,
column_num = 27,
contents = contents,
filetype = 'cs',
filepath = filepath )
# It seems that Omnisharp server eats newlines
eq_( {
'detailed_info': 'int GetDocTestCase.DoATest();\n'
' Very important method. With multiple lines of '
'commentary And Format- -ting',
}, self._app.post_json( '/run_completer_command', getdoc_data ).json )
self._StopOmniSharpServer( filepath )
def _RunFixIt( self, line, column, expected_result ):
filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
fixit_data = self._BuildRequest( completer_target = 'filetype_default',
command_arguments = [ 'FixIt' ],
line_num = line,
column_num = column,
contents = contents,
filetype = 'cs',
filepath = filepath )
eq_( expected_result,
self._app.post_json( '/run_completer_command', fixit_data ).json )
self._StopOmniSharpServer( filepath )
def FixIt_RemoveSingleLine_test( self ):
filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
self._RunFixIt( 11, 1, {
u'fixits': [
{
u'location': {
u'line_num': 11,
u'column_num': 1,
u'filepath': filepath
},
u'chunks': [
{
u'replacement_text': '',
u'range': {
u'start': {
u'line_num': 10,
u'column_num': 20,
u'filepath': filepath
},
u'end': {
u'line_num': 11,
u'column_num': 30,
u'filepath': filepath
},
}
}
]
}
]
} )
def FixIt_MultipleLines_test( self ):
filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
self._RunFixIt( 19, 1, {
u'fixits': [
{
u'location': {
u'line_num': 19,
u'column_num': 1,
u'filepath': filepath
},
u'chunks': [
{
u'replacement_text': "return On",
u'range': {
u'start': {
u'line_num': 20,
u'column_num': 13,
u'filepath': filepath
},
u'end': {
u'line_num': 21,
u'column_num': 35,
u'filepath': filepath
},
}
}
]
}
]
} )
def FixIt_SpanFileEdge_test( self ):
filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
self._RunFixIt( 1, 1, {
u'fixits': [
{
u'location': {
u'line_num': 1,
u'column_num': 1,
u'filepath': filepath
},
u'chunks': [
{
u'replacement_text': 'System',
u'range': {
u'start': {
u'line_num': 1,
u'column_num': 7,
u'filepath': filepath
},
u'end': {
u'line_num': 3,
u'column_num': 18,
u'filepath': filepath
},
}
}
]
}
]
} )
def FixIt_AddTextInLine_test( self ):
filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
self._RunFixIt( 9, 1, {
u'fixits': [
{
u'location': {
u'line_num': 9,
u'column_num': 1,
u'filepath': filepath
},
u'chunks': [
{
u'replacement_text': ', StringComparison.Ordinal',
u'range': {
u'start': {
u'line_num': 9,
u'column_num': 29,
u'filepath': filepath
},
u'end': {
u'line_num': 9,
u'column_num': 29,
u'filepath': filepath
},
}
}
]
}
]
} )
def FixIt_ReplaceTextInLine_test( self ):
filepath = self._PathToTestFile( 'testy', 'FixItTestCase.cs' )
self._RunFixIt( 10, 1, {
u'fixits': [
{
u'location': {
u'line_num': 10,
u'column_num': 1,
u'filepath': filepath
},
u'chunks': [
{
u'replacement_text': 'const int',
u'range': {
u'start': {
u'line_num': 10,
u'column_num': 13,
u'filepath': filepath
},
u'end': {
u'line_num': 10,
u'column_num': 16,
u'filepath': filepath
},
}
}
]
}
]
} )
def StopServer_NoErrorIfNotStarted_test( self ):
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
self._StopOmniSharpServer( filepath )
# Success = no raise
def StopServer_KeepLogFiles_test( self ):
yield self._StopServer_KeepLogFiles, True
yield self._StopServer_KeepLogFiles, False
def _StopServer_KeepLogFiles( self, keeping_log_files ):
self._ChangeSpecificOptions(
{ 'server_keep_logfiles': keeping_log_files } )
self._app = TestApp( handlers.app )
self._app.post_json(
'/ignore_extra_conf_file',
{ 'filepath': self._PathToTestFile( '.ycm_extra_conf.py' ) } )
filepath = self._PathToTestFile( 'testy', 'GotoTestCase.cs' )
contents = open( filepath ).read()
event_data = self._BuildRequest( filepath = filepath,
filetype = 'cs',
contents = contents,
event_name = 'FileReadyToParse' )
self._app.post_json( '/event_notification', event_data )
self._WaitUntilOmniSharpServerReady( filepath )
event_data = self._BuildRequest( filetype = 'cs', filepath = filepath )
debuginfo = self._app.post_json( '/debug_info', event_data ).json
log_files_match = re.search( "^OmniSharp logfiles:\n(.*)\n(.*)",
debuginfo,
re.MULTILINE )
stdout_logfiles_location = log_files_match.group( 1 )
stderr_logfiles_location = log_files_match.group( 2 )
try:
ok_( os.path.exists(stdout_logfiles_location ),
"Logfile should exist at {0}".format( stdout_logfiles_location ) )
ok_( os.path.exists( stderr_logfiles_location ),
"Logfile should exist at {0}".format( stderr_logfiles_location ) )
finally:
self._StopOmniSharpServer( filepath )
if keeping_log_files:
ok_( os.path.exists( stdout_logfiles_location ),
"Logfile should still exist at "
"{0}".format( stdout_logfiles_location ) )
ok_( os.path.exists( stderr_logfiles_location ),
"Logfile should still exist at "
"{0}".format( stderr_logfiles_location ) )
else:
ok_( not os.path.exists( stdout_logfiles_location ),
"Logfile should no longer exist at "
"{0}".format( stdout_logfiles_location ) )
ok_( not os.path.exists( stderr_logfiles_location ),
"Logfile should no longer exist at "
"{0}".format( stderr_logfiles_location ) )
|
app.py | e-naharnovic/university-domains-list-api | 102 | 11108990 | <gh_stars>100-1000
import json
from collections import defaultdict
from flask import Flask, request, Response
from pytrie import Trie
import uuid
import requests
import time
import re
app = Flask(__name__)
data = list()
country_index = defaultdict(list)
name_index = dict()
domain_index = defaultdict(list)
# Time to wait before allowing an update to our dataset. 86400 seconds = 24 hours
UPDATE_WAIT_TIME = 86400
@app.route("/search")
def search():
if not data_loaded:
load_data()
country = request.args.get('country')
name = request.args.get('name')
name_contains = request.args.get('name_contains')
domain = request.args.get("domain")
filtered = data
if name and country:
name = name.lower()
country = country.lower()
name_filtered = prefix_tree.values(prefix=name)
filtered = [uni for uni in name_filtered if uni['country'].lower() == country]
elif name_contains and country:
country = country.lower()
regex = re.compile(r'\b{0}'.format(name_contains.lower()))
name_filtered = [uni for uni in data if regex.search(uni['name'].lower())]
filtered = [uni for uni in name_filtered if uni['country'].lower() == country]
elif name_contains:
regex = re.compile(r'\b{0}'.format(name_contains.lower()))
filtered = [uni for uni in data if regex.search(uni['name'].lower())]
elif name:
name = name.lower()
filtered = prefix_tree.values(prefix=name)
elif country:
country = country.lower()
filtered = country_index[country]
elif domain:
filtered = domain_index[domain]
return Response(json.dumps(filtered), mimetype='application/json')
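# Example queries handled by the endpoint above (illustrative values):
#   /search?name=middle                             -> prefix match on the name index
#   /search?name_contains=institute&country=Turkey  -> word-boundary match filtered by country
#   /search?domain=metu.edu.tr                      -> exact domain lookup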
data_loaded = False
last_updated = 0
def load_data():
global data_loaded, prefix_tree, data, country_index, name_index, domain_index
response = requests.get("https://raw.githubusercontent.com/Hipo/university-domains-list/master/world_universities_and_domains.json")
data = response.json()
for i in data:
country_index[i["country"].lower()].append(i)
name_index[i['name'].lower()] = i
for domain in i["domains"]:
domain_index[domain].append(i)
splitted = i['name'].split(" ")
if len(splitted) > 1:
for splitted_name in splitted[1:]:
name_index[splitted_name.lower() + str(uuid.uuid1())] = i
prefix_tree = Trie(**name_index)
data_loaded = True
@app.route('/update')
def update():
global last_updated
if (time.time() >= last_updated + UPDATE_WAIT_TIME):
load_data()
last_updated = time.time()
response = {'status': 'success', 'message': 'Dataset updated!'}
else:
response = {'status': 'error', 'message': 'Dataset had been updated recently. Try again later.'}
return json.dumps(response)
@app.route('/')
def index():
if not data_loaded:
load_data()
data = {'author': {'name': 'hipo', 'website': 'http://hipolabs.com'},
'example': 'http://universities.hipolabs.com/search?name=middle&country=Turkey',
'github': 'https://github.com/Hipo/university-domains-list'}
return json.dumps(data)
if __name__ == "__main__":
app.run(debug=False)
|
alipay/aop/api/response/AlipayMerchantPayforprivilegeMerchantremainingQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 11108997 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayMerchantPayforprivilegeMerchantremainingQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayMerchantPayforprivilegeMerchantremainingQueryResponse, self).__init__()
self._unused_benefit = None
self._unused_principal = None
self._used_benefit = None
self._used_principal = None
@property
def unused_benefit(self):
return self._unused_benefit
@unused_benefit.setter
def unused_benefit(self, value):
self._unused_benefit = value
@property
def unused_principal(self):
return self._unused_principal
@unused_principal.setter
def unused_principal(self, value):
self._unused_principal = value
@property
def used_benefit(self):
return self._used_benefit
@used_benefit.setter
def used_benefit(self, value):
self._used_benefit = value
@property
def used_principal(self):
return self._used_principal
@used_principal.setter
def used_principal(self, value):
self._used_principal = value
def parse_response_content(self, response_content):
response = super(AlipayMerchantPayforprivilegeMerchantremainingQueryResponse, self).parse_response_content(response_content)
if 'unused_benefit' in response:
self.unused_benefit = response['unused_benefit']
if 'unused_principal' in response:
self.unused_principal = response['unused_principal']
if 'used_benefit' in response:
self.used_benefit = response['used_benefit']
if 'used_principal' in response:
self.used_principal = response['used_principal']
|
websauna/system/admin/views.py | stevepiercy/websauna | 286 | 11109025 | <filename>websauna/system/admin/views.py
"""Admin interface main views. """
# Pyramid
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config
from pyramid_layout.panel import panel_config
# Websauna
from websauna.system.admin.modeladmin import ModelAdmin
from websauna.system.admin.modeladmin import ModelAdminRoot
from websauna.system.admin.utils import get_admin
from websauna.system.core.panel import render_panel
from websauna.system.core.viewconfig import view_overrides
from websauna.system.crud import views as crud_views
from websauna.system.crud import listing
from websauna.system.crud.formgenerator import SQLAlchemyFormGenerator
from websauna.system.crud.sqlalchemy import sqlalchemy_deleter
from websauna.system.crud.views import TraverseLinkButton
@view_config(route_name='admin_home', renderer='admin/admin.html', permission='view')
def admin(request):
"""Admin front page page."""
admin = get_admin(request)
# For now, admin panels always appear in ascending order
model_admin_root = admin["models"]
# TODO: Have renderer adapters for panels, so that they can override views
admin_panels = sorted(model_admin_root.items(), key=lambda pair: pair[1].title)
rendered_panels = [render_panel(ma, request, name="admin_panel") for id, ma in admin_panels]
return dict(panels=rendered_panels)
@panel_config(name='admin_panel', context=ModelAdmin, renderer='admin/model_panel.html')
def default_model_admin_panel(context, request, **kwargs):
"""Generic panel for any model admin.
Display count of items in the database.
"""
model_admin = context
count = model_admin.get_query().count()
admin = model_admin.__parent__
title = model_admin.title
return dict(locals(), **kwargs)
class Listing(crud_views.Listing):
"""Base listing view for all admin models.
"""
base_template = "admin/base.html"
table = listing.Table(
columns=[
listing.Column(id="id", name="Id",),
listing.StringPresentationColumn(id="value", name="Value"),
listing.ControlsColumn()
]
)
@property
def title(self):
return "All {}".format(self.context.title)
@view_config(context=ModelAdmin, name="listing", renderer="crud/listing.html", route_name="admin", permission='view')
def listing(self):
# We override this method just to define admin route_name traversing
return super(Listing, self).listing()
class Show(crud_views.Show):
"""Default show view for model admin."""
base_template = "admin/base.html"
form_generator = SQLAlchemyFormGenerator()
resource_buttons = [
TraverseLinkButton(id="edit", name="Edit", view_name="edit", permission="edit"),
TraverseLinkButton(id="delete", name="Delete", view_name="delete", permission="delete"),
TraverseLinkButton(id="shell", name="Shell", view_name="shell", permission="shell", tooltip="Open IPython Notebook shell and have this item prepopulated in obj variable.", feature="notebook"),
]
@view_config(context=ModelAdmin.Resource, name="show", renderer="crud/show.html", route_name="admin", permission='view')
def show(self):
# We override this method just to define admin route_name traversing
return super(Show, self).show()
class Edit(crud_views.Edit):
"""Default edit vie for model admin."""
base_template = "admin/base.html"
form_generator = SQLAlchemyFormGenerator()
@view_config(context=ModelAdmin.Resource, name="edit", renderer="crud/edit.html", route_name="admin", permission='edit')
def edit(self):
# We override this method just to define admin route_name traversing
return super(Edit, self).edit()
class Add(crud_views.Add):
"""Default add view for model admin."""
base_template = "admin/base.html"
form_generator = SQLAlchemyFormGenerator()
@view_config(context=ModelAdmin, name="add", renderer="crud/add.html", route_name="admin", permission='add')
def add(self):
# We override this method just to define admin route_name traversing
return super(Add, self).add()
@view_overrides(context=ModelAdmin.Resource, route_name="admin")
class Delete(crud_views.Delete):
"""Delete view for SQLAlchemy model admins."""
base_template = "admin/base.html"
deleter = sqlalchemy_deleter
@view_config(context=ModelAdmin, name="", route_name="admin", permission='view')
def model_admin_default_view(context, request):
"""Redirect to listing if model admin URL is being accessed without a view name."""
return HTTPFound(request.resource_url(context, "listing"))
@view_config(context=ModelAdmin.Resource, name="", route_name="admin", permission='view')
def model_resource_default_view(context, request):
"""Redirect to show if model instance URL is being accessed without a view name."""
return HTTPFound(request.resource_url(context, "show"))
@view_config(route_name='admin', context=ModelAdminRoot, permission='view')
def view__model_admin_root(context, request):
"""Model admin root does not have a view per se so we redirect to admin root."""
return HTTPFound(request.resource_url(context.__parent__))
|
starfish/core/intensity_table/test/factories.py | haoxusci/starfish | 164 | 11109031 | import numpy as np
import xarray as xr
from starfish import DecodedIntensityTable, IntensityTable
from starfish.core.codebook.test.factories import codebook_array_factory, loaded_codebook
from starfish.core.types import Coordinates, Features
from ..overlap import Area
def synthetic_intensity_table() -> IntensityTable:
return IntensityTable.synthetic_intensities(loaded_codebook(), n_spots=2)
def synthetic_decoded_intensity_table(
codebook,
num_z: int = 12,
height: int = 50,
width: int = 40,
n_spots: int = 10,
mean_fluor_per_spot: int = 200,
mean_photons_per_fluor: int = 50,
) -> DecodedIntensityTable:
"""
    Creates an IntensityTable with synthetic spots that correspond to valid
    codes in a provided codebook.
Parameters
----------
codebook : Codebook
Starfish codebook object.
num_z : int
Number of z-planes to use when localizing spots.
height : int
y dimension of each synthetic plane.
width : int
x dimension of each synthetic plane.
n_spots : int
Number of spots to generate.
mean_fluor_per_spot : int
Mean number of fluorophores per spot.
mean_photons_per_fluor : int
Mean number of photons per fluorophore.
Returns
-------
DecodedIntensityTable
"""
intensities = IntensityTable.synthetic_intensities(
codebook,
num_z=num_z,
height=height,
width=width,
n_spots=n_spots,
mean_fluor_per_spot=mean_fluor_per_spot,
mean_photons_per_fluor=mean_photons_per_fluor
)
targets = np.random.choice(
codebook.coords[Features.TARGET], size=n_spots, replace=True)
return DecodedIntensityTable.from_intensity_table(intensities, targets=(Features.AXIS, targets))
def assign_synthetic_targets(intensities: IntensityTable) -> DecodedIntensityTable:
"""
Assign fake target values to the given IntensityTable
Parameters
----------
intensities : IntensityTable
intensity_table to assign target values to
Returns
-------
DecodedIntensityTable
"""
intensities = DecodedIntensityTable(intensities)
return DecodedIntensityTable.from_intensity_table(
intensities,
targets=(Features.AXIS, np.random.choice(list('ABCD'), size=20)),
distances=(Features.AXIS, np.random.rand(20)))
def create_intensity_table_with_coords(area: Area, n_spots: int=10) -> IntensityTable:
"""
Creates a 50X50 intensity table with physical coordinates within
the given Area.
Parameters
----------
area: Area
The area of physical space the IntensityTable should be defined over
n_spots:
Number of spots to add to the IntensityTable
"""
codebook = codebook_array_factory()
it = IntensityTable.synthetic_intensities(
codebook,
num_z=1,
height=50,
width=50,
n_spots=n_spots
)
    # spread the spot coordinates evenly across the requested physical area
it[Coordinates.X.value] = xr.DataArray(np.linspace(area.min_x, area.max_x, n_spots),
dims=Features.AXIS)
it[Coordinates.Y.value] = xr.DataArray(np.linspace(area.min_y, area.max_y, n_spots),
dims=Features.AXIS)
return it
|
Challenge2/main.py | Gerifield/websocket-smuggle | 284 | 11109043 | <filename>Challenge2/main.py
#!/usr/bin/python
from flask import Flask, render_template, request, Response
from flask_socketio import SocketIO
from flask_restful import Resource, Api
from urlparse import urlparse
import requests
requests.packages.urllib3.disable_warnings()
app = Flask(__name__)
api = Api(app)
socketio = SocketIO(app, cors_allowed_origins='*', path='/api/public/socket.io')
class PublicProxyAPI(Resource):
def get(self):
return Response(status=405)
def post(self):
def check(_url):
parsed = urlparse(_url)
if parsed.port == 5000:
return False
if 'flag' in parsed.path.lower():
return False
bad = ('localhost', '127.', 'xip.io') # shitty check
if any(w for w in bad if w in parsed.netloc.lower()):
return False
return True
url = request.form.get('url')
if not url:
return {'message': 'Mandatory URL not specified'}, 400
if not check(url):
return {'message': 'Access to URL is denied'}, 403
try:
response = requests.get(url, allow_redirects=False, verify=False, timeout=40)
except:
return Response(status=500)
return Response(status=response.status_code)
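# Example request against the endpoint above (illustrative):
#   curl -X POST http://127.0.0.1:5000/api/public/healthcheck -d "url=https://example.com"
# The response carries the upstream status code, or 403 when check() rejects the URL.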
class Flag(Resource):
def get(self):
return {'flag': 'WHAt A WoNdeRFUL dAY for a cry OF De5p4iR ANd h3Ar7acH3'}
api.add_resource(Flag, '/flag')
api.add_resource(PublicProxyAPI, '/api/public/healthcheck')
@socketio.on('my event')
def handle_my_custom_event(json):
#print('received json: ' + str(json))
pass
if __name__ == '__main__':
socketio.run(app, port=5000, host='127.0.0.1')
|
vroom/environment.py | groschovskiy/vroom | 134 | 11109057 | """A vroom test execution environment.
This is an object with all of the vroom verifiers asked. Good for one file.
"""
import vroom.buffer
import vroom.messages
import vroom.output
import vroom.shell
import vroom.vim
class Environment(object):
"""The environment object.
Sets up all the verifiers and managers and communicators you'll ever need.
"""
def __init__(self, filename, args):
self.args = args
self.message_strictness = args.message_strictness
self.system_strictness = args.system_strictness
self.filename = filename
self.writer = vroom.output.Writer(filename, args)
self.shell = vroom.shell.Communicator(filename, self, self.writer)
if args.neovim:
import vroom.neovim_mod as neovim_mod
self.vim = neovim_mod.Communicator(args, self.shell.env, self.writer)
else:
self.vim = vroom.vim.Communicator(args, self.shell.env, self.writer)
self.buffer = vroom.buffer.Manager(self.vim)
self.messenger = vroom.messages.Messenger(self.vim, self, self.writer)
|
beagle/datasources/elasticsearch_qs.py | limkokhian/beagle | 1,139 | 11109100 | <filename>beagle/datasources/elasticsearch_qs.py
from typing import Generator
from beagle.common.logging import logger
from beagle.config import Config
from beagle.datasources.base_datasource import ExternalDataSource
from beagle.transformers.generic_transformer import GenericTransformer
class ElasticSearchQSSerach(ExternalDataSource):
"""Datasource which allows transforming the results of a Elasticsearch Query String search into a
graph.
Parameters
----------
index : str
Elasticsearch index, by default "logs-*"
query : str
Elasticsearch query string, by default "*"
    earliest : str, optional
The earliest time modifier, by default "-7d"
latest : str, optional
The latest time modifier, by default "now"
Raises
------
RuntimeError
If there are no Elasticsearch credentials configured.
"""
name = "Elasticsearch Query String"
transformers = [GenericTransformer]
category = "Elasticsearch"
def __init__(
self, index: str = "logs-*", query: str = "*", earliest: str = "-7d", latest: str = "now"
):
"""Creates a splunk query to pull data from
Parameters
----------
index : str
Elasticsearch index, by default "logs-*"
query : str
Elasticsearch query string, by default "*"
        earliest : str, optional
The earliest time modifier, by default "-7d"
latest : str, optional
The latest time modifier, by default "now"
"""
self.earliest = earliest
self.latest = latest
self.index = index
self.query = query
self.client = self._setup_session()
def _setup_session(self): # pragma: no cover
from elasticsearch import Elasticsearch
client_kwargs = {
"host": Config.get("elasticsearch", "host"),
"scheme": Config.get("elasticsearch", "scheme"),
"port": int(Config.get("elasticsearch", "port", fallback=9200)),
}
if Config.get("elasticsearch", "username") and Config.get("elasticsearch", "password"):
client_kwargs["http_auth"] = (
Config.get("elasticsearch", "username"),
Config.get("elasticsearch", "password"),
)
logger.info(f"Creating Elasticsearch client for host={client_kwargs['host']}")
return Elasticsearch(**client_kwargs)
def events(self) -> Generator[dict, None, None]:
query = {
"query": {
"bool": {
"must": {"query_string": {"query": self.query}},
"filter": [
{
"range": {
"@timestamp": {"gte": "now" + self.earliest, "lte": self.latest}
}
}
],
}
}
}
# Number of hits to return
size = int(Config.get("elasticsearch", "scroll_size", fallback=100))
data = self.client.search(index=self.index, body=query, scroll="2m", size=size)
# Get the scroll ID
sid = data["_scroll_id"]
scroll_size = len(data["hits"]["hits"])
while scroll_size > 0:
# Before scroll, process current batch of hits
for item in data["hits"]["hits"]:
source = item["_source"]
source["_id"] = item["_id"]
yield source
data = self.client.scroll(scroll_id=sid, scroll="2m")
# Update the scroll ID
sid = data["_scroll_id"]
# Get the number of results that returned in the last scroll
scroll_size = len(data["hits"]["hits"])
def metadata(self) -> dict: # pragma: no cover
return {
"index": self.index,
"query": self.query,
"earliest": self.earliest,
"latest": self.latest,
}
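# Illustrative usage sketch (index and query string are arbitrary examples; only
# the events() generator defined above is assumed):
#
#     datasource = ElasticSearchQSSerach(index="winlogbeat-*", query="event_id:1", earliest="-1d")
#     for event in datasource.events():
#         handle(event)  # hypothetical consumer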
|
tests/test_sklearn_mlp_converter.py | vumichien/hummingbird | 2,772 | 11109101 | <filename>tests/test_sklearn_mlp_converter.py
"""
Tests sklearn MLP models (MLPClassifier, MLPRegressor) converters.
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.neural_network import MLPClassifier, MLPRegressor
import hummingbird.ml
from hummingbird.ml import constants
from hummingbird.ml._utils import tvm_installed
class TestSklearnMLPClassifier(unittest.TestCase):
# MLPClassifier test function to be parameterized
def _test_mlp_classifer(self, num_classes, activation="relu", labels_shift=0, backend="torch", extra_config={}):
model = MLPClassifier(hidden_layer_sizes=(100, 100, 50,), activation=activation)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.randint(num_classes, size=100) + labels_shift
model.fit(X, y)
torch_model = hummingbird.ml.convert(model, backend, X, extra_config=extra_config)
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.predict_proba(X), torch_model.predict_proba(X), rtol=1e-6, atol=1e-6)
# MLPClassifier binary
def test_mlp_classifer_bi(self):
self._test_mlp_classifer(2)
# MLPClassifier multi-class
def test_mlp_classifer_multi(self):
self._test_mlp_classifer(3)
# MLPClassifier multi-class w/ shifted labels
def test_mlp_classifer_multi_shifted_labels(self):
self._test_mlp_classifer(3, labels_shift=3)
# MLPClassifier multi-class w/ tanh activation
def test_mlp_classifer_multi_logistic(self):
self._test_mlp_classifer(3, activation="tanh")
# MLPClassifier multi-class w/ logistic activation
def test_mlp_classifer_multi_tanh(self):
self._test_mlp_classifer(3, activation="logistic")
# MLPClassifier multi-class w/ identity activation
def test_mlp_classifer_multi_identity(self):
self._test_mlp_classifer(3, activation="identity")
# Test TVM backend
# MLPClassifier binary
@unittest.skipIf(not (tvm_installed()), reason="TVM tests require TVM")
def test_mlp_classifer_bi_tvm(self):
self._test_mlp_classifer(2, backend="tvm", extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})
# MLPClassifier multi-class
@unittest.skipIf(not (tvm_installed()), reason="TVM tests require TVM")
def test_mlp_classifer_multi_tvm(self):
self._test_mlp_classifer(3, backend="tvm", extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})
# MLPClassifier multi-class w/ shifted labels
@unittest.skipIf(not (tvm_installed()), reason="TVM tests require TVM")
def test_mlp_classifer_multi_shifted_labels_tvm(self):
self._test_mlp_classifer(3, labels_shift=3, backend="tvm", extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})
# MLPClassifier multi-class w/ tanh activation
@unittest.skipIf(not (tvm_installed()), reason="TVM tests require TVM")
def test_mlp_classifer_multi_logistic_tvm(self):
self._test_mlp_classifer(3, activation="tanh", backend="tvm", extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})
# MLPClassifier multi-class w/ logistic activation
@unittest.skipIf(not (tvm_installed()), reason="TVM tests require TVM")
def test_mlp_classifer_multi_tanh_tvm(self):
self._test_mlp_classifer(3, activation="logistic", backend="tvm", extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})
# MLPClassifier multi-class w/ identity activation
@unittest.skipIf(not (tvm_installed()), reason="TVM tests require TVM")
def test_mlp_classifer_multi_identity_tvm(self):
self._test_mlp_classifer(3, activation="identity", backend="tvm", extra_config={constants.TVM_MAX_FUSE_DEPTH: 30})
# MLPRegressor test function to be parameterized
def _test_mlp_regressor(self, activation="relu"):
model = MLPRegressor(hidden_layer_sizes=(100, 100, 50,), activation=activation)
np.random.seed(0)
X = np.random.rand(100, 200)
X = np.array(X, dtype=np.float32)
y = np.random.rand(100)
model.fit(X, y)
torch_model = hummingbird.ml.convert(model, "torch")
self.assertTrue(torch_model is not None)
np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-6, atol=1e-6)
# MLPRegressor
def test_mlp_regressor(self):
self._test_mlp_regressor()
# MLPRegressor w/ tanh activation
def test_mlp_regressor_multi_logistic(self):
self._test_mlp_regressor(activation="tanh")
# MLPRegressor w/ logistic activation
def test_mlp_regressor_multi_tanh(self):
self._test_mlp_regressor(activation="logistic")
# MLPRegressor multi-class w/ identity activation
def test_mlp_regressor_multi_identity(self):
self._test_mlp_regressor(activation="identity")
if __name__ == "__main__":
unittest.main()
|
aztk_cli/entrypoint.py | Geims83/aztk | 161 | 11109113 | <reponame>Geims83/aztk
"""
AZTK module for the CLI entry point
Note: any changes to this file require the package to be reinstalled:
pip install -e .
"""
import argparse
import warnings
from typing import NamedTuple
from azure.batch.models import BatchErrorException
import aztk
from aztk_cli import constants, log, logger, utils
from aztk_cli.spark.endpoints import spark
from . import plugins, toolkit
# Makes sure the warnings are displayed nicely in the CLI without a stacktrace
def _show_warn(message, *_args):
log.warning(str(message))
def main():
parser = argparse.ArgumentParser(prog=constants.CLI_EXE)
setup_common_args(parser)
subparsers = parser.add_subparsers(title="Available Softwares", dest="software", metavar="<software>")
subparsers.required = True
spark_parser = subparsers.add_parser("spark", help="Commands to run spark jobs")
plugins_parser = subparsers.add_parser("plugins", help="Commands to list and view plugins")
toolkit_parser = subparsers.add_parser("toolkit", help="List current toolkit information and browse available ones")
spark.setup_parser(spark_parser)
plugins.setup_parser(plugins_parser)
toolkit.setup_parser(toolkit_parser)
args = parser.parse_args()
parse_common_args(args)
try:
run_software(args)
except BatchErrorException as e:
utils.print_batch_exception(e)
except aztk.error.AztkError as e:
log.error(str(e))
def setup_common_args(parser: argparse.ArgumentParser):
parser.add_argument("--version", action="version", version=aztk.version.__version__)
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging.")
def parse_common_args(args: NamedTuple):
if args.verbose:
logger.setup_logging(True)
log.debug("Verbose logging enabled")
else:
warnings.showwarning = _show_warn
logger.setup_logging(False)
def run_software(args: NamedTuple):
softwares = {}
softwares[aztk.models.Software.spark] = spark.execute
softwares["plugins"] = plugins.execute
softwares["toolkit"] = toolkit.execute
func = softwares[args.software]
func(args)
if __name__ == "__main__":
main()
|
mmdeploy/codebase/mmocr/models/text_detection/fpn_cat.py | aegis-rider/mmdeploy | 746 | 11109160 | <gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from packaging import version
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
func_name='mmocr.models.textdet.FPNC.forward', backend='tensorrt')
def fpnc__forward__tensorrt(ctx, self, inputs, **kwargs):
"""Rewrite `forward` of FPNC for tensorrt backend.
Rewrite this function to replace nearest upsampling with bilinear
upsampling. TensorRT-7 backend applies different nearest sampling strategy
from pytorch, which heavily influenced the final performance.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class FPNC.
inputs (Sequence[Tensor]): The feature maps for each scale level with
shape (N, num_anchors * num_classes, H, W)
Returns:
outs (Tensor): A feature map output from FPNC. The tensor shape
(N, C, H, W).
"""
# TensorRT version 8+ matches the upsampling with pytorch
import tensorrt as trt
apply_rewrite = version.parse(trt.__version__) < version.parse('8')
mode = 'bilinear' if apply_rewrite else 'nearest'
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals)
# build top-down path
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(
laterals[i], size=prev_shape, mode=mode)
# build outputs
# part 1: from original levels
outs = [
self.smooth_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
for i, out in enumerate(outs):
outs[i] = F.interpolate(outs[i], size=outs[0].shape[2:], mode=mode)
out = torch.cat(outs, dim=1)
if self.conv_after_concat:
out = self.out_conv(out)
return out
|
TradzQAI/tools/indicators/function_helper.py | kkuette/AI_project | 164 | 11109163 | <reponame>kkuette/AI_project
import numpy as np
def fill_for_noncomputable_vals(input_data, result_data):
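    """Left-pad result_data with NaNs so its length matches input_data."""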
non_computable_values = np.repeat(
np.nan, len(input_data) - len(result_data)
)
filled_result_data = np.append(non_computable_values, result_data)
return filled_result_data
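# Example of the padding behaviour (illustrative values):
#   fill_for_noncomputable_vals([1, 2, 3, 4, 5], [10, 20, 30])
#   -> array([nan, nan, 10., 20., 30.])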
|
anuga/shallow_water/tests/test_loadsave.py | samcom12/anuga_core | 136 | 11109168 | #!/usr/bin/env python
from future import standard_library
standard_library.install_aliases()
from builtins import range
import unittest, os, time
import os.path
from math import pi, sqrt
import tempfile
from anuga.file.netcdf import NetCDFFile
from anuga.file.sww import extent_sww
from anuga.config import g, epsilon
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a
from anuga.utilities.numerical_tools import mean, ensure_numeric
from anuga.geometry.polygon import is_inside_polygon
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.geospatial_data.geospatial_data import Geospatial_data
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular_cross, \
rectangular
from anuga.abstract_2d_finite_volumes.quantity import Quantity
from anuga.shallow_water.forcing import Inflow, Cross_section
from anuga.geospatial_data.geospatial_data import ensure_geospatial
from anuga.utilities.system_tools import get_pathname_from_package
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions \
import Dirichlet_boundary
from anuga.shallow_water.forcing import Rainfall, Wind_stress
from anuga.shallow_water.forcing import Inflow, Cross_section
from anuga.shallow_water.sww_interrogate import get_flow_through_cross_section
from anuga.shallow_water.shallow_water_domain import Domain
# boundary functions
from anuga.shallow_water.boundaries import Reflective_boundary, \
Field_boundary, Transmissive_momentum_set_stage_boundary, \
Transmissive_stage_zero_momentum_boundary
from anuga.abstract_2d_finite_volumes.generic_boundary_conditions\
import Transmissive_boundary, Dirichlet_boundary, \
Time_boundary, File_boundary, AWI_boundary
import numpy as num
from anuga.config import g
# Get gateway to C implementation of flux function for direct testing
from anuga.shallow_water.shallow_water_ext import flux_function_central as flux_function
from anuga.shallow_water.shallow_water_ext import rotate
def set_bottom_friction(tag, elements, domain):
if tag == "bottom":
domain.set_quantity('friction', 0.09, indices = elements)
def set_top_friction(tag, elements, domain):
if tag == "top":
domain.set_quantity('friction', 1., indices = elements)
def set_all_friction(tag, elements, domain):
if tag == 'all':
new_values = domain.get_quantity('friction').get_values(indices = elements) + 10.0
domain.set_quantity('friction', new_values, indices = elements)
# For test_fitting_using_shallow_water_domain example
def linear_function(point):
point = num.array(point)
return point[:,0]+point[:,1]
def scalar_func(t, x, y):
"""Function that returns a scalar.
Used to test error message when numeric array is expected
"""
return 17.7
class Test_LoadSave(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
for file in ['domain.sww', 'domain_pickle.pickle']:
try:
os.remove(file)
except:
pass
def test_get_flow_through_cross_section_with_geo(self):
"""test_get_flow_through_cross_section(self):
Test that the total flow through a cross section can be
correctly obtained at run-time from the ANUGA domain.
This test creates a flat bed with a known flow through it and tests
that the function correctly returns the expected flow.
The specifics are
e = -1 m
u = 2 m/s
h = 2 m
w = 3 m (width of channel)
q = u*h*w = 12 m^3/s
This run tries it with georeferencing and with elevation = -1
"""
# Create basic mesh (20m x 3m)
width = 3
length = 20
t_end = 1
points, vertices, boundary = rectangular(length, width, length, width)
# Create shallow water domain
domain = Domain(points, vertices, boundary,
geo_reference=Geo_reference(56, 308500, 6189000))
domain.default_order = 2
domain.set_quantities_to_be_stored(None)
e = -1.0
w = 1.0
h = w-e
u = 2.0
uh = u*h
Br = Reflective_boundary(domain) # Side walls
Bd = Dirichlet_boundary([w, uh, 0]) # 2 m/s across the 3 m inlet:
# Initial conditions
domain.set_quantity('elevation', e)
domain.set_quantity('stage', w)
domain.set_quantity('xmomentum', uh)
domain.set_boundary({'left': Bd, 'right': Bd, 'top': Br, 'bottom': Br})
# Interpolation points down the middle
I = [[0, width/2.],
[length/2., width/2.],
[length, width/2.]]
interpolation_points = domain.geo_reference.get_absolute(I)
for t in domain.evolve(yieldstep=0.1, finaltime=0.5):
# Shortcuts to quantites
stage = domain.get_quantity('stage')
xmomentum = domain.get_quantity('xmomentum')
ymomentum = domain.get_quantity('ymomentum')
# Check that quantities are they should be in the interior
w_t = stage.get_values(interpolation_points)
uh_t = xmomentum.get_values(interpolation_points)
vh_t = ymomentum.get_values(interpolation_points)
assert num.allclose(w_t, w)
assert num.allclose(uh_t, uh)
assert num.allclose(vh_t, 0.0, atol=1.0e-6)
# Check flows through the middle
for i in range(5):
x = length/2. + i*0.23674563 # Arbitrary
cross_section = [[x, 0], [x, width]]
cross_section = domain.geo_reference.get_absolute(cross_section)
Q = domain.get_flow_through_cross_section(cross_section,
verbose=False)
assert num.allclose(Q, uh*width)
import pickle
fid = open('domain_pickle.pickle', 'wb')
pickle.dump(domain, fid)
fid = open('domain_pickle.pickle', 'rb')
domain_restored = pickle.load(fid)
for t in domain_restored.evolve(yieldstep=0.1, finaltime=1.0):
# Shortcuts to quantites
stage = domain_restored.get_quantity('stage')
xmomentum = domain_restored.get_quantity('xmomentum')
ymomentum = domain_restored.get_quantity('ymomentum')
# Check that quantities are they should be in the interior
w_t = stage.get_values(interpolation_points)
uh_t = xmomentum.get_values(interpolation_points)
vh_t = ymomentum.get_values(interpolation_points)
assert num.allclose(w_t, w)
assert num.allclose(uh_t, uh)
assert num.allclose(vh_t, 0.0, atol=1.0e-6)
# Check flows through the middle
for i in range(5):
x = length/2. + i*0.23674563 # Arbitrary
cross_section = [[x, 0], [x, width]]
cross_section = domain_restored.geo_reference.get_absolute(cross_section)
Q = domain_restored.get_flow_through_cross_section(cross_section,
verbose=False)
assert num.allclose(Q, uh*width)
#-------------------------------------------------------------
if __name__ == "__main__":
suite = unittest.makeSuite(Test_LoadSave, 'test')
runner = unittest.TextTestRunner() #verbosity=2)
runner.run(suite)
|
icg/variationalnetwork.py | mklasby/mri-variationalnetwork | 119 | 11109204 | import tensorflow as tf
class VnBasicCell(object):
"""Variational Network Basic Cell class. The call function defines the calculation for a single stage.
Args:
inputs: list of inputs
constants: dictionary of constants
params: dictionary of (changing) parameters
const_params: dictionary of non-changing parameters
options: dictionary of additional options
"""
def __init__(self, inputs, params, const_params, constants, options=None):
self._inputs = inputs
self._constants = constants
self._params = params
self._const_params = const_params
self._options = options
@property
def inputs(self):
return self._inputs
@property
def constants(self):
return self._constants
@property
def params(self):
return self._params
    def call(self, t, inputs):
        raise NotImplementedError('This has to be implemented in the derived class.')
class VariationalNetwork(object):
"""Variational Network class. Defines variational network for a given cell,
defining a single stage, and a given number of stages.
Args:
cell: single stage, defined by the given application
        num_stages: number of stages
        num_cycles: number of cycles (optional). For all standard variational network applications its default value 1 is used.
        parallel_iterations: number of parallel iterations used in the while loop (optional). default=1
        swap_memory: allow swapping of memory in the while loop (optional). default=False
"""
def __init__(self, cell, num_stages, num_cycles=1, parallel_iterations=1, swap_memory=False):
# Basic computational graph of a vn cell
self.cell = cell
# Define the number of repetitions
self._num_cycles = num_cycles
self._num_stages = num_stages
# Tensorflow specific details
self._parallel_iterations = parallel_iterations
self._swap_memory = swap_memory
# Define the iteration method
def time_to_param_index(t):
return t % self._num_stages
self._cell.time_to_param_index = time_to_param_index
self._t = tf.constant(0)
# define condition and body for while loop
self._cond = lambda t, *inputs: t < self._num_stages * self._num_cycles
def body(t, *inputs):
cell_outputs = self.cell.call(t, inputs)
outputs = [tf.concat([i, tf.expand_dims(j, 0)], axis=0) for (i, j) in zip(inputs, cell_outputs)]
return [t+1] + outputs
self._body = body
self._inputs = [tf.expand_dims(inp, 0) for inp in self.cell.inputs]
self._input_shapes = [tf.TensorShape([None]).concatenate(inp.get_shape()) for inp in self._cell.inputs]
self._outputs = tf.while_loop(
self._cond, self._body, loop_vars=[self._t] + self._inputs,
shape_invariants=[self._t.get_shape()] + self._input_shapes,
parallel_iterations=self._parallel_iterations,
swap_memory=self._swap_memory)
def get_outputs(self, stage_outputs=False):
""" Get the outputs of the variational network.
Args:
stage_outputs: get all stage outputs (optional) default=False
"""
if stage_outputs:
return self._outputs[1:]
else:
return [out[-1] for out in self._outputs[1:]]
@property
def cell(self):
return self._cell
@cell.setter
def cell(self, value):
assert isinstance(value, VnBasicCell)
self._cell = value
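# --- Illustrative usage sketch (not part of the original file) ---
# A toy cell wired into the driver loop above, assuming a TF1.x graph/session
# workflow (use tf.compat.v1 under TF 2.x). The "shrink towards zero" update and
# the per-stage step sizes are invented for illustration only; real applications
# derive their own cell (e.g. an MRI reconstruction cell) from VnBasicCell.
class _ToyCell(VnBasicCell):
    def call(self, t, inputs):
        # inputs[0] stacks all previous iterates along axis 0, so the current
        # iterate is its last row; shrink it with a stage-dependent step size.
        u = inputs[0][-1]
        step = self.params['step'][self.time_to_param_index(t)]
        return [u - step * u]
def _toy_example():
    x0 = tf.ones([4], dtype=tf.float32)
    steps = tf.constant([0.1, 0.2, 0.3], dtype=tf.float32)
    cell = _ToyCell(inputs=[x0], params={'step': steps},
                    const_params={}, constants={})
    vn = VariationalNetwork(cell, num_stages=3)
    final_iterates = vn.get_outputs()
    with tf.Session() as sess:  # tf.compat.v1.Session() on TF 2.x
        print(sess.run(final_iterates))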
|
compressai/utils/bench/__main__.py | Conzel/CompressAI | 515 | 11109231 | <filename>compressai/utils/bench/__main__.py
# Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Collect performance metrics of published traditional or end-to-end image
codecs.
"""
import argparse
import json
import multiprocessing as mp
import sys
from collections import defaultdict
from itertools import starmap
from pathlib import Path
from typing import List
from .codecs import AV1, BPG, HM, JPEG, JPEG2000, TFCI, VTM, Codec, WebP
# from torchvision.datasets.folder
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
codecs = [JPEG, WebP, JPEG2000, BPG, TFCI, VTM, HM, AV1]
# we need the quality index (not value) to compute the stats later
def func(codec, i, *args):
rv = codec.run(*args)
return i, rv
def collect(
codec: Codec,
dataset: str,
qps: List[int],
metrics: List[str],
num_jobs: int = 1,
):
if not Path(dataset).is_dir():
raise OSError(f"No such directory: {dataset}")
filepaths = []
for ext in IMG_EXTENSIONS:
filepaths.extend(Path(dataset).rglob(f"*{ext}"))
pool = mp.Pool(num_jobs) if num_jobs > 1 else None
if len(filepaths) == 0:
print("No images found in the dataset directory")
sys.exit(1)
args = [(codec, i, f, q, metrics) for i, q in enumerate(qps) for f in filepaths]
if pool:
rv = pool.starmap(func, args)
else:
rv = list(starmap(func, args))
results = [defaultdict(float) for _ in range(len(qps))]
for i, metrics in rv:
for k, v in metrics.items():
results[i][k] += v
# aggregate results for all images
for i, _ in enumerate(results):
for k, v in results[i].items():
results[i][k] = v / len(filepaths)
# list of dict -> dict of list
out = defaultdict(list)
for r in results:
for k, v in r.items():
out[k].append(v)
return out
def setup_args():
description = "Collect codec metrics."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="codec", help="Select codec")
subparsers.required = True
return parser, subparsers
def setup_common_args(parser):
parser.add_argument("dataset", type=str)
parser.add_argument(
"-j",
"--num-jobs",
type=int,
metavar="N",
default=1,
help="number of parallel jobs (default: %(default)s)",
)
parser.add_argument(
"-q",
"--qps",
dest="qps",
metavar="Q",
default=[75],
nargs="+",
type=int,
help="list of quality/quantization parameter (default: %(default)s)",
)
parser.add_argument(
"--metrics",
dest="metrics",
default=["psnr", "ms-ssim"],
nargs="+",
help="do not return PSNR and MS-SSIM metrics (use for very small images)",
)
def main(argv):
parser, subparsers = setup_args()
for c in codecs:
cparser = subparsers.add_parser(c.__name__.lower(), help=f"{c.__name__}")
setup_common_args(cparser)
c.setup_args(cparser)
args = parser.parse_args(argv)
codec_cls = next(c for c in codecs if c.__name__.lower() == args.codec)
codec = codec_cls(args)
results = collect(
codec,
args.dataset,
args.qps,
args.metrics,
args.num_jobs,
)
output = {
"name": codec.name,
"description": codec.description,
"results": results,
}
print(json.dumps(output, indent=2))
if __name__ == "__main__":
main(sys.argv[1:])
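# Example invocation (illustrative; each codec adds its own extra flags via
# setup_args, so the exact options depend on the selected codec):
#
#     python -m compressai.utils.bench jpeg /path/to/image/dir -q 20 40 60 80 -j 4
#
# The command prints a JSON document with the codec name, description and the
# metrics averaged over all images, one entry per quality value.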
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/issue143/not-a-package/test.py | kokosing/hue | 5,079 | 11109248 | <gh_stars>1000+
def test():
raise Exception("do not run")
|
extraction/convert_anchor_tags_to_wikidata.py | BenHammersley/deeptype | 614 | 11109254 | import argparse
from os.path import join
from os import makedirs
import marisa_trie
import numpy as np
from wikidata_linker_utils.bash import count_lines
from wikidata_linker_utils.progressbar import get_progress_bar
from wikidata_linker_utils.wikipedia import match_wikipedia_to_wikidata, load_redirections
from wikidata_linker_utils.successor_mask import construct_mapping, construct_anchor_trie
def parse_args(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument("wikipedia2wikidata_trie",
help="Location of wikipedia -> wikidata mapping trie.")
parser.add_argument("prefix", type=str,
help="What language is being processed, e.g. enwiki, frwiki, etc.")
parser.add_argument("anchor_tags", type=str,
help="Location where anchor tags were saved (tsv).")
parser.add_argument("redirections", type=str,
help="Location where redirections were saved (tsv).")
parser.add_argument("out", type=str,
help="Directory to save trie/data in.")
return parser.parse_args(argv)
def main():
args = parse_args()
makedirs(args.out, exist_ok=True)
wikipedia2wikidata_trie = marisa_trie.RecordTrie('i').load(
args.wikipedia2wikidata_trie
)
print('loaded trie')
redirections = load_redirections(args.redirections)
anchor_trie = construct_anchor_trie(
anchor_tags=args.anchor_tags,
wikipedia2wikidata_trie=wikipedia2wikidata_trie,
redirections=redirections,
prefix=args.prefix
)
anchor_trie.save(join(args.out, 'trie.marisa'))
(
(
trie_index2indices_offsets,
trie_index2indices_values,
trie_index2indices_counts
),
(
trie_index2contexts_offsets,
trie_index2contexts_values,
trie_index2contexts_counts
)
) = construct_mapping(
anchor_tags=args.anchor_tags,
wikipedia2wikidata_trie=wikipedia2wikidata_trie,
redirections=redirections,
prefix=args.prefix,
anchor_trie=anchor_trie
)
np.save(join(args.out, "trie_index2indices_offsets.npy"), trie_index2indices_offsets)
np.save(join(args.out, "trie_index2indices_values.npy"), trie_index2indices_values)
np.save(join(args.out, "trie_index2indices_counts.npy"), trie_index2indices_counts)
np.save(join(args.out, "trie_index2contexts_offsets.npy"), trie_index2contexts_offsets)
np.save(join(args.out, "trie_index2contexts_values.npy"), trie_index2contexts_values)
np.save(join(args.out, "trie_index2contexts_counts.npy"), trie_index2contexts_counts)
if __name__ == "__main__":
main()
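# Example invocation (illustrative paths; the trie and tsv inputs are produced
# by the earlier steps of the wikidata linking pipeline):
#
#     python3 convert_anchor_tags_to_wikidata.py \
#         data/wikipedia2wikidata.marisa enwiki \
#         data/enwiki_anchor_tags.tsv data/enwiki_redirections.tsv data/en_trie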
|
test_project/views.py | martey/django-wysiwyg | 128 | 11109290 | from django.shortcuts import render_to_response
def basic_test(request):
return render_to_response("basic_test.html")
|
easyfsl/methods/abstract_meta_learner.py | lnowakow/easy-few-shot-learning | 208 | 11109326 | <gh_stars>100-1000
from abc import abstractmethod
from pathlib import Path
from statistics import mean
from typing import Tuple, Union
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from easyfsl.utils import sliding_average, compute_backbone_output_shape
class AbstractMetaLearner(nn.Module):
"""
Abstract class providing methods usable by all few-shot classification algorithms
"""
def __init__(self, backbone: nn.Module):
super().__init__()
self.backbone = backbone
self.backbone_output_shape = compute_backbone_output_shape(backbone)
self.feature_dimension = self.backbone_output_shape[0]
self.loss_function = nn.CrossEntropyLoss()
self.best_validation_accuracy = 0.0
self.best_model_state = None
# pylint: disable=all
@abstractmethod
def forward(
self,
query_images: torch.Tensor,
) -> torch.Tensor:
"""
Predict classification labels.
Args:
query_images: images of the query set
Returns:
a prediction of classification scores for query images
"""
raise NotImplementedError(
"All few-shot algorithms must implement a forward method."
)
@abstractmethod
def process_support_set(
self,
support_images: torch.Tensor,
support_labels: torch.Tensor,
):
"""
Harness information from the support set, so that query labels can later be predicted using
a forward call
Args:
support_images: images of the support set
support_labels: labels of support set images
"""
raise NotImplementedError(
"All few-shot algorithms must implement a process_support_set method."
)
# pylint: enable=all
def evaluate_on_one_task(
self,
support_images: torch.Tensor,
support_labels: torch.Tensor,
query_images: torch.Tensor,
query_labels: torch.Tensor,
    ) -> Tuple[int, int]:
"""
Returns the number of correct predictions of query labels, and the total number of
predictions.
"""
self.process_support_set(support_images.cuda(), support_labels.cuda())
return (
torch.max(
self(query_images.cuda()).detach().data,
1,
)[1]
== query_labels.cuda()
).sum().item(), len(query_labels)
def evaluate(self, data_loader: DataLoader) -> float:
"""
Evaluate the model on few-shot classification tasks
Args:
data_loader: loads data in the shape of few-shot classification tasks
Returns:
average classification accuracy
"""
# We'll count everything and compute the ratio at the end
total_predictions = 0
correct_predictions = 0
# eval mode affects the behaviour of some layers (such as batch normalization or dropout)
# no_grad() tells torch not to keep in memory the whole computational graph
self.eval()
with torch.no_grad():
with tqdm(
enumerate(data_loader), total=len(data_loader), desc="Evaluation"
) as tqdm_eval:
for _, (
support_images,
support_labels,
query_images,
query_labels,
_,
) in tqdm_eval:
correct, total = self.evaluate_on_one_task(
support_images, support_labels, query_images, query_labels
)
total_predictions += total
correct_predictions += correct
# Log accuracy in real time
tqdm_eval.set_postfix(
accuracy=correct_predictions / total_predictions
)
return correct_predictions / total_predictions
def compute_loss(
self, classification_scores: torch.Tensor, query_labels: torch.Tensor
) -> torch.Tensor:
"""
Apply the method's criterion to compute the loss between predicted classification scores,
and query labels.
We do this in a separate function because some few-shot learning algorithms don't apply
the loss function directly to classification scores and query labels. For instance, Relation
Networks use Mean Square Error, so query labels need to be put in the one hot encoding.
Args:
classification_scores: predicted classification scores of shape (n_query, n_classes)
query_labels: ground truth labels. 1-dim tensor of length n_query
Returns:
loss
"""
return self.loss_function(classification_scores, query_labels)
def fit_on_task(
self,
support_images: torch.Tensor,
support_labels: torch.Tensor,
query_images: torch.Tensor,
query_labels: torch.Tensor,
optimizer: optim.Optimizer,
) -> float:
"""
Predict query set labels and updates model's parameters using classification loss
Args:
support_images: images of the support set
support_labels: labels of support set images (used in the forward pass)
query_images: query set images
query_labels: labels of query set images (only used for loss computation)
optimizer: optimizer to train the model
Returns:
the value of the classification loss (for reporting purposes)
"""
self.train()
optimizer.zero_grad()
self.process_support_set(support_images.cuda(), support_labels.cuda())
classification_scores = self(query_images.cuda())
loss = self.compute_loss(classification_scores, query_labels.cuda())
loss.backward()
optimizer.step()
return loss.item()
def fit(
self,
train_loader: DataLoader,
optimizer: optim.Optimizer,
val_loader: DataLoader = None,
validation_frequency: int = 1000,
) -> float:
"""
Train the model on few-shot classification tasks.
Args:
train_loader: loads training data in the shape of few-shot classification tasks
optimizer: optimizer to train the model
val_loader: loads data from the validation set in the shape of few-shot classification
tasks
validation_frequency: number of training episodes between two validations
Returns:
average loss
"""
log_update_frequency = 10
all_loss = []
with tqdm(
enumerate(train_loader), total=len(train_loader), desc="Meta-Training"
) as tqdm_train:
for episode_index, (
support_images,
support_labels,
query_images,
query_labels,
_,
) in tqdm_train:
loss_value = self.fit_on_task(
support_images,
support_labels,
query_images,
query_labels,
optimizer,
)
all_loss.append(loss_value)
# Log training loss in real time
if episode_index % log_update_frequency == 0:
tqdm_train.set_postfix(
loss=sliding_average(all_loss, log_update_frequency)
)
# Validation
if val_loader:
if (episode_index + 1) % validation_frequency == 0:
self.validate(val_loader)
return mean(all_loss)
def validate(self, val_loader: DataLoader) -> float:
"""
Validate the model on the validation set.
Args:
val_loader: loads data from the validation set in the shape of few-shot classification
tasks
Returns:
average classification accuracy on the validation set
"""
validation_accuracy = self.evaluate(val_loader)
print(f"Validation accuracy: {(100 * validation_accuracy):.2f}%")
# If this was the best validation performance, we save the model state
if validation_accuracy > self.best_validation_accuracy:
print("Best validation accuracy so far!")
self.best_model_state = self.state_dict()
self.best_validation_accuracy = validation_accuracy
return validation_accuracy
def _check_that_best_state_is_defined(self):
"""
Raises:
            AttributeError: if self.best_model_state is None, i.e. if no best state has been
defined yet.
"""
if not self.best_model_state:
raise AttributeError(
"There is not best state defined for this model. "
"You need to train the model using validation to define a best state."
)
def restore_best_state(self):
"""
Retrieves the state (i.e. a dictionary of model parameters) of the model at the time it
obtained its best performance on the validation set.
"""
self._check_that_best_state_is_defined()
self.load_state_dict(self.best_model_state)
def dump_best_state(self, output_path: Union[Path, str]):
"""
Retrieves the state (i.e. a dictionary of model parameters) of the model at the time it
obtained its best performance on the validation set.
Args:
output_path: path to the output file. Common practice in PyTorch is to save models
using either a .pt or .pth file extension.
"""
self._check_that_best_state_is_defined()
torch.save(self.best_model_state, output_path)
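# --- Illustrative usage sketch (not part of the original file) ---
# A minimal prototype-based subclass showing how the two abstract methods fit
# together. It assumes the backbone returns flat (n_images, feature_dim)
# tensors; it is an invented example, not a method shipped with easyfsl, and
# fit()/evaluate() still expect CUDA to be available.
class _PrototypicalSketch(AbstractMetaLearner):
    def process_support_set(
        self,
        support_images: torch.Tensor,
        support_labels: torch.Tensor,
    ):
        support_features = self.backbone(support_images)
        # One prototype per class: the mean feature vector of its support images
        self.prototypes = torch.stack(
            [
                support_features[support_labels == label].mean(0)
                for label in torch.unique(support_labels)
            ]
        )
    def forward(self, query_images: torch.Tensor) -> torch.Tensor:
        # Score of a query for a class: negative distance to the class prototype
        return -torch.cdist(self.backbone(query_images), self.prototypes)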
|
mmdeploy/codebase/mmdet/models/dense_heads/ssd_head.py | grimoire/mmdeploy | 746 | 11109328 | <gh_stars>100-1000
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.core.ops import (ncnn_detection_output_forward,
ncnn_prior_box_forward)
from mmdeploy.core import FUNCTION_REWRITER
from mmdeploy.utils import is_dynamic_shape
@FUNCTION_REWRITER.register_rewriter(
func_name='mmdet.models.dense_heads.SSDHead.get_bboxes', backend='ncnn')
def ssd_head__get_bboxes__ncnn(ctx,
self,
cls_scores,
bbox_preds,
img_metas,
with_nms=True,
cfg=None,
**kwargs):
"""Rewrite `get_bboxes` of SSDHead for ncnn backend.
This rewriter using ncnn PriorBox and DetectionOutput layer to
support dynamic deployment, and has higher speed.
Args:
ctx (ContextCaller): The context with additional information.
cls_scores (list[Tensor]): Box scores for each level in the
feature pyramid, has shape
(N, num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each
level in the feature pyramid, has shape
(N, num_anchors * 4, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
with_nms (bool): If True, do nms before return boxes.
Default: True.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
Default: None.
Returns:
Tensor: outputs, shape is [N, num_det, 6].
"""
assert len(cls_scores) == len(bbox_preds)
deploy_cfg = ctx.cfg
is_dynamic_flag = is_dynamic_shape(deploy_cfg)
num_levels = len(cls_scores)
aspect_ratio = [
ratio[ratio > 1].detach().cpu().numpy()
for ratio in self.anchor_generator.ratios
]
strides = self.anchor_generator.strides
min_sizes = self.anchor_generator.base_sizes
if is_dynamic_flag:
max_sizes = min_sizes[1:] + img_metas[0]['img_shape'][0:1].tolist()
img_height = img_metas[0]['img_shape'][0].item()
img_width = img_metas[0]['img_shape'][1].item()
else:
max_sizes = min_sizes[1:] + img_metas[0]['img_shape'][0:1]
img_height = img_metas[0]['img_shape'][0]
img_width = img_metas[0]['img_shape'][1]
    # if there is no reshape, concat will raise an error in ncnn.
mlvl_anchors = [
ncnn_prior_box_forward(cls_scores[i], aspect_ratio[i], img_height,
img_width, strides[i][0], strides[i][1],
max_sizes[i:i + 1],
min_sizes[i:i + 1]).reshape(1, 2, -1)
for i in range(num_levels)
]
mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
cfg = self.test_cfg if cfg is None else cfg
assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(mlvl_anchors)
batch_size = 1
mlvl_valid_bboxes = []
mlvl_scores = []
for level_id, cls_score, bbox_pred in zip(
range(num_levels), mlvl_cls_scores, mlvl_bbox_preds):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(0, 2, 3,
1).reshape(batch_size, -1,
self.cls_out_channels)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(batch_size, -1, 4)
mlvl_valid_bboxes.append(bbox_pred)
mlvl_scores.append(cls_score)
# ncnn DetectionOutput layer uses background class at 0 position, but
# in mmdetection, background class is at self.num_classes position.
# We should adapt for ncnn.
batch_mlvl_valid_bboxes = torch.cat(mlvl_valid_bboxes, dim=1)
batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
if self.use_sigmoid_cls:
batch_mlvl_scores = batch_mlvl_scores.sigmoid()
else:
batch_mlvl_scores = batch_mlvl_scores.softmax(-1)
batch_mlvl_anchors = torch.cat(mlvl_anchors, dim=2)
batch_mlvl_scores = torch.cat([
batch_mlvl_scores[:, :, self.num_classes:],
batch_mlvl_scores[:, :, 0:self.num_classes]
],
dim=2)
batch_mlvl_valid_bboxes = batch_mlvl_valid_bboxes.reshape(
batch_size, 1, -1)
batch_mlvl_scores = batch_mlvl_scores.reshape(batch_size, 1, -1)
batch_mlvl_anchors = batch_mlvl_anchors.reshape(batch_size, 2, -1)
post_params = get_post_processing_params(deploy_cfg)
iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
score_threshold = cfg.get('score_thr', post_params.score_threshold)
pre_top_k = post_params.pre_top_k
keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)
output__ncnn = ncnn_detection_output_forward(
batch_mlvl_valid_bboxes, batch_mlvl_scores, batch_mlvl_anchors,
score_threshold, iou_threshold, pre_top_k, keep_top_k,
self.num_classes + 1)
return output__ncnn
|
test/www/python/postdata.py | henricazottes/zero | 6,247 | 11109339 | from flask import request
def handler():
if request.method == 'POST':
email = request.form.get('email')
password = request.form.get('password')
return 'Got ' + email
return "POST email and password here" |
website/content/02-introduction-to-python/175-running-code/code/formatting_example.py | profread/python | 389 | 11109408 | # Use \n to add a new line, before, in the middle of, or after a string.
print("\nExtra New Line Before")
print("One Print\nTwo New Lines!")
print("Extra New Line After\n")
# Use \t to add a tab.
print("\t Here's some tabbed output.")
# Or, combine both!
print("\nOne Print\n\tOne Tab") |
library/test/test_eof.py | creativemindplus/skybison | 278 | 11109428 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# WARNING: This is a temporary copy of code from the cpython library to
# facilitate bringup. Please file a task for anything you change!
# flake8: noqa
# fmt: off
# TODO(T39919550): unfork this file
"""test script for a few new invalid token catches"""
from test import support
import unittest
class EOFTestCase(unittest.TestCase):
def test_EOFC(self):
expect = "EOL while scanning string literal (<string>, line 1)"
try:
eval("""'this is a test\
""")
except SyntaxError as msg:
self.assertEqual(str(msg), expect)
else:
raise support.TestFailed
def test_EOFS(self):
expect = ("EOF while scanning triple-quoted string literal "
"(<string>, line 1)")
try:
eval("""'''this is a test""")
except SyntaxError as msg:
self.assertEqual(str(msg), expect)
else:
raise support.TestFailed
def test_line_continuation_EOF(self):
"""A continuation at the end of input must be an error; bpo2180."""
expect = 'unexpected EOF while parsing (<string>, line 1)'
with self.assertRaises(SyntaxError) as excinfo:
exec('x = 5\\')
self.assertEqual(str(excinfo.exception), expect)
with self.assertRaises(SyntaxError) as excinfo:
exec('\\')
self.assertEqual(str(excinfo.exception), expect)
if __name__ == "__main__":
unittest.main()
|
train/dag/gen_phrase.py | chuan717/Pinyin2Hanzi | 473 | 11109434 | <gh_stars>100-1000
# coding: utf-8
from __future__ import (print_function, unicode_literals)
import sys
import json
sys.path = ['../..'] + sys.path
from Pinyin2Hanzi import util
from ChineseTone import PinyinHelper, PinyinFormat
import jieba
def cut(s):
return jieba.cut(s, cut_all=False)
def writejson2file(obj, filename):
with open(filename, 'w') as outfile:
data = json.dumps(obj, indent=4, sort_keys=True)
outfile.write(data)
def readdatafromfile(filename):
with open(filename) as outfile:
return json.load(outfile)
result = {}
max_num = 0.
min_num = 100000000000000.
for line in open('./word.txt'):
line = util.as_text(line.strip())
if '=' not in line:
continue
word, num = line.split('=')
num = float(num)
pinyin_list = PinyinHelper.convertToPinyinFromSentence(word, segment=cut)
pinyins = ','.join(pinyin_list)
pinyins = util.simplify_pinyin(pinyins)
result.setdefault(pinyins, {})
result[pinyins].setdefault(word, 0)
result[pinyins][word] += num
max_num = max(max_num, result[pinyins][word])
min_num = min(min_num, result[pinyins][word])
for line in open('./phrase.txt'):
line = util.as_text(line.strip())
if '=' not in line:
continue
word, _ = line.split('=')
num = 1.
pinyin_list = PinyinHelper.convertToPinyinFromSentence(word, segment=cut)
pinyins = ','.join(pinyin_list)
pinyins = util.simplify_pinyin(pinyins)
result.setdefault(pinyins, {})
result[pinyins].setdefault(word, 0)
result[pinyins][word] += num
max_num = max(max_num, result[pinyins][word])
min_num = min(min_num, result[pinyins][word])
result['max_num'] = max_num
result['min_num'] = min_num
writejson2file(result, './result/dag_phrase.json')
# with open('./result/dag_phrase.txt', 'w') as output:
# s = ''
# for pinyin in result:
# for word in result[pinyin]:
# num = result[pinyin][word]
# s = s + pinyin + '=' + word + '=' + str(num) + '\n'
# output.write(s) |
scripts/update_changelog_link.py | normalchaos/intellij-rust | 4,168 | 11109478 | <reponame>normalchaos/intellij-rust<filename>scripts/update_changelog_link.py
import re
from datetime import date
from common import get_patch_version
from git import git_command
PLUGIN_XML = "plugin/src/main/resources/META-INF/plugin.xml"
if __name__ == '__main__':
with open(PLUGIN_XML) as f:
text = f.read()
today = date.today()
version = get_patch_version() - 1
new_text = re.sub(r"https://intellij-rust\.github\.io/.*\.html",
f"https://intellij-rust.github.io/{today.year}/{today.month:02d}/{today.day:02d}/changelog-{version}.html",
text)
with (open(PLUGIN_XML, mode="w")) as f:
f.write(new_text)
git_command("add", PLUGIN_XML)
# [skip ci] says GitHub not to emit `push` and `pull_request` events, i.e. don't trigger `check` workflow.
# It allows not to occupy GitHub Actions agents that are needed for building release builds
# See https://github.blog/changelog/2021-02-08-github-actions-skip-pull-request-and-push-workflows-with-skip-ci/
git_command("commit", "-m", "Changelog\n\n[skip ci]")
head = git_command("rev-parse", "HEAD")
release_branch = f"release-{version}"
git_command("checkout", release_branch)
git_command("cherry-pick", head)
git_command("push", "origin", "master")
git_command("push", "origin", release_branch)
|
academicstoday_project/landpage/tests/test_google.py | LeeDoona/EasyGrading | 146 | 11109495 | <reponame>LeeDoona/EasyGrading
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from landpage.views import google
import json
class GoogleVerifyTest(TestCase):
"""
python manage.py test landpage.tests.test_google
"""
def tearDown(self):
pass
def setUp(self):
pass
def test_url_resolves_to_google_verify_page_view(self):
found = resolve('/googlee81f1c16590924d1.html')
self.assertEqual(found.func,google.google_verify_page)
def test_google_verify_page_returns_correct_html(self):
client = Client()
response = client.post(
'/googlee81f1c16590924d1.html',
)
self.assertEqual(response.status_code, 200)
self.assertIn(b'googlee81f1c16590924d1.html',response.content)
|
tests/test_downshiftology.py | mathiazom/recipe-scrapers | 811 | 11109502 | from recipe_scrapers.downshiftology import Downshiftology
from tests import ScraperTest
class TestDownshiftologyScraper(ScraperTest):
scraper_class = Downshiftology
def test_host(self):
self.assertEqual("downshiftology.com", self.harvester_class.host())
def test_author(self):
self.assertEqual("<NAME>", self.harvester_class.author())
def test_title(self):
self.assertEqual("Greek Chicken Kabobs", self.harvester_class.title())
def test_total_time(self):
self.assertEqual(55, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("6 serving(s)", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://i2.wp.com/www.downshiftology.com/wp-content/uploads/2020/09/Greek-Chicken-Kabobs-3.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertCountEqual(
[
"1/4 cup olive oil",
"2 tablespoons red wine vinegar",
"3 tablespoons lemon juice",
"1 teaspoon Dijon mustard",
"3 garlic cloves (minced)",
"1 teaspoon dried oregano",
"1/2 teaspoon salt",
"1/4 teaspoon black pepper",
"1 1/2 pounds boneless skinless chicken breasts (about 3 large chicken breasts, cut into 1 1/2-inch pieces.)",
"1 red bell pepper (seeded, cut into 1 1/2-Inch pieces)",
"1 yellow bell pepper (seeded, cut into 1 1/2-inch pieces)",
"1 red onion (cut into 1 1/2-inch chunks)",
"1 zucchini (sliced)",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
self.assertEqual(
"Make marinade\nTo make the marinade, whisk together the olive oil, red wine vinegar, lemon juice, Dijon mustard, minced garlic, dried oregano, salt, and pepper.\nMarinate chicken\nPlace chicken pieces in a glass dish and pour the marinade over the chicken. Cover and marinate in the fridge for at least one hour.\nThread skewers\nLight a gas or charcoal grill on medium-high heat. Thread the skewers with pieces of red onion, chicken, zucchini, and bell pepper. You can alternate the order.\nGrill kabobs\nPlace the kabobs on the preheated grill, and cook about 5-7 minutes per side. The kabobs are done when the chicken is cooked through and the vegetables are lightly charred, about 15 minutes.\nServe with lemon wedges and tzatziki sauce.",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(5.0, self.harvester_class.ratings())
|
Modules/object_detection/py_nodes/fisheye_undistort.py | 473867143/Prometheus | 1,217 | 11109505 | <gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from sensor_msgs.msg import Image
from geometry_msgs.msg import Pose, Point, Quaternion
from cv_bridge import CvBridge
from std_msgs.msg import String
from std_msgs.msg import Bool
import numpy as np
import cv2
import os
import yaml
import math
import glob
camera_matrix = np.array([[274.4425695994789, 0.0, 422.808978794362], [0.0, 273.66842474114827, 407.3969079515446], [0.0, 0.0, 1.0]], np.float32)
distortion_coefficients = np.array([[0.01841136025813201], [-0.006751972660967855], [0.009935398363079766], [-0.008198696622455868]], np.float32)
rospy.init_node('fisheye_undistort', anonymous=True)
img_pub = rospy.Publisher('/camera/fisheye1/image_undistort', Image, queue_size = 10)
def image_callback(imgmsg):
bridge = CvBridge()
frame = bridge.imgmsg_to_cv2(imgmsg, "bgr8")
# processing
DIM = (848, 800)
map1, map2 = cv2.fisheye.initUndistortRectifyMap(camera_matrix, distortion_coefficients, np.eye(3), camera_matrix, DIM, cv2.CV_16SC2)
undistorted_img = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
# end
"""
h, w = frame.shape[:2]
img_resize = 360
if h > w:
h = int(float(h) / w * img_resize)
w = img_resize
else:
w = int(float(w) / h * img_resize)
h = img_resize
frame = cv2.resize(frame, (w, h))
"""
# cv2.imshow("undistorted_img", undistorted_img)
# cv2.imshow("area", area)
# cv2.waitKey(10)
img_pub.publish(bridge.cv2_to_imgmsg(undistorted_img, "bgr8"))
def fisheye_undistort(topic_name):
rospy.Subscriber(topic_name, Image, image_callback)
rospy.spin()
def get_K_and_D(checkerboard, imgsPath):
CHECKERBOARD = checkerboard
subpix_criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC+cv2.fisheye.CALIB_CHECK_COND+cv2.fisheye.CALIB_FIX_SKEW
objp = np.zeros((1, CHECKERBOARD[0]*CHECKERBOARD[1], 3), np.float32)
objp[0,:,:2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
_img_shape = None
objpoints = []
imgpoints = []
images = glob.glob(imgsPath)
for fname in images:
img = cv2.imread(fname)
        if _img_shape is None:
_img_shape = img.shape[:2]
else:
assert _img_shape == img.shape[:2], "All images must share the same size."
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD,cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)
if ret == True:
objpoints.append(objp)
cv2.cornerSubPix(gray,corners,(3,3),(-1,-1),subpix_criteria)
imgpoints.append(corners)
N_OK = len(objpoints)
K = np.zeros((3, 3))
D = np.zeros((4, 1))
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
rms, _, _, _, _ = cv2.fisheye.calibrate(
objpoints,
imgpoints,
gray.shape[::-1],
K,
D,
rvecs,
tvecs,
calibration_flags,
(cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
)
DIM = _img_shape[::-1]
print("Found " + str(N_OK) + " valid images for calibration")
print("DIM=" + str(_img_shape[::-1]))
print("K=np.array(" + str(K.tolist()) + ")")
print("D=np.array(" + str(D.tolist()) + ")")
return DIM, K, D
if __name__ == '__main__':
DIM, K, D = get_K_and_D((6, 9), os.path.dirname(os.path.abspath(__file__)) + '/checkerboard_imgs/*.png')
subscriber = rospy.get_param('~camera_topic', '/camera/fisheye1/image_raw')
camera_matrix = K
distortion_coefficients = D
try:
fisheye_undistort(subscriber)
except rospy.ROSInterruptException:
pass
|
stix_shifter_modules/arcsight/stix_transmission/status_connector.py | pyromaneact/stix-shifter | 129 | 11109528 | <reponame>pyromaneact/stix-shifter
import json
import math
from stix_shifter_utils.modules.base.stix_transmission.base_status_connector import BaseStatusConnector
from stix_shifter_utils.modules.base.stix_transmission.base_status_connector import Status
from enum import Enum
from stix_shifter_utils.utils.error_response import ErrorResponder
DEFAULT_LIMIT = 10000
class ArcsightStatus(Enum):
    # STARTING, RUNNING, TIMEOUT, COMPLETE, STOP, ERROR
STARTING = 'starting'
RUNNING = 'running'
TIMEOUT = 'timeout'
COMPLETE = 'complete'
STOP = 'stop'
ERROR = 'error'
class StatusConnector(BaseStatusConnector):
def __init__(self, api_client):
self.api_client = api_client
# Map data source status to connector status
@staticmethod
def __getStatus(arcsight_status):
"""
Return the status of the search id
:param arcsight_status: str, status
:return: str
"""
switcher = {
ArcsightStatus.STARTING.value: Status.RUNNING,
ArcsightStatus.RUNNING.value: Status.RUNNING,
ArcsightStatus.TIMEOUT.value: Status.TIMEOUT,
ArcsightStatus.COMPLETE.value: Status.COMPLETED,
ArcsightStatus.STOP.value: Status.CANCELED,
ArcsightStatus.ERROR.value: Status.ERROR
}
return switcher.get(arcsight_status).value
def create_status_connection(self, search_id):
"""
Fetching the progress and the status of the search id
:param search_id: str, search id
:return: dict
"""
return_obj = dict()
limit = DEFAULT_LIMIT
user_limit = None
try:
search_id_length = len(search_id.split(':'))
search_id_values = search_id.split(':')
if search_id_length == 2:
search_session_id, user_session_id = search_id_values
elif search_id_length == 3:
search_session_id, user_session_id, user_limit = search_id_values
else:
raise SyntaxError("Invalid search_id format : " + str(search_id))
if user_limit and int(user_limit) <= DEFAULT_LIMIT:
limit = user_limit
response = self.api_client.get_search_status(search_session_id, user_session_id)
raw_response = response.read()
response_code = response.code
self.status_progress(return_obj, response_code, raw_response, limit)
except Exception as err:
response_error = err
ErrorResponder.fill_error(return_obj, response_error, ['message'])
return return_obj
def status_progress(self, return_obj, response_code, raw_response, limit):
"""
        Status progress calculation
        :param return_obj: dict, return response dict being built
        :param response_code: int, API call response code
        :param raw_response: bytes, API response body
        :param limit: int, limit used for progress calculation
        """
if 199 < response_code < 300:
response_dict = json.loads(raw_response)
return_obj['success'] = True
return_obj['status'] = self.__getStatus(response_dict['status'])
results = int(response_dict['hit'])
if return_obj['status'] == 'COMPLETED':
return_obj['progress'] = 100
elif return_obj['status'] == 'RUNNING':
progress = (results / int(limit)) * 100
progress_floor = math.floor(progress)
return_obj['progress'] = progress_floor
if return_obj['progress'] >= 100:
return_obj['progress'] = 100
return_obj['status'] = 'COMPLETED'
else:
return_obj['progress'] = 0
# arcsight logger error codes - currently unavailable state
elif response_code in [500, 503]:
response_string = raw_response.decode()
ErrorResponder.fill_error(return_obj, response_string, ['message'])
elif isinstance(json.loads(raw_response), dict):
response_error = json.loads(raw_response)
response_dict = response_error['errors'][0]
ErrorResponder.fill_error(return_obj, response_dict, ['message'])
else:
raise Exception(raw_response)
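# --- Illustrative usage sketch (not part of the original file) ---
# A stub api_client standing in for the real ArcSight Logger client; the JSON
# payload, response code and session ids below are invented purely to show the
# "search_session_id:user_session_id[:limit]" format expected for search_id.
if __name__ == "__main__":
    class _StubResponse:
        code = 200
        def read(self):
            return json.dumps({"status": "running", "hit": 2500}).encode()
    class _StubApiClient:
        def get_search_status(self, search_session_id, user_session_id):
            return _StubResponse()
    connector = StatusConnector(_StubApiClient())
    # {'success': True, 'status': 'RUNNING', 'progress': 50}
    print(connector.create_status_connection("1568889:hP44FBrZ:5000"))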
|
fedlab/core/network.py | SMILELab-FL/FedLab | 171 | 11109558 | <filename>fedlab/core/network.py
# Copyright 2021 Peng Cheng Laboratory (http://www.szpclab.com/) and FedLab Authors (smilelab.group)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch.distributed as dist
class DistNetwork(object):
"""Manage ``torch.distributed`` network.
Args:
        address (tuple): Address of this server in form of ``(SERVER_IP, SERVER_PORT)``
world_size (int): the size of this distributed group (including server).
rank (int): the rank of process in distributed group.
        ethernet (str): name of the network interface to bind (sets ``GLOO_SOCKET_IFNAME``). Default: ``None``.
dist_backend (str or torch.distributed.Backend): :attr:`backend` of ``torch.distributed``. Valid values include ``mpi``, ``gloo``, and ``nccl``. Default: ``"gloo"``.
"""
def __init__(self,
address,
world_size,
rank,
ethernet=None,
dist_backend="gloo"):
super(DistNetwork, self).__init__()
self.address = address
self.rank = rank
self.world_size = world_size
self.dist_backend = dist_backend
self.ethernet = ethernet
def init_network_connection(self):
"""Initialize ``torch.distributed`` communication group"""
print(self.__str__())
if self.ethernet is not None:
os.environ["GLOO_SOCKET_IFNAME"] = self.ethernet
dist.init_process_group(
backend=self.dist_backend,
init_method="tcp://{}:{}".format(self.address[0], self.address[1]),
rank=self.rank,
world_size=self.world_size,
)
def close_network_connection(self):
"""Destroy current ``torch.distributed`` process group"""
if dist.is_initialized():
dist.destroy_process_group()
def __str__(self):
info_str = "torch.distributed connection is initializing with server ip address {}:{}, rank {}, world size: {}, backend {}, ethernet {}.".format(
self.address[0],
self.address[1],
self.rank,
self.world_size,
self.dist_backend,
self.ethernet,
)
return info_str
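# --- Illustrative usage sketch (not part of the original file) ---
# Rank-0 wiring only, with placeholder address/world size; every participating
# process must construct a DistNetwork with the same address and world_size
# (and its own rank) for the group to form, so this call blocks until all
# ranks have joined.
if __name__ == "__main__":
    network = DistNetwork(address=("127.0.0.1", 3002),
                          world_size=2,
                          rank=0,
                          ethernet=None,
                          dist_backend="gloo")
    network.init_network_connection()
    # ... exchange tensors with torch.distributed primitives here ...
    network.close_network_connection()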
|
tests/clpy_tests/opencl_tests/test_rollaxis.py | fixstars/clpy | 142 | 11109561 | # -*- coding: utf-8 -*-
import unittest
import clpy
import numpy
class TestRollaxis(unittest.TestCase):
"""test class of rollaxis"""
def test_import(self):
        self.assertTrue(True)  # Always OK if no exception from import
def test_2_3_matrix(self):
npA = numpy.array([[1, 2, 3], [4, 5, 6]])
expectedA = numpy.rollaxis(npA, 1, 0)
clpA = clpy.array([[1, 2, 3], [4, 5, 6]])
actualA = clpy.rollaxis(clpA, 1, 0)
self.assertTrue(numpy.allclose(expectedA, actualA.get()))
def test_2_3_4_matrix(self):
npA = numpy.array([[[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]], [
[4, 5, 6, 7], [5, 6, 7, 8], [6, 7, 8, 9]]])
expectedA = numpy.rollaxis(npA, 1, 0)
clpA = clpy.array([[[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]], [
[4, 5, 6, 7], [5, 6, 7, 8], [6, 7, 8, 9]]])
actualA = clpy.rollaxis(clpA, 1, 0)
self.assertTrue(numpy.allclose(expectedA, actualA.get()))
if __name__ == "__main__":
unittest.main()
|
lib/datasets/tless/handle_rendering_data.py | bertid/clean-pvnet | 284 | 11109601 | <reponame>bertid/clean-pvnet
import os
import glob
from PIL import Image
import numpy as np
import tqdm
from lib.utils.renderer import render_utils
from lib.utils.base_utils import read_pickle
import json
import cv2
blender = '/home/pengsida/Software/blender-2.79a-linux-glibc219-x86_64/blender'
blank_blend = 'lib/datasets/tless/blank.blend'
backend = 'lib/datasets/tless/render_backend.py'
data_root = 'data/tless'
ply_path_pattern = os.path.join(data_root, 'models_cad/colobj_{:02}.ply')
output_dir_pattern = os.path.join(data_root, 'renders/{}')
def get_bg_imgs():
bg_path = 'data/tless/bg_imgs.npy'
bg_dataset = 'data/sun/JPEGImages'
if os.path.exists(bg_path):
return
img_paths = glob.glob(os.path.join(bg_dataset, '*.jpg'))
bg_imgs = []
for img_path in tqdm.tqdm(img_paths):
img = Image.open(img_path)
row, col = img.size
if row > 500 and col > 500:
bg_imgs.append(img_path)
np.save(bg_path, bg_imgs)
def get_poses(num_samples):
euler = render_utils.ViewpointSampler.sample_sphere(num_samples)
x = np.random.uniform(-0.01, 0.01, num_samples)
y = np.random.uniform(-0.01, 0.01, num_samples)
z = np.random.uniform(0.18, 0.2, num_samples)
# z = np.random.uniform(0.27, 0.30, num_samples)
translation = np.stack([x, y, z], axis=1)
poses = np.concatenate([euler, translation], axis=1)
np.save('data/tless/poses.npy', poses)
def _render(obj_id, num_syn):
ply_path = ply_path_pattern.format(obj_id)
output_dir = output_dir_pattern.format(obj_id)
os.system('{} {} --background --python {} -- --cad_path {} --output_dir {} --num_syn {}'.format(blender, blank_blend, backend, ply_path, output_dir, num_syn))
depth_paths = glob.glob(os.path.join(output_dir, '*.exr'))
for depth_path in depth_paths:
render_utils.exr_to_png(depth_path)
def render():
get_bg_imgs()
num_syn = 5000
get_poses(num_syn)
obj_ids = [30]
for obj_id in obj_ids:
_render(obj_id, num_syn)
def render_to_coco():
data_root = 'data/tless/renders/'
obj_ids = [i + 1 for i in range(30)]
img_id = 0
ann_id = 0
images = []
annotations = []
for obj_id in tqdm.tqdm(obj_ids):
obj_dir = os.path.join(data_root, str(obj_id))
pkl_paths = glob.glob(os.path.join(obj_dir, '*.pkl'))
for pkl_path in tqdm.tqdm(pkl_paths):
rgb_path = pkl_path.replace('_RT.pkl', '.png')
mask_path = pkl_path.replace('_RT.pkl', '_depth.png')
if not os.path.exists(rgb_path) or not os.path.exists(mask_path):
continue
rgb = Image.open(rgb_path)
img_size = rgb.size
img_id += 1
info = {'rgb_path': rgb_path, 'height': img_size[1], 'width': img_size[0], 'id': img_id}
images.append(info)
K_P = read_pickle(pkl_path)
ann_id += 1
anno = {'mask_path': mask_path, 'image_id': img_id, 'category_id': obj_id, 'id': ann_id}
anno.update({'K': K_P['K'].tolist(), 'pose': K_P['RT'].tolist()})
annotations.append(anno)
categories = [{'supercategory': 'none', 'id': obj_id, 'name': str(obj_id)} for obj_id in obj_ids]
instance = {'images': images, 'annotations': annotations, 'categories': categories}
anno_path = os.path.join(data_root, 'render.json')
with open(anno_path, 'w') as f:
json.dump(instance, f)
|
all/bodies/vision.py | kcorder/autonomous-learning-library | 584 | 11109603 | import torch
from all.core import State, StateArray
from ._body import Body
class FrameStack(Body):
def __init__(self, agent, size=4, lazy=False):
super().__init__(agent)
self._frames = []
self._size = size
self._lazy = lazy
self._to_cache = TensorDeviceCache()
def process_state(self, state):
if not self._frames:
self._frames = [state.observation] * self._size
else:
self._frames = self._frames[1:] + [state.observation]
if self._lazy:
return LazyState.from_state(state, self._frames, self._to_cache)
if isinstance(state, StateArray):
return state.update('observation', torch.cat(self._frames, dim=1))
return state.update('observation', torch.cat(self._frames, dim=0))
class TensorDeviceCache:
'''
    To efficiently implement device transfer of lazy states, this class
    caches the transferred tensor so that it is not copied multiple times.
'''
def __init__(self, max_size=16):
self.max_size = max_size
self.cache_data = []
def convert(self, value, device):
cached = None
for el in self.cache_data:
if el[0] is value:
cached = el[1]
break
if cached is not None and cached.device == torch.device(device):
new_v = cached
else:
new_v = value.to(device)
self.cache_data.append((value, new_v))
if len(self.cache_data) > self.max_size:
self.cache_data.pop(0)
return new_v
class LazyState(State):
@classmethod
def from_state(cls, state, frames, to_cache):
state = LazyState(state, device=frames[0].device)
state.to_cache = to_cache
state['observation'] = frames
return state
def __getitem__(self, key):
if key == 'observation':
v = dict.__getitem__(self, key)
if torch.is_tensor(v):
return v
return torch.cat(dict.__getitem__(self, key), dim=0)
return super().__getitem__(key)
def update(self, key, value):
x = {}
for k in self.keys():
if not k == key:
x[k] = dict.__getitem__(self, k)
x[key] = value
state = LazyState.from_state(x, x['observation'], self.to_cache)
return state
def to(self, device):
if device == self.device:
return self
x = {}
for key, value in self.items():
if key == 'observation':
x[key] = [self.to_cache.convert(v, device) for v in value]
# x[key] = [v.to(device) for v in value]#torch.cat(value,axis=0).to(device)
elif torch.is_tensor(value):
x[key] = value.to(device)
else:
x[key] = value
state = LazyState.from_state(x, x['observation'], self.to_cache)
return state
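# --- Illustrative usage sketch (not part of the original file) ---
# Demonstrates the caching behaviour of TensorDeviceCache: converting the same
# tensor to the same device twice returns the cached object instead of creating
# a new copy. CPU-only here; swap 'cpu' for 'cuda' on a GPU machine.
if __name__ == "__main__":
    cache = TensorDeviceCache(max_size=4)
    frame = torch.ones(1, 84, 84)
    first = cache.convert(frame, "cpu")
    second = cache.convert(frame, "cpu")
    print(first is second)  # True: the second call is served from the cache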
|
QA/co_optimization_parallel_plate_waveguide_test.py | jbellevi/lumopt | 101 | 11109618 | """ Copyright chriskeraly
Copyright (c) 2019 Lumerical Inc. """
import sys
sys.path.append(".")
import os
import numpy as np
from qatools import *
from lumopt.utilities.wavelengths import Wavelengths
from lumopt.utilities.materials import Material
from lumopt.geometries.polygon import FunctionDefinedPolygon
from lumopt.figures_of_merit.modematch import ModeMatch
from lumopt.optimizers.generic_optimizers import ScipyOptimizers
from lumopt.optimization import Optimization
class TestCoOptimizationParallelPlateWaveguide(TestCase):
"""
Unit test for the Optimization class. It performs a co-optimization using a parallel plate waveguide
filled by a dielectric excited with two different polarizations (TE and TM). The waveguide has a gap
that must be filled all the way to maximize transmission.
"""
file_dir = os.path.abspath(os.path.dirname(__file__))
def setUp(self):
# Base simulation project files
self.base_TE_sim = os.path.join(self.file_dir, 'co_optimization_parallel_plate_waveguide_TE_base.fsp')
self.base_TM_sim = os.path.join(self.file_dir, 'co_optimization_parallel_plate_waveguide_TM_base.fsp')
# Simulation bandwidth
self.wavelengths = Wavelengths(start = 1500e-9,
stop = 1600e-9,
points = 12)
# Polygon defining a rectangle that can grow or shrink along the y-axis to fill the gap
        self.mesh_del = 10.0e-9  # must be kept in sync with the base simulation projects
initial_points_y = np.array([1.75 * self.mesh_del, 0.01 * self.mesh_del])
def wall(param = initial_points_y):
assert param.size == 2, "walls defined by two points."
            self.wg_gap = 10.0 * self.mesh_del # must be kept in sync
points_x = 0.5 * np.array([-self.wg_gap, self.wg_gap, self.wg_gap, -self.wg_gap])
points_y = np.array([-param[0], -param[1], param[1], param[0]])
polygon_points = [(x, y) for x, y in zip(points_x, points_y)]
return np.array(polygon_points)
        self.wg_width = 50.0 * self.mesh_del # must be kept in sync
bounds = [(0.0, self.wg_width / 2.0)] * initial_points_y.size
self.geometry = FunctionDefinedPolygon(func = wall,
initial_params = initial_points_y,
bounds = bounds,
                                               z = 0.0, # must be kept in sync
depth = self.wg_width,
                                               eps_out = Material(base_epsilon = 1.0 ** 2, name = '<Object defined dielectric>', mesh_order = 2), # must be kept in sync
                                               eps_in = Material(base_epsilon = 4.0 ** 2, name = '<Object defined dielectric>', mesh_order = 1), # must be kept in sync
edge_precision = 50,
dx = 1.0e-10)
# Figure of merit
        self.fom = ModeMatch(monitor_name = 'fom', # must be kept in sync
                             mode_number = 1, # must be kept in sync
direction = 'Forward',
multi_freq_src = True,
target_T_fwd = lambda wl: np.ones(wl.size),
norm_p = 1)
# Scipy optimizer
self.optimizer = ScipyOptimizers(max_iter = 5,
method = 'L-BFGS-B',
scaling_factor = 1.0e7,
pgtol = 1.0e-5,
ftol = 1.0e-12,
target_fom = 0.0,
scale_initial_gradient_to = None)
def test_co_optimization_in_2D(self):
print("2D TE-TM co-optimization (use_deps = True): ")
#base_script, wavelengths, fom, geometry, optimizer, use_var_fdtd = False, hide_fdtd_cad = False, use_deps = True, plot_history = True, store_all_simulations = True
optTE = Optimization(base_script = self.base_TE_sim,
wavelengths = self.wavelengths,
fom = self.fom,
geometry = self.geometry,
optimizer = self.optimizer,
use_var_fdtd = False,
hide_fdtd_cad = True,
use_deps = True,
plot_history = False,
store_all_simulations = False)
optTM = Optimization(base_script = self.base_TM_sim,
wavelengths = self.wavelengths,
fom = self.fom,
geometry = self.geometry,
optimizer = self.optimizer,
use_var_fdtd = False,
hide_fdtd_cad = True,
use_deps = True,
plot_history = False,
store_all_simulations = False)
opt = optTE + optTM
fom, params = opt.run()
self.assertGreaterEqual(fom, 1.99990)
reference_value = self.wg_width / 2.0 * self.optimizer.scaling_factor[0]
self.assertAlmostEqual(params[0], reference_value)
self.assertAlmostEqual(params[1], reference_value)
if __name__ == "__main__":
run([__file__])
|
SMPyBandits/Policies/klUCBPlusPlus.py | balbok0/SMPyBandits | 309 | 11109658 | # -*- coding: utf-8 -*-
""" The improved kl-UCB++ policy, for one-parameter exponential distributions.
Reference: [Menard & Garivier, ALT 2017](https://hal.inria.fr/hal-01475078)
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.5"
from math import log
import numpy as np
np.seterr(divide='ignore') # XXX dangerous in general, controlled here!
try:
from .kullback import klucbBern
from .klUCB import klUCB, c
except ImportError:
from kullback import klucbBern
from klUCB import klUCB, c
# --- Numerical functions required for the function g(n) for kl-UCB++
def logplus(x):
"""..math:: \log^+(x) := \max(0, \log(x))."""
return max(0., log(x))
def g(t, T, K):
r"""The exploration function g(t) (for t current time, T horizon, K nb arms), as defined in page 3 of the reference paper.
.. math::
g(t, T, K) &:= \log^+(y (1 + \log^+(y)^2)),\\
y &:= \frac{T}{K t}.
"""
y = T / (K * t)
return max(0., log(y * (1. + max(0., log(y)) ** 2)))
def g_vect(t, T, K):
r"""The exploration function g(t) (for t current time, T horizon, K nb arms), as defined in page 3 of the reference paper, for numpy vectorized inputs.
.. math::
g(t, T, K) &:= \log^+(y (1 + \log^+(y)^2)),\\
y &:= \frac{T}{K t}.
"""
y = T / (K * t)
return np.maximum(0., np.log(y * (1. + np.maximum(0., np.log(y)) ** 2)))
class klUCBPlusPlus(klUCB):
""" The improved kl-UCB++ policy, for one-parameter exponential distributions.
Reference: [Menard & Garivier, ALT 2017](https://hal.inria.fr/hal-01475078)
"""
def __init__(self, nbArms, horizon=None, tolerance=1e-4, klucb=klucbBern, c=c, lower=0., amplitude=1.):
super(klUCBPlusPlus, self).__init__(nbArms, tolerance=tolerance, klucb=klucb, c=c, lower=lower, amplitude=amplitude)
self.nbArms = float(self.nbArms) # Just speed up type casting by forcing it to be a float
self.horizon = int(horizon) #: Parameter :math:`T` = known horizon of the experiment.
def __str__(self):
name = "" if self.klucb.__name__[5:] == "Bern" else ", " + self.klucb.__name__[5:]
complement = "$T={}${}{}".format(self.horizon, name, "" if self.c == 1 else r", $c={:.3g}$".format(self.c))
return r"kl-UCB{}({})".format("$^{++}$", complement)
def computeIndex(self, arm):
r""" Compute the current index, at time t and after :math:`N_k(t)` pulls of arm k:
.. math::
\hat{\mu}_k(t) &= \frac{X_k(t)}{N_k(t)}, \\
U_k(t) &= \sup\limits_{q \in [a, b]} \left\{ q : \mathrm{kl}(\hat{\mu}_k(t), q) \leq \frac{c g(N_k(t), T, K)}{N_k(t)} \right\},\\
I_k(t) &= U_k(t).
If rewards are in :math:`[a, b]` (default to :math:`[0, 1]`) and :math:`\mathrm{kl}(x, y)` is the Kullback-Leibler divergence between two distributions of means x and y (see :mod:`Arms.kullback`),
and c is the parameter (default to 1), and where :math:`g(t, T, K)` is this function:
.. math::
g(t, T, K) &:= \log^+(y (1 + \log^+(y)^2)),\\
y &:= \frac{T}{K t}.
"""
if self.pulls[arm] < 1:
return float('+inf')
else:
# XXX We could adapt tolerance to the value of self.t
return self.klucb(self.rewards[arm] / self.pulls[arm], self.c * g(self.pulls[arm], self.horizon, self.nbArms) / self.pulls[arm], self.tolerance)
def computeAllIndex(self):
""" Compute the current indexes for all arms, in a vectorized manner."""
indexes = self.klucb_vect(self.rewards / self.pulls, self.c * g_vect(self.pulls, self.horizon, self.nbArms) / self.pulls, self.tolerance)
indexes[self.pulls < 1] = float('+inf')
self.index[:] = indexes
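# --- Illustrative numerical check (not part of the original file) ---
# Evaluates the exploration function g(t, T, K) for a few pull counts, with an
# assumed horizon T = 10000 and K = 10 arms: the exploration bonus shrinks as
# an arm gets pulled more often and vanishes once N_k(t) reaches T / K.
if __name__ == "__main__":
    T, K = 10000, 10
    for t in [1, 10, 100, 1000]:
        print(t, g(t, T, K), g_vect(np.array([t], dtype=float), T, K)[0])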
|
mordred/tests/__init__.py | UnixJunkie/mordred | 199 | 11109689 | <filename>mordred/tests/__init__.py
"""Mordred test package."""
|
examples/guv_simple_server.py | timgates42/guv | 120 | 11109730 | <reponame>timgates42/guv
"""Simple low-level network server
This module demonstrates how to use guv to create fast network servers. In addition, it can be
used to serve valid HTTP (as far as ``wrk`` is concerned) to benchmark concurrency and requests/sec.
Three basic client handlers are provided:
- :func:`handle_http_10` acts as an HTTP 1.0 server which sends a static message and closes the
connection (HTTP header ``Connection: close``, which is default for HTTP 1.0).
- :func:`handle_http_11` acts as an HTTP 1.1 server which sends a static message, but keeps the
connection alive (HTTP header ``Connection: keep-alive``, which is default for HTTP 1.1).
- :func:`handle_http` is a slightly more complex client handler which actually reads the client's
request and decides to either close or keep-alive the connection based on the HTTP version and
what the client wants. If the connection is to be kept alive, this handler cooperatively yields
control to other greenlets after every request, which significantly improves request/response
latency (as reported by wrk).
"""
import guv
guv.monkey_patch()
import guv.server
import guv.hubs
import guv.greenio
from guv import gyield
from guv.support import PYPY
import logging
import logger
logger.configure()
log = logging.getLogger()
if PYPY:
from http_parser.pyparser import HttpParser
log.debug('Using pure-Python HTTP parser')
USING_PYPARSER = True
else:
from http_parser.parser import HttpParser
log.debug('Using fast C HTTP parser')
USING_PYPARSER = False
def create_response(body, headers):
"""Create a simple HTTP response
:type body: str
:type headers: dict[str, str]
:rtype: bytes
"""
final_headers = {
'Connection': 'keep-alive',
'Content-Type': 'text/plain; charset=utf-8',
'Content-Encoding': 'UTF-8'
}
final_headers.update(headers)
lines = ['HTTP/1.1 200 OK']
lines.extend(['%s: %s' % (k, v) for k, v in final_headers.items()])
lines.append('Content-Length: %s' % len(body))
resp = ('\r\n'.join(lines)).encode('latin-1')
resp += ('\r\n\r\n' + body).encode(final_headers['Content-Encoding'])
return resp
def handle_http_10(sock, addr):
"""Very minimal client handler for HTTP 1.0 (Connection: close)
"""
data = sock.recv(4096)
if not data:
return
resp = create_response('Hello, world!', {'Connection': 'close'})
sock.sendall(resp)
sock.close()
def handle_http_11(sock, addr):
"""Very minimal client handler for HTTP 1.1 (Connection: keep-alive)
"""
while True:
data = sock.recv(4096)
if not data:
break
resp = create_response('Hello, world!', {'Connection': 'keep-alive'})
sock.sendall(resp)
sock.close()
def handle_http(sock, addr):
"""A more complicated handler which detects HTTP headers
"""
def recv_request(p):
while True:
data = sock.recv(8192)
if not data:
return False
nb = len(data)
nparsed = p.execute(data, nb)
assert nparsed == nb
if USING_PYPARSER and p.is_headers_complete():
h = p.get_headers()
if not (h.get('content-length') or h.get('transfer-length')):
# pass length=0 to signal end of body
# TODO: pyparser requires this, but not the C parser for some reason
p.execute(data, 0)
return True
if p.is_message_complete():
return True
# main request loop
while True:
p = HttpParser()
if not recv_request(p):
break
h = p.get_headers()
ka = p.should_keep_alive()
h_connection = 'keep-alive' if ka else 'close'
resp = create_response('Hello, world!', {'Connection': h_connection})
sock.sendall(resp)
if not ka:
break
else:
# we should keep-alive, but yield to drastically improve overall request/response
# latency
gyield()
sock.close()
handle = handle_http
def main():
try:
log.debug('Start')
server_sock = guv.listen(('0.0.0.0', 8001))
server = guv.server.Server(server_sock, handle, None, None)
server.start()
except (SystemExit, KeyboardInterrupt):
log.debug('Bye!')
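# A minimal way to exercise the server by hand (hypothetical helper, not part of the
# original handlers): from another process, open a plain socket, send one HTTP/1.1
# request, and return the first chunk of the raw response.
def probe(host='127.0.0.1', port=8001):
    import socket
    with socket.create_connection((host, port)) as conn:
        conn.sendall(b'GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        return conn.recv(65536).decode('latin-1')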
if __name__ == '__main__':
main()
|
test/libs/Hybrid.py | kavyapnaik/PythonRemoteServer | 118 | 11109747 | <filename>test/libs/Hybrid.py
class Hybrid(object):
def get_keyword_names(self):
return ['passing', 'failing', 'logging', 'returning', 'kwargs']
def __getattr__(self, name):
try:
return globals()[name]
except KeyError:
raise AttributeError(name)
def passing(arg=None):
assert not arg or '=' not in arg
def failing(message):
raise AssertionError(message)
def logging(message, level='INFO'):
print('*%s* %s' % (level, message))
def returning():
return 'Hello, world!'
def kwargs(expected, **kws):
actual = ', '.join('%s: %s' % (k, kws[k]) for k in sorted(kws))
assert actual == expected
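# Quick sanity check of the hybrid lookup (a sketch, not used by the remote server):
# get_keyword_names advertises the keywords, and __getattr__ resolves each name to
# the module-level function of the same name.
def _self_check():
    lib = Hybrid()
    assert lib.returning() == 'Hello, world!'
    lib.kwargs('a: 1, b: 2', a=1, b=2)
    lib.logging('hello from the hybrid library', 'DEBUG')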
if __name__ == '__main__':
import sys
from robotremoteserver import RobotRemoteServer
RobotRemoteServer(Hybrid(), '127.0.0.1', *sys.argv[1:])
|
next_steps/operations/ml_ops_ds_sdk/lambda/stepfunction_waitforDatasetGroup.py | kamoljan/amazon-personalize-samples | 442 | 11109757 | <gh_stars>100-1000
import json
import base64
import boto3
personalize = boto3.client('personalize')
personalize_runtime = boto3.client('personalize-runtime')
def lambda_handler(event, context):
    # Look up the dataset group created earlier in the workflow so its status can
    # be reported back to the Step Functions state machine.
    datasetGroupArnVal = event['input']
    describe_dataset_group_response = personalize.describe_dataset_group(
        datasetGroupArn=datasetGroupArnVal
    )
status = describe_dataset_group_response["datasetGroup"]["status"]
print("DatasetGroup: {}".format(status))
return {
'status': status,
'DatasetGroup': status,
'datasetGroupArn': datasetGroupArnVal,
'schemaArn': event['schemaArn']
}
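# Quick local smoke test with hypothetical ARNs; describe_dataset_group is called for
# real, so valid AWS credentials and an existing dataset group are required.
if __name__ == '__main__':
    example_event = {
        'input': 'arn:aws:personalize:us-east-1:123456789012:dataset-group/example',
        'schemaArn': 'arn:aws:personalize:us-east-1:123456789012:schema/example',
    }
    print(json.dumps(lambda_handler(example_event, None), indent=2))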
|
tests/integration/butterfree/load/conftest.py | fossabot/butterfree | 208 | 11109759 | from pytest import fixture
from butterfree.constants import DataType, columns
from butterfree.transform import FeatureSet
from butterfree.transform.features import Feature, KeyFeature, TimestampFeature
@fixture
def input_dataframe(spark_context, spark_session):
data = [
{
"id": 1,
"timestamp": "2019-12-01",
"feature": 100,
columns.PARTITION_YEAR: 2019,
columns.PARTITION_MONTH: 12,
columns.PARTITION_DAY: 1,
},
{
"id": 2,
"timestamp": "2020-01-01",
"feature": 200,
columns.PARTITION_YEAR: 2020,
columns.PARTITION_MONTH: 1,
columns.PARTITION_DAY: 1,
},
{
"id": 1,
"timestamp": "2020-02-01",
"feature": 110,
columns.PARTITION_YEAR: 2020,
columns.PARTITION_MONTH: 2,
columns.PARTITION_DAY: 1,
},
{
"id": 1,
"timestamp": "2020-02-02",
"feature": 120,
columns.PARTITION_YEAR: 2020,
columns.PARTITION_MONTH: 2,
columns.PARTITION_DAY: 2,
},
]
return spark_session.read.json(spark_context.parallelize(data, 1))
@fixture
def feature_set():
key_features = [
KeyFeature(name="id", description="Description", dtype=DataType.INTEGER)
]
ts_feature = TimestampFeature(from_column="timestamp")
features = [
Feature(name="feature", description="Description", dtype=DataType.FLOAT),
]
return FeatureSet(
"test_sink_feature_set",
"test_sink_entity",
"description",
keys=key_features,
timestamp=ts_feature,
features=features,
)
|
tests/cell_fabric/test_gen_lef.py | pretl/ALIGN-public | 119 | 11109762 | <filename>tests/cell_fabric/test_gen_lef.py
from align.cell_fabric import gen_lef, pdk
import filecmp
import pathlib
mydir = pathlib.Path(__file__).resolve().parent
pdkdir = pathlib.Path(__file__).parent.parent.parent / "pdks" / "FinFET14nm_Mock_PDK"
p = pdk.Pdk().load(pdkdir / 'layers.json')
def test_lef():
block_name = "foo"
json_file_name = mydir / "__json_diff_pair_cand_lef"
with open( json_file_name, "rt") as fp0, \
open( mydir / "foo.lef_cand", 'wt') as fp1:
gen_lef.json_lef( json_file_name, 'foo_cand',
cell_pin=['DA','DB','S', 'B'], bodyswitch=0, blockM=0, p=p)
assert filecmp.cmp( mydir / "foo_cand.lef", mydir / "foo.lef_gold")
|
resnet/frelu.py | megvii-model/FunnelAct | 178 | 11109785 | <gh_stars>100-1000
import megengine.functional as F
import megengine.module as M
class FReLU(M.Module):
r""" FReLU formulation. The funnel condition has a window size of kxk. (k=3 by default)
"""
def __init__(self, in_channels):
super().__init__()
self.conv_frelu = M.Conv2d(in_channels, in_channels, 3, 1, 1, groups=in_channels)
self.bn_frelu = M.BatchNorm2d(in_channels)
def forward(self, x):
x1 = self.conv_frelu(x)
x1 = self.bn_frelu(x1)
x = F.maximum(x, x1)
return x
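# Minimal usage sketch (assumes MegEngine is installed; shapes are illustrative):
# FReLU drops in wherever a channel-wise activation would be used on NCHW feature
# maps, with in_channels matching the channel dimension of the input.
if __name__ == "__main__":
    import numpy as np
    import megengine as mge
    x = mge.tensor(np.random.randn(2, 16, 32, 32).astype("float32"))
    act = FReLU(16)
    y = act(x)
    print(y.shape)  # expected: (2, 16, 32, 32)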
|