max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
Mythras/upload2roll20.py | ManuZzZ85/roll20-character-sheets | 1,104 | 11153410 | import json
import requests
from pathlib import Path
home = str(Path.home())
with open("{}/.roll20/config.json".format(home)) as config_file:
configs = json.load(config_file)
login_data = {'email': configs['email'], 'password': configs['password']}
roll20session = requests.Session()
login_result = roll20session.post('https://app.roll20.net/sessions/create', login_data)
if login_result:
print("Roll20 login successful.")
else:
print("Error logging into Roll20!")
exit(1)
with open('Mythras.html', 'r') as html_file:
html_src = html_file.read()
with open('Mythras.css', 'r') as css_file:
css_src = css_file.read()
with open('translation.json', 'r') as translation_file:
translation_src = translation_file.read()
sheet_data = {
'publicaccess': 'true',
'bgimage': 'none',
'allowcharacterimport': 'true',
'scale_units': 'ft',
'grid_type': 'square',
'diagonaltype': 'foure',
'bar_location': 'above',
'barStyle': 'standard',
'compendium_override': '',
'sharecompendiums': 'false',
'charsheettype': 'custom',
'customcharsheet_layout': html_src,
'customcharsheet_style': css_src,
'customcharsheet_translation': translation_src
}
upload_result = roll20session.post("https://app.roll20.net/campaigns/savesettings/{}".format(configs['campaign']), sheet_data)
if upload_result:
print("Sheet uploaded successfully.")
else:
print("Error uploading sheet content!")
exit(2)
|
win/pywinauto/XMLHelpers.py | sk8darr/BrowserRefresh-Sublime | 191 | 11153415 | # GUI Application automation and testing library
# Copyright (C) 2006 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""Module containing operations for reading and writing dialogs as XML
"""
__revision__ = "$Revision: 736 $"
# how should we read in the XML file
# NOT USING MS Components (requirement on machine)
# maybe using built in XML
# maybe using elementtree
# others?
#import elementtree
try:
# Python 2.5 (thanks to <NAME>)
from xml.etree.ElementTree import Element, SubElement, ElementTree
from xml.etree.cElementTree import Element, SubElement, ElementTree
except ImportError:
from elementtree.ElementTree import Element, SubElement, ElementTree
from cElementTree import Element, SubElement, ElementTree
import ctypes
import re
import PIL.Image
from . import controls
# reported that they are not used - but in fact they are
# through a search of globals()
from .win32structures import LOGFONTW, RECT
class XMLParsingError(RuntimeError):
"Wrap parsing Exceptions"
pass
#DONE: Make the dialog reading function not actually know about the
# types of each element (so that we can read the control properties
# without having to know each and every element type)
# probably need to store info on what type things are.
#
# - if it is a ctypes struct then there is a __type__ field
# which says what kind of struct it is
# - If it is an image then "_IMG" is appended to the element tag
# - if it is a long then _LONG is appended to the attribute name
# everything else is considered a string!
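# Illustrative sketch (not part of the original module): under the rules above,
# a RECT structure attribute named "Rectangle" would be written out roughly as
#     <Rectangle __type__="RECT" left_LONG="0" top_LONG="0" right_LONG="100" bottom_LONG="50"/>
# an image attribute named "Image" becomes a sub-element tagged "Image_IMG"
# holding mode/size_x/size_y/data, and a plain string attribute keeps its name unchanged.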
#-----------------------------------------------------------------------------
def _SetNodeProps(element, name, value):
"Set the properties of the node based on the type of object"
# if it is a ctypes structure
if isinstance(value, ctypes.Structure):
# create an element for the structure
struct_elem = SubElement(element, name)
#clsModule = value.__class__.__module__
cls_name = value.__class__.__name__
struct_elem.set("__type__", "%s" % cls_name)
# iterate over the fields in the structure
for prop_name in value._fields_:
prop_name = prop_name[0]
item_val = getattr(value, prop_name)
if isinstance(item_val, int):
prop_name += "_LONG"
item_val = str(item_val)
struct_elem.set(prop_name, _EscapeSpecials(item_val))
elif hasattr(value, 'tostring') and hasattr(value, 'size'):
try:
# if the image is too big then don't try to
# write it out - it would probably produce a MemoryError
# anyway
if value.size[0] * value.size[1] > (5000*5000):
raise MemoryError
image_data = value.tostring().encode("bz2").encode("base64")
_SetNodeProps(
element,
name + "_IMG",
{
"mode": value.mode,
"size_x":value.size[0],
"size_y":value.size[1],
"data":image_data
})
# a system error is raised from time to time when we try to grab
# the image of a control that has 0 height or width
except (SystemError, MemoryError):
pass
elif isinstance(value, (list, tuple)):
# add the element to hold the values
# we do this to be able to support empty lists
listelem = SubElement(element, name + "_LIST")
for i, attrval in enumerate(value):
_SetNodeProps(listelem, "%s_%05d"%(name, i), attrval)
elif isinstance(value, dict):
dict_elem = SubElement(element, name)
for item_name, val in list(value.items()):
_SetNodeProps(dict_elem, item_name, val)
else:
if isinstance(value, bool):
value = int(value)
if isinstance(value, int):
name += "_LONG"
element.set(name, _EscapeSpecials(value))
#-----------------------------------------------------------------------------
def WriteDialogToFile(filename, props):
"""Write the props to the file
props can be either a dialog or a dictionary
"""
# if we are passed in a wrapped handle then
# get the properties
try:
list(props[0].keys())
except (TypeError, AttributeError):
props = controls.GetDialogPropsFromHandle(props)
# build a tree structure
root = Element("DIALOG")
root.set("_version_", "2.0")
for ctrl in props:
ctrlelem = SubElement(root, "CONTROL")
for name, value in sorted(ctrl.items()):
_SetNodeProps(ctrlelem, name, value)
# wrap it in an ElementTree instance, and save as XML
tree = ElementTree(root)
tree.write(filename, encoding="utf-8")
#-----------------------------------------------------------------------------
def _EscapeSpecials(string):
"Ensure that some characters are escaped before writing to XML"
# ensure it is unicode
string = str(string)
# escape backslashes
string = string.replace('\\', r'\\')
# escape non printable characters (chars below 32)
for i in range(0, 32):
string = string.replace(chr(i), "\\%02d"%i)
return string
#-----------------------------------------------------------------------------
def _UnEscapeSpecials(string):
"Replace escaped characters with real character"
# Unescape all the escape characters
for i in range(0, 32):
string = string.replace("\\%02d"%i, chr(i))
# convert doubled backslashes to a single backslash
string = string.replace(r'\\', '\\')
return str(string)
#-----------------------------------------------------------------------------
def _XMLToStruct(element, struct_type = None):
"""Convert an ElementTree to a ctypes Struct
If struct_type is not specified then element['__type__']
will be used for the ctypes struct type"""
# handle if we are passed in an element or a dictionary
try:
attribs = element.attrib
except AttributeError:
attribs = element
# if the type has not been passed in
if not struct_type:
# get the type and create an instance of the type
struct = globals()[attribs["__type__"]]()
else:
# create an instance of the type
struct = globals()[struct_type]()
# get the attributes, keyed by their upper case names
struct_attribs = dict([(at.upper(), at) for at in dir(struct)])
# for each of the attributes in the element
for prop_name in attribs:
# get the value
val = attribs[prop_name]
# if the name ends with "_LONG"
if prop_name.endswith("_LONG"):
# get a long value out of the attribute
val = int(val)
prop_name = prop_name[:-5]
# if the value is a string
elif isinstance(val, str):
# make sure it is Unicode
val = str(val)
# now we may have an all upper case attribute name
# but the structure field name will not be upper case
if prop_name.upper() in struct_attribs:
prop_name = struct_attribs[prop_name.upper()]
# set the appropriate attribute of the Struct
setattr(struct, prop_name, val)
# return the struct
return struct
#====================================================================
def _OLD_XMLToTitles(element):
"For OLD XML files convert the titles as a list"
# get all the attribute names
title_names = list(element.keys())
# sort them to make sure we get them in the right order
title_names.sort()
# build up the array
titles = []
for name in title_names:
val = element[name]
val = val.replace('\\n', '\n')
val = val.replace('\\x12', '\x12')
val = val.replace('\\\\', '\\')
titles.append(str(val))
return titles
#====================================================================
# TODO: this function should be broken up into smaller functions
# for each type of processing e.g.
# ElementTo
def _ExtractProperties(properties, prop_name, prop_value):
"""Hmmm - confusing - can't remember exactly how
all these similar functions call each other"""
# get the base property name and number if it in the form
# "PROPNAME_00001" = ('PROPNAME', 1)
prop_name, reqd_index = _SplitNumber(prop_name)
# if there is no required index, and the property
# was not already set - then just set it
# if this is an indexed member of a list
if reqd_index == None:
# Have we hit a property with this name already
if prop_name in properties:
# try to append current value to the property
try:
properties[prop_name].append(prop_value)
# if that fails then we need to make sure that
# the current property is a list and then
# append it
except AttributeError:
new_val = [properties[prop_name], prop_value]
properties[prop_name] = new_val
# No index, no previous property with that name
# - just set the property
else:
properties[prop_name] = prop_value
# OK - so it HAS an index
else:
# make sure that the property is a list
properties.setdefault(prop_name, [])
# make sure that the list has enough elements
while 1:
if len(properties[prop_name]) <= reqd_index:
properties[prop_name].append('')
else:
break
# put our value in at the right index
properties[prop_name][reqd_index] = prop_value
#====================================================================
def _GetAttributes(element):
"Get the attributes from an element"
properties = {}
# get all the attributes
for attrib_name, val in list(element.attrib.items()):
# if it is a 'Long' attribute convert it to a long
if attrib_name.endswith("_LONG"):
val = int(val)
attrib_name = attrib_name[:-5]
else:
# otherwise it is a string - make sure we get it as a unicode string
val = _UnEscapeSpecials(val)
_ExtractProperties(properties, attrib_name, val)
return properties
#====================================================================
number = re.compile(r"^(.*)_(\d{5})$")
def _SplitNumber(prop_name):
"""Return (string, number) for a prop_name in the format string_number
The number part has to be 5 digits long
None is returned if there is no _number part
e.g.
>>> _SplitNumber("NoNumber")
('NoNumber', None)
>>> _SplitNumber("Anumber_00003")
('Anumber', 3)
>>> _SplitNumber("notEnoughDigits_0003")
('notEnoughDigits_0003', None)
"""
found = number.search(prop_name)
if not found:
return prop_name, None
return found.group(1), int(found.group(2))
#====================================================================
def _ReadXMLStructure(control_element):
"""Convert an element into nested Python objects
The values will be returned in a dictionary as following:
- the attributes will be items of the dictionary
for each subelement
+ if it has a __type__ attribute then it is converted to a
ctypes structure
+ if the element tag ends with _IMG then it is converted to
a PIL image
- If there are elements with the same name or attributes with
ordering e.g. texts_00001, texts_00002 they will be put into a
list (in the correct order)
"""
# get the attributes for the current element
properties = _GetAttributes(control_element)
for elem in control_element:
# if it is a ctypes structure
if "__type__" in elem.attrib:
# create a new instance of the correct type
# grab the data
propval = _XMLToStruct(elem)
elif elem.tag.endswith("_IMG"):
elem.tag = elem.tag[:-4]
# get image Attribs
img = _GetAttributes(elem)
data = img['data'].decode('base64').decode('bz2')
propval = PIL.Image.fromstring(
img['mode'],
(img['size_x'], img['size_y']),
data)
elif elem.tag.endswith("_LIST"):
# All this is just to handle the edge case of
# an empty list
elem.tag = elem.tag[:-5]
# read the structure
propval = _ReadXMLStructure(elem)
# if it was empty then convert the returned dict
# to a list
if propval == {}:
propval = list()
# otherwise extract the list out of the returned dict
else:
propval = propval[elem.tag]
else:
propval = _ReadXMLStructure(elem)
_ExtractProperties(properties, elem.tag, propval)
return properties
#====================================================================
def ReadPropertiesFromFile(filename):
"""Return an list of controls from XML file filename"""
# parse the file
parsed = ElementTree().parse(filename)
# Return the list that has been stored under 'CONTROL'
props = _ReadXMLStructure(parsed)['CONTROL']
if not isinstance(props, list):
props = [props]
# it is an old XML so let's fix it up a little
if "_version_" not in parsed.attrib:
# find each of the control elements
for ctrl_prop in props:
ctrl_prop['Fonts'] = [_XMLToStruct(ctrl_prop['FONT'], "LOGFONTW"), ]
ctrl_prop['Rectangle'] = \
_XMLToStruct(ctrl_prop["RECTANGLE"], "RECT")
ctrl_prop['ClientRects'] = [
_XMLToStruct(ctrl_prop["CLIENTRECT"], "RECT"),]
ctrl_prop['Texts'] = _OLD_XMLToTitles(ctrl_prop["TITLES"])
ctrl_prop['Class'] = ctrl_prop['CLASS']
ctrl_prop['ContextHelpID'] = ctrl_prop['HELPID']
ctrl_prop['ControlID'] = ctrl_prop['CTRLID']
ctrl_prop['ExStyle'] = ctrl_prop['EXSTYLE']
ctrl_prop['FriendlyClassName'] = ctrl_prop['FRIENDLYCLASS']
ctrl_prop['IsUnicode'] = ctrl_prop['ISUNICODE']
ctrl_prop['IsVisible'] = ctrl_prop['ISVISIBLE']
ctrl_prop['Style'] = ctrl_prop['STYLE']
ctrl_prop['UserData'] = ctrl_prop['USERDATA']
for prop_name in [
'CLASS',
'CLIENTRECT',
'CTRLID',
'EXSTYLE',
'FONT',
'FRIENDLYCLASS',
'HELPID',
'ISUNICODE',
'ISVISIBLE',
'RECTANGLE',
'STYLE',
'TITLES',
'USERDATA',
]:
del(ctrl_prop[prop_name])
return props
|
qa/qa_network.py | modulus-sa/ganeti | 396 | 11153417 | #
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""QA tests for networks.
"""
from qa import qa_config
from qa import qa_tags
from qa import qa_utils
from ganeti import query
from qa_utils import AssertCommand
def TestNetworkList():
"""gnt-network list"""
qa_utils.GenericQueryTest("gnt-network", list(query.NETWORK_FIELDS))
def TestNetworkListFields():
"""gnt-network list-fields"""
qa_utils.GenericQueryFieldsTest("gnt-network", list(query.NETWORK_FIELDS))
def GetNonexistentNetworks(count):
"""Gets network names which shouldn't exist on the cluster.
@param count: Number of networks to get
@rtype: list of strings
"""
return qa_utils.GetNonexistentEntityNames(count, "networks", "network")
def TestNetworkAddRemove():
"""gnt-network add/remove"""
(network1, network2) = GetNonexistentNetworks(2)
# Add some networks of different sizes.
# Note: Using RFC5737 addresses.
AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/30", network1])
AssertCommand(["gnt-network", "add", "--network", "198.51.100.0/24",
network2])
# Try to add a network with an existing name.
AssertCommand(["gnt-network", "add", "--network", "172.16.58.3/24", network2],
fail=True)
TestNetworkList()
TestNetworkListFields()
AssertCommand(["gnt-network", "remove", network1])
AssertCommand(["gnt-network", "remove", network2])
TestNetworkList()
def TestNetworkTags():
"""gnt-network tags"""
(network, ) = GetNonexistentNetworks(1)
AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/30", network])
qa_tags.TestNetworkTags(network)
AssertCommand(["gnt-network", "remove", network])
def TestNetworkConnect():
"""gnt-network connect/disconnect"""
(group1, ) = qa_utils.GetNonexistentGroups(1)
(network1, ) = GetNonexistentNetworks(1)
default_mode = "bridged"
default_link = "xen-br0"
nicparams = qa_config.get("default-nicparams")
if nicparams:
mode = nicparams.get("mode", default_mode)
link = nicparams.get("link", default_link)
else:
mode = default_mode
link = default_link
nicparams = "mode=%s,link=%s" % (mode, link)
AssertCommand(["gnt-group", "add", group1])
AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/24", network1])
AssertCommand(["gnt-network", "connect", "--nic-parameters", nicparams,
network1, group1])
TestNetworkList()
AssertCommand(["gnt-network", "disconnect", network1, group1])
AssertCommand(["gnt-group", "remove", group1])
AssertCommand(["gnt-network", "remove", network1])
|
tools/rs-sysmon/plugins/dstat_sendmail.py | y-sira/oltpbench | 368 | 11153423 |
### Author: <NAME> <<EMAIL>>
### FIXME: Should read /var/log/mail/statistics or /etc/mail/statistics (format ?)
class dstat_plugin(dstat):
def __init__(self):
self.name = 'sendmail'
self.vars = ('queue',)
self.type = 'd'
self.width = 4
self.scale = 100
def check(self):
if not os.access('/var/spool/mqueue', os.R_OK):
raise Exception, 'Cannot access sendmail queue'
def extract(self):
self.val['queue'] = len(glob.glob('/var/spool/mqueue/qf*'))
# vim:ts=4:sw=4:et
|
tests/utilities/test_parameter_tying.py | Code-Cornelius/pytorch-lightning | 15,666 | 11153430 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from pytorch_lightning.utilities import find_shared_parameters, set_shared_parameters
from tests.helpers import BoringModel
class ParameterSharingModule(BoringModel):
def __init__(self):
super().__init__()
self.layer_1 = nn.Linear(32, 10, bias=False)
self.layer_2 = nn.Linear(10, 32, bias=False)
self.layer_3 = nn.Linear(32, 10, bias=False)
self.layer_3.weight = self.layer_1.weight
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
x = self.layer_3(x)
return x
@pytest.mark.parametrize(
["model", "expected_shared_params"],
[(BoringModel, []), (ParameterSharingModule, [["layer_1.weight", "layer_3.weight"]])],
)
def test_find_shared_parameters(model, expected_shared_params):
assert expected_shared_params == find_shared_parameters(model())
def test_set_shared_parameters():
model = ParameterSharingModule()
set_shared_parameters(model, [["layer_1.weight", "layer_3.weight"]])
assert torch.all(torch.eq(model.layer_1.weight, model.layer_3.weight))
class SubModule(nn.Module):
def __init__(self, layer):
super().__init__()
self.layer = layer
def forward(self, x):
return self.layer(x)
class NestedModule(BoringModel):
def __init__(self):
super().__init__()
self.layer = nn.Linear(32, 10, bias=False)
self.net_a = SubModule(self.layer)
self.layer_2 = nn.Linear(10, 32, bias=False)
self.net_b = SubModule(self.layer)
def forward(self, x):
x = self.net_a(x)
x = self.layer_2(x)
x = self.net_b(x)
return x
model = NestedModule()
set_shared_parameters(model, [["layer.weight", "net_a.layer.weight", "net_b.layer.weight"]])
assert torch.all(torch.eq(model.net_a.layer.weight, model.net_b.layer.weight))
|
volrender/volrender.py | s-kistler/PyPlay | 505 | 11153473 |
"""
volrender.py
Author: <NAME>
A Ray Casting Volume Renderer for medical data visualization.
"""
import sys, argparse, os
from slicerender import *
from raycast import *
import glfw
class RenderWin:
"""GLFW Rendering window class"""
def __init__(self, imageDir):
# save current working directory
cwd = os.getcwd()
# initialize glfw - this changes cwd
glfw.glfwInit()
# restore cwd
os.chdir(cwd)
# version hints
glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MAJOR, 3)
glfw.glfwWindowHint(glfw.GLFW_CONTEXT_VERSION_MINOR, 3)
glfw.glfwWindowHint(glfw.GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE)
glfw.glfwWindowHint(glfw.GLFW_OPENGL_PROFILE, glfw.GLFW_OPENGL_CORE_PROFILE)
# make a window
self.width, self.height = 512, 512
self.aspect = self.width/float(self.height)
self.win = glfw.glfwCreateWindow(self.width, self.height, b"volrender")
# make context current
glfw.glfwMakeContextCurrent(self.win)
# initialize GL
glViewport(0, 0, self.width, self.height)
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 0.0)
# set window callbacks
glfw.glfwSetMouseButtonCallback(self.win, self.onMouseButton)
glfw.glfwSetKeyCallback(self.win, self.onKeyboard)
glfw.glfwSetWindowSizeCallback(self.win, self.onSize)
# load volume data
self.volume = volreader.loadVolume(imageDir)
# create renderer
self.renderer = RayCastRender(self.width, self.height, self.volume)
# exit flag
self.exitNow = False
def onMouseButton(self, win, button, action, mods):
#print 'mouse button: ', win, button, action, mods
pass
def onKeyboard(self, win, key, scancode, action, mods):
#print 'keyboard: ', win, key, scancode, action, mods
# ESC to quit
if key is glfw.GLFW_KEY_ESCAPE:
self.renderer.close()
self.exitNow = True
else:
if action is glfw.GLFW_PRESS or action is glfw.GLFW_REPEAT:
if key == glfw.GLFW_KEY_V:
# toggle render mode
if isinstance(self.renderer, RayCastRender):
self.renderer = SliceRender(self.width, self.height,
self.volume)
else:
self.renderer = RayCastRender(self.width, self.height,
self.volume)
# call reshape on renderer
self.renderer.reshape(self.width, self.height)
else:
# send key press to renderer
keyDict = {glfw.GLFW_KEY_X : 'x', glfw.GLFW_KEY_Y: 'y',
glfw.GLFW_KEY_Z: 'z',
glfw.GLFW_KEY_LEFT: 'l', glfw.GLFW_KEY_RIGHT: 'r'}
try:
self.renderer.keyPressed(keyDict[key])
except:
pass
def onSize(self, win, width, height):
#print 'onsize: ', win, width, height
self.width = width
self.height = height
self.aspect = width/float(height)
glViewport(0, 0, self.width, self.height)
self.renderer.reshape(width, height)
def run(self):
# start loop
while not glfw.glfwWindowShouldClose(self.win) and not self.exitNow:
# render
self.renderer.draw()
# swap buffers
glfw.glfwSwapBuffers(self.win)
# wait for events
glfw.glfwWaitEvents()
# end
glfw.glfwTerminate()
# main() function
def main():
print('starting volrender...')
# create parser
parser = argparse.ArgumentParser(description="Volume Rendering...")
# add expected arguments
parser.add_argument('--dir', dest='imageDir', required=True)
# parse args
args = parser.parse_args()
# create render window
rwin = RenderWin(args.imageDir)
rwin.run()
# call main
if __name__ == '__main__':
main()
|
Contents/Libraries/Shared/pysrt/version.py | jippo015/Sub-Zero.bundle | 1,553 | 11153480 |
VERSION = (1, 0, 1)
VERSION_STRING = '.'.join(str(i) for i in VERSION)
|
config.py | Google1234/Information_retrieva_Projectl- | 139 | 11153505 |
#-*- coding: UTF-8 -*-
# Number of records returned per query
query_return_numbers=20
# Length of each snippet returned per query
query_return_snipper_size=250
# Number of similar documents recommended for a given query
recommand_numbers=3
# Number of similar topics recommended for a search term
recommand_topic_numbers=10
# Block size for reading a file chunk by chunk: used by read_block() write_bloack(), for large files that only need one pass
buff_size=1024*1024*10
# Cache size for keeping part of a file's records in memory: on a cache hit the record is served from memory, otherwise it is read from disk and the cache is updated; for files that are read/written many times
cache_size=1024*1024*10
# Number of crawled web pages
crawled_web_numbers=100000
# Filename for the crawled page data
# Format: doc_id#####title#####content#####url#####
# Encoding: utf-8
data_filename="_data.txt"
# Filename for the crawled page URLs
# Format: url#####
# Encoding: utf-8
url_filename="_url.txt"
# Filename for the inverted index
# Format: word:doc_id#tf:doc_id#tf|
# Encoding: utf-8
inverted_index_filename="_data_inverted_index.txt"
# Filename for the inverted-index dictionary
# Format: word:document frequency (df) of the word:byte offset of the word's postings in the inverted index file:length of those postings
# Note: the start position is the absolute address of the first doc_id for the term
#       the byte length runs from the first doc_id up to the last tf, not up to the trailing '|'
# Example: 陈晓华:3:177112568:22|
# (an illustrative parsing sketch follows the constant below)
# Encoding: utf-8
inverted_Dictionary_filename="_data_inverted_Dictionary.txt"
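# Illustrative parsing sketch (not part of the original project; the variable
# names are made up). A single entry of the dictionary file described above,
# i.e. the text before each '|', could be unpacked like this:
#     term, df, offset, length = entry.split(':')
#     df, offset, length = int(df), int(offset), int(length)
# (offset, length) then locate the term's postings inside the inverted index
# file; the index file below uses the same offset/length idea for whole pages.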
# Filename for the document index
# Format: doc_id:byte offset of this page in the crawled-data file:length of this page|
# Note: the page length includes the trailing '#####'
# Example: 77719:176500446:1723
# Encoding: utf-8
index_filename="_index.txt"
# Filename for the ids of documents similar to each document
# Format: doc_id:similar page id:similar page id|
# Encoding: utf-8
similar_filename="_similar.txt"
# Filename for the stopword list
# Format: one stopword per line
# Note: this filename takes no prefix
# Encoding: utf-8
stopword_filename="stopword.txt"
# Filename of the trained word2Vec model
word2Vec_filename="vectors.bin" |
test/statements/import4.py | kylebarron/MagicPython | 1,482 | 11153548 | <gh_stars>1000+
from....foo import a
from...foo import b
from..foo import c
from.foo import d
from : keyword.control.import.python, source.python
.... : punctuation.separator.period.python, source.python
foo : source.python
: source.python
import : keyword.control.import.python, source.python
: source.python
a : source.python
from : keyword.control.import.python, source.python
... : punctuation.separator.period.python, source.python
foo : source.python
: source.python
import : keyword.control.import.python, source.python
: source.python
b : source.python
from : keyword.control.import.python, source.python
.. : punctuation.separator.period.python, source.python
foo : source.python
: source.python
import : keyword.control.import.python, source.python
: source.python
c : source.python
from : keyword.control.import.python, source.python
. : punctuation.separator.period.python, source.python
foo : source.python
: source.python
import : keyword.control.import.python, source.python
: source.python
d : source.python
|
proposals/generate_iter_proposals.py | LLLjun/learn-to-cluster | 620 | 11153586 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import numpy as np
from utils import (read_meta, write_meta, build_knns, labels2clusters,
clusters2labels, BasicDataset, Timer)
from proposals import super_vertex, filter_clusters, save_proposals
def parse_args():
parser = argparse.ArgumentParser(
description='Generate Iterative Proposals')
parser.add_argument("--name",
type=str,
default='part1_test',
help="image features")
parser.add_argument("--prefix",
type=str,
default='./data',
help="prefix of dataset")
parser.add_argument("--oprefix",
type=str,
default='./data/cluster_proposals',
help="prefix of saving super vertx")
parser.add_argument("--dim",
type=int,
default=256,
help="dimension of feature")
parser.add_argument("--no_normalize",
action='store_true',
help="normalize feature by default")
parser.add_argument('--k', default=3, type=int)
parser.add_argument('--th_knn', default=0.6, type=float)
parser.add_argument('--th_step', default=0.05, type=float)
parser.add_argument('--knn_method',
default='faiss',
choices=['faiss', 'hnsw'])
parser.add_argument('--minsz', default=3, type=int)
parser.add_argument('--maxsz', default=500, type=int)
parser.add_argument('--sv_minsz', default=2, type=int)
parser.add_argument('--sv_maxsz', default=5, type=int)
parser.add_argument("--sv_labels",
type=str,
default=None,
help="super vertex labels")
parser.add_argument("--sv_knn_prefix",
type=str,
default=None,
help="super vertex precomputed knn")
parser.add_argument('--is_rebuild', action='store_true')
parser.add_argument('--is_save_proposals', action='store_true')
parser.add_argument('--force', action='store_true')
args = parser.parse_args()
return args
def parse_path(s):
s = os.path.dirname(s)
s = s.split('/')[-1]
lst = s.split('_')
lst.insert(0, 'knn_method')
dic1 = {}
for i in range(0, len(lst), 2):
dic1[lst[i]] = lst[i + 1]
dic = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
assert dic == dic1
return dic
def get_iter_from_path(s):
return int(parse_path(s)['iter'])
def get_knns_from_path(s, knn_prefix, feats):
dic = parse_path(s)
k = int(dic['k'])
knn_method = dic['knn_method']
knns = build_knns(knn_prefix, feats, knn_method, k, is_rebuild=False)
return knns
def generate_iter_proposals(oprefix,
knn_prefix,
feats,
feat_dim=256,
knn_method='faiss',
k=80,
th_knn=0.6,
th_step=0.05,
minsz=3,
maxsz=300,
sv_minsz=2,
sv_maxsz=5,
sv_labels=None,
sv_knn_prefix=None,
is_rebuild=False,
is_save_proposals=True,
force=False,
**kwargs):
assert sv_minsz >= 2, "sv_minsz >= 2 to avoid duplicated proposals"
print('k={}, th_knn={}, th_step={}, minsz={}, maxsz={}, '
'sv_minsz={}, sv_maxsz={}, is_rebuild={}'.format(
k, th_knn, th_step, minsz, maxsz, sv_minsz, sv_maxsz,
is_rebuild))
if not os.path.exists(sv_labels):
raise FileNotFoundError('{} not found.'.format(sv_labels))
if sv_knn_prefix is None:
sv_knn_prefix = knn_prefix
# get iter and knns from super vertex path
_iter = get_iter_from_path(sv_labels) + 1
knns_inst = get_knns_from_path(sv_labels, sv_knn_prefix, feats)
print('read sv_clusters from {}'.format(sv_labels))
sv_lb2idxs, sv_idx2lb = read_meta(sv_labels)
inst_num = len(sv_idx2lb)
sv_clusters = labels2clusters(sv_lb2idxs)
# sv_clusters = filter_clusters(sv_clusters, minsz)
feats = np.array([feats[c, :].mean(axis=0) for c in sv_clusters])
print('average feature of super vertices:', feats.shape)
# build knns
knns = build_knns(knn_prefix, feats, knn_method, k, is_rebuild)
# obtain cluster proposals
ofolder = os.path.join(
oprefix,
'{}_k_{}_th_{}_step_{}_minsz_{}_maxsz_{}_sv_minsz_{}_maxsz_{}_iter_{}'.
format(knn_method, k, th_knn, th_step, minsz, maxsz, sv_minsz,
sv_maxsz, _iter))
ofn_pred_labels = os.path.join(ofolder, 'pred_labels.txt')
if not os.path.exists(ofolder):
os.makedirs(ofolder)
if not os.path.isfile(ofn_pred_labels) or is_rebuild:
with Timer('build super vertices (iter={})'.format(_iter)):
clusters = super_vertex(knns, k, th_knn, th_step, sv_maxsz)
clusters = filter_clusters(clusters, sv_minsz)
clusters = [[x for c in cluster for x in sv_clusters[c]]
for cluster in clusters]
with Timer('dump clustering to {}'.format(ofn_pred_labels)):
labels = clusters2labels(clusters)
write_meta(ofn_pred_labels, labels, inst_num=inst_num)
else:
print('read clusters from {}'.format(ofn_pred_labels))
lb2idxs, _ = read_meta(ofn_pred_labels)
clusters = labels2clusters(lb2idxs)
clusters = filter_clusters(clusters, minsz, maxsz)
# output cluster proposals
ofolder_proposals = os.path.join(ofolder, 'proposals')
if is_save_proposals:
print('saving cluster proposals to {}'.format(ofolder_proposals))
if not os.path.exists(ofolder_proposals):
os.makedirs(ofolder_proposals)
save_proposals(clusters,
knns_inst,
ofolder=ofolder_proposals,
force=force)
return ofolder_proposals, ofn_pred_labels
if __name__ == '__main__':
args = parse_args()
ds = BasicDataset(name=args.name,
prefix=args.prefix,
dim=args.dim,
normalize=not args.no_normalize)
ds.info()
sv_folder = os.path.dirname(args.sv_labels)
generate_iter_proposals(sv_folder,
os.path.join(sv_folder, 'knns'),
ds.features,
args.dim,
args.knn_method,
args.k,
args.th_knn,
args.th_step,
args.minsz,
args.maxsz,
args.sv_minsz,
args.sv_maxsz,
sv_labels=args.sv_labels,
sv_knn_prefix=args.sv_knn_prefix,
is_rebuild=args.is_rebuild,
is_save_proposals=args.is_save_proposals,
force=args.force)
|
src/veGiantModel/distributed/__init__.py | sljlp/veGiantModel | 101 | 11153596 |
from .. import patcher as dist
from megatron import mpu
def get_model_parallel_world_size():
return dist.get_model_parallel_world_size()
def get_model_parallel_rank():
return dist.get_model_parallel_rank()
def get_data_parallel_world_size():
return dist.get_data_parallel_world_size()
def get_model_parallel_group():
return dist.get_model_parallel_group()
def get_grid():
return dist.get_grid()
def copy_to_model_parallel_region(input_):
return mpu.copy_to_model_parallel_region(input_)
def reduce_from_model_parallel_region(input_):
return mpu.reduce_from_model_parallel_region(input_)
def gather_from_model_parallel_region(input_):
return mpu.gather_from_model_parallel_region(input_)
|
tests/unit/module/test_cdk.py | blade2005/runway | 134 | 11153619 | """Test runway.module.cdk."""
# pylint: disable=no-self-use,unused-argument
# pyright: basic
from __future__ import annotations
import logging
from subprocess import CalledProcessError
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from unittest.mock import call
import pytest
from mock import Mock
from runway.config.models.runway.options.cdk import RunwayCdkModuleOptionsDataModel
from runway.module.cdk import CloudDevelopmentKit, CloudDevelopmentKitOptions
if TYPE_CHECKING:
from pathlib import Path
from pytest import LogCaptureFixture
from pytest_mock import MockerFixture
from pytest_subprocess import FakeProcess
from pytest_subprocess.core import FakePopen
from runway.context import RunwayContext
from runway.module.cdk import CdkCommandTypeDef
MODULE = "runway.module.cdk"
@pytest.mark.usefixtures("patch_module_npm")
class TestCloudDevelopmentKit:
"""Test CloudDevelopmentKit."""
def test_cdk_bootstrap(
self,
caplog: LogCaptureFixture,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_bootstrap."""
caplog.set_level(logging.INFO, logger=MODULE)
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["bootstrap"]
)
mock_run_module_command = mocker.patch(f"{MODULE}.run_module_command")
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert not obj.cdk_bootstrap()
mock_gen_cmd.assert_called_once_with("bootstrap", include_context=True)
mock_run_module_command.assert_called_once_with(
cmd_list=mock_gen_cmd.return_value,
env_vars=runway_context.env.vars,
logger=obj.logger,
)
logs = "\n".join(caplog.messages)
assert "init (in progress)" in logs
assert "init (complete)" in logs
def test_cdk_bootstrap_raise_called_process_error(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_bootstrap raise CalledProcessError."""
mocker.patch.object(CloudDevelopmentKit, "gen_cmd")
mocker.patch(
f"{MODULE}.run_module_command", side_effect=CalledProcessError(1, "")
)
with pytest.raises(CalledProcessError):
CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_bootstrap()
def test_cdk_deploy(
self,
caplog: LogCaptureFixture,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_deploy."""
caplog.set_level(logging.INFO, logger=MODULE)
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["deploy"]
)
mock_run_module_command = mocker.patch(f"{MODULE}.run_module_command")
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert not obj.cdk_deploy()
mock_gen_cmd.assert_called_once_with("deploy", ['"*"'], include_context=True)
mock_run_module_command.assert_called_once_with(
cmd_list=mock_gen_cmd.return_value,
env_vars=runway_context.env.vars,
logger=obj.logger,
)
logs = "\n".join(caplog.messages)
assert "deploy (in progress)" in logs
assert "deploy (complete)" in logs
def test_cdk_deploy_raise_called_process_error(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_deploy raise CalledProcessError."""
mocker.patch.object(CloudDevelopmentKit, "gen_cmd")
mocker.patch(
f"{MODULE}.run_module_command", side_effect=CalledProcessError(1, "")
)
with pytest.raises(CalledProcessError):
CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_deploy()
def test_cdk_destroy(
self,
caplog: LogCaptureFixture,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_destroy."""
caplog.set_level(logging.INFO, logger=MODULE)
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["destroy"]
)
mock_run_module_command = mocker.patch(f"{MODULE}.run_module_command")
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert not obj.cdk_destroy()
mock_gen_cmd.assert_called_once_with("destroy", ['"*"'], include_context=True)
mock_run_module_command.assert_called_once_with(
cmd_list=mock_gen_cmd.return_value,
env_vars=runway_context.env.vars,
logger=obj.logger,
)
logs = "\n".join(caplog.messages)
assert "destroy (in progress)" in logs
assert "destroy (complete)" in logs
def test_cdk_destroy_raise_called_process_error(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_destroy raise CalledProcessError."""
mocker.patch.object(CloudDevelopmentKit, "gen_cmd")
mocker.patch(
f"{MODULE}.run_module_command", side_effect=CalledProcessError(1, "")
)
with pytest.raises(CalledProcessError):
CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_destroy()
def test_cdk_diff(
self,
caplog: LogCaptureFixture,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_diff."""
caplog.set_level(logging.INFO, logger=MODULE)
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["diff"]
)
mock_run_module_command = mocker.patch(f"{MODULE}.run_module_command")
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert not obj.cdk_diff()
mock_gen_cmd.assert_called_once_with(
"diff", args_list=None, include_context=True
)
mock_run_module_command.assert_called_once_with(
cmd_list=mock_gen_cmd.return_value,
env_vars=runway_context.env.vars,
exit_on_error=False,
logger=obj.logger,
)
logs = "\n".join(caplog.messages)
assert "plan (in progress)" in logs
assert "plan (complete)" in logs
assert not obj.cdk_diff("stack_name")
mock_gen_cmd.assert_called_with(
"diff", args_list=["stack_name"], include_context=True
)
@pytest.mark.parametrize("return_code", [1, 2])
def test_cdk_diff_catch_called_process_error_sys_exit(
self,
mocker: MockerFixture,
return_code: int,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_diff catch CalledProcessError and call sys.exit() with return code."""
mocker.patch.object(CloudDevelopmentKit, "gen_cmd")
mocker.patch(
f"{MODULE}.run_module_command",
side_effect=CalledProcessError(return_code, ""),
)
with pytest.raises(SystemExit) as excinfo:
CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_diff()
assert excinfo.value.args == (return_code,)
def test_cdk_list(
self,
fake_process: FakeProcess,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_list."""
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["list"]
)
fake_process.register_subprocess(
mock_gen_cmd.return_value, returncode=0, stdout="Stack0\nStack1"
)
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert obj.cdk_list() == ["Stack0", "Stack1"]
mock_gen_cmd.assert_called_once_with("list", include_context=True)
assert fake_process.call_count(mock_gen_cmd.return_value) == 1
def test_cdk_list_empty(
self,
fake_process: FakeProcess,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_list empty."""
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["list"]
)
fake_process.register_subprocess(
mock_gen_cmd.return_value, returncode=0, stdout=""
)
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert obj.cdk_list() == [""]
assert fake_process.call_count(mock_gen_cmd.return_value) == 1
def test_cdk_list_raise_called_process_error(
self,
fake_process: FakeProcess,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test cdk_list raise CalledProcessError."""
mock_gen_cmd = mocker.patch.object(
CloudDevelopmentKit, "gen_cmd", return_value=["list"]
)
fake_process.register_subprocess(
mock_gen_cmd.return_value,
returncode=1,
)
with pytest.raises(CalledProcessError):
CloudDevelopmentKit(runway_context, module_root=tmp_path).cdk_list()
assert fake_process.call_count(mock_gen_cmd.return_value) == 1
@pytest.mark.parametrize(
"debug, no_color, verbose, expected",
[
(False, False, False, []),
(True, False, False, ["--debug"]),
(True, True, False, ["--no-color", "--debug"]),
(True, True, True, ["--no-color", "--debug"]),
(False, True, False, ["--no-color"]),
(False, True, True, ["--no-color", "--verbose"]),
(False, False, True, ["--verbose"]),
],
)
def test_cli_args(
self,
debug: bool,
expected: List[str],
no_color: bool,
tmp_path: Path,
verbose: bool,
) -> None:
"""Test cli_args."""
assert (
CloudDevelopmentKit(
Mock(env=Mock(debug=debug, verbose=verbose), no_color=no_color),
module_root=tmp_path,
).cli_args
== expected
)
@pytest.mark.parametrize(
"parameters, expected",
[
({}, ["--context", "environment=test"]),
({"key": "val"}, ["--context", "environment=test", "--context", "key=val"]),
(
{"environment": "override", "key": "val"},
["--context", "environment=override", "--context", "key=val"],
),
({"environment": "override"}, ["--context", "environment=override"]),
],
)
def test_cli_args_context(
self,
expected: List[str],
runway_context: RunwayContext,
parameters: Dict[str, Any],
tmp_path: Path,
) -> None:
"""Test cli_args_context."""
assert (
CloudDevelopmentKit(
runway_context, module_root=tmp_path, parameters=parameters
).cli_args_context
== expected
)
@pytest.mark.parametrize("skip", [False, True])
def test_deploy(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
skip: bool,
tmp_path: Path,
) -> None:
"""Test deploy."""
mocker.patch.object(CloudDevelopmentKit, "skip", skip)
cdk_bootstrap = mocker.patch.object(CloudDevelopmentKit, "cdk_bootstrap")
cdk_deploy = mocker.patch.object(CloudDevelopmentKit, "cdk_deploy")
npm_install = mocker.patch.object(CloudDevelopmentKit, "npm_install")
run_build_steps = mocker.patch.object(CloudDevelopmentKit, "run_build_steps")
assert not CloudDevelopmentKit(runway_context, module_root=tmp_path).deploy()
if skip:
cdk_bootstrap.assert_not_called()
cdk_deploy.assert_not_called()
npm_install.assert_not_called()
run_build_steps.assert_not_called()
else:
cdk_bootstrap.assert_called_once_with()
cdk_deploy.assert_called_once_with()
npm_install.assert_called_once_with()
run_build_steps.assert_called_once_with()
@pytest.mark.parametrize("skip", [False, True])
def test_destroy(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
skip: bool,
tmp_path: Path,
) -> None:
"""Test destroy."""
mocker.patch.object(CloudDevelopmentKit, "skip", skip)
cdk_bootstrap = mocker.patch.object(CloudDevelopmentKit, "cdk_bootstrap")
cdk_destroy = mocker.patch.object(CloudDevelopmentKit, "cdk_destroy")
npm_install = mocker.patch.object(CloudDevelopmentKit, "npm_install")
run_build_steps = mocker.patch.object(CloudDevelopmentKit, "run_build_steps")
assert not CloudDevelopmentKit(runway_context, module_root=tmp_path).destroy()
cdk_bootstrap.assert_not_called()
if skip:
cdk_destroy.assert_not_called()
npm_install.assert_not_called()
run_build_steps.assert_not_called()
else:
cdk_destroy.assert_called_once_with()
npm_install.assert_called_once_with()
run_build_steps.assert_called_once_with()
@pytest.mark.parametrize(
"command, args_list, include_context, env_ci, expected",
[
(
"deploy",
['"*"'],
True,
False,
["deploy", "cli_args", '"*"', "cli_args_context"],
),
(
"deploy",
['"*"'],
True,
True,
[
"deploy",
"cli_args",
'"*"',
"cli_args_context",
"--ci",
"--require-approval=never",
],
),
(
"destroy",
['"*"'],
True,
False,
["destroy", "cli_args", '"*"', "cli_args_context"],
),
(
"destroy",
['"*"'],
True,
True,
["destroy", "cli_args", '"*"', "cli_args_context", "--force"],
),
("init", None, True, False, ["init", "cli_args", "cli_args_context"]),
("init", None, True, True, ["init", "cli_args", "cli_args_context"]),
("list", None, False, False, ["list", "cli_args"]),
("list", None, False, True, ["list", "cli_args"]),
],
)
def test_gen_cmd(
self,
args_list: Optional[List[str]],
command: CdkCommandTypeDef,
env_ci: bool,
expected: List[str],
include_context: bool,
mocker: MockerFixture,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test gen_cmd."""
mocker.patch.object(CloudDevelopmentKit, "cli_args", ["cli_args"])
mocker.patch.object(
CloudDevelopmentKit, "cli_args_context", ["cli_args_context"]
)
generate_node_command = mocker.patch(
f"{MODULE}.generate_node_command", return_value=["success"]
)
runway_context.env.ci = env_ci
obj = CloudDevelopmentKit(runway_context, module_root=tmp_path)
assert (
obj.gen_cmd(command, args_list, include_context=include_context)
== generate_node_command.return_value
)
generate_node_command.assert_called_once_with(
command="cdk",
command_opts=expected,
logger=obj.logger,
package="aws-cdk",
path=obj.path,
)
@pytest.mark.parametrize("skip", [False, True])
def test_init(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
skip: bool,
tmp_path: Path,
) -> None:
"""Test init."""
mocker.patch.object(CloudDevelopmentKit, "skip", skip)
cdk_bootstrap = mocker.patch.object(CloudDevelopmentKit, "cdk_bootstrap")
npm_install = mocker.patch.object(CloudDevelopmentKit, "npm_install")
run_build_steps = mocker.patch.object(CloudDevelopmentKit, "run_build_steps")
assert not CloudDevelopmentKit(runway_context, module_root=tmp_path).init()
if skip:
cdk_bootstrap.assert_not_called()
npm_install.assert_not_called()
run_build_steps.assert_not_called()
else:
cdk_bootstrap.assert_called_once_with()
npm_install.assert_called_once_with()
run_build_steps.assert_called_once_with()
@pytest.mark.parametrize("skip", [False, True])
def test_plan(
self,
mocker: MockerFixture,
runway_context: RunwayContext,
skip: bool,
tmp_path: Path,
) -> None:
"""Test plan."""
mocker.patch.object(CloudDevelopmentKit, "skip", skip)
cdk_bootstrap = mocker.patch.object(CloudDevelopmentKit, "cdk_bootstrap")
cdk_list = mocker.patch.object(
CloudDevelopmentKit, "cdk_list", return_value=["Stack0", "Stack1"]
)
cdk_diff = mocker.patch.object(CloudDevelopmentKit, "cdk_diff")
npm_install = mocker.patch.object(CloudDevelopmentKit, "npm_install")
run_build_steps = mocker.patch.object(CloudDevelopmentKit, "run_build_steps")
assert not CloudDevelopmentKit(runway_context, module_root=tmp_path).plan()
cdk_bootstrap.assert_not_called()
if skip:
cdk_list.assert_not_called()
cdk_diff.assert_not_called()
npm_install.assert_not_called()
run_build_steps.assert_not_called()
else:
cdk_list.assert_called_once_with()
cdk_diff.assert_has_calls([call("Stack0"), call("Stack1")])
npm_install.assert_called_once_with()
run_build_steps.assert_called_once_with()
def test_run_build_steps_empty(
self,
caplog: LogCaptureFixture,
fake_process: FakeProcess,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test run_build_steps."""
caplog.set_level(logging.INFO, logger=MODULE)
obj = CloudDevelopmentKit(
runway_context, module_root=tmp_path, options={"build_steps": []}
)
assert not obj.run_build_steps()
logs = "\n".join(caplog.messages)
assert "build steps (in progress)" not in logs
assert "build steps (complete)" not in logs
def test_run_build_steps_linux(
self,
caplog: LogCaptureFixture,
fake_process: FakeProcess,
mocker: MockerFixture,
platform_linux: None,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test run_build_steps."""
caplog.set_level(logging.INFO, logger=MODULE)
fix_windows_command_list = mocker.patch(f"{MODULE}.fix_windows_command_list")
fake_process.register_subprocess(["test", "step"], returncode=0)
obj = CloudDevelopmentKit(
runway_context, module_root=tmp_path, options={"build_steps": ["test step"]}
)
assert not obj.run_build_steps()
fix_windows_command_list.assert_not_called()
assert fake_process.call_count(["test", "step"]) == 1
logs = "\n".join(caplog.messages)
assert "build steps (in progress)" in logs
assert "build steps (complete)" in logs
def test_run_build_steps_raise_file_not_found(
self,
caplog: LogCaptureFixture,
fake_process: FakeProcess,
platform_linux: None,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test run_build_steps."""
caplog.set_level(logging.ERROR, MODULE)
def _callback(process: FakePopen) -> None:
process.returncode = 1
raise FileNotFoundError
fake_process.register_subprocess(["test", "step"], callback=_callback)
with pytest.raises(FileNotFoundError):
CloudDevelopmentKit(
runway_context,
module_root=tmp_path,
options={"build_steps": ["test step"]},
).run_build_steps()
assert fake_process.call_count(["test", "step"]) == 1
assert "failed to find it" in "\n".join(caplog.messages)
def test_run_build_steps_raise_called_process_error(
self,
fake_process: FakeProcess,
platform_linux: None,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test run_build_steps."""
fake_process.register_subprocess(["test", "step"], returncode=1)
with pytest.raises(CalledProcessError):
CloudDevelopmentKit(
runway_context,
module_root=tmp_path,
options={"build_steps": ["test step"]},
).run_build_steps()
assert fake_process.call_count(["test", "step"]) == 1
def test_run_build_steps_windows(
self,
caplog: LogCaptureFixture,
fake_process: FakeProcess,
mocker: MockerFixture,
platform_windows: None,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test run_build_steps."""
caplog.set_level(logging.INFO, logger=MODULE)
fix_windows_command_list = mocker.patch(
f"{MODULE}.fix_windows_command_list", return_value=["test", "step"]
)
fake_process.register_subprocess(["test", "step"], returncode=0)
obj = CloudDevelopmentKit(
runway_context, module_root=tmp_path, options={"build_steps": ["test step"]}
)
assert not obj.run_build_steps()
fix_windows_command_list.assert_called_once_with(["test", "step"])
assert fake_process.call_count(["test", "step"]) == 1
logs = "\n".join(caplog.messages)
assert "build steps (in progress)" in logs
assert "build steps (complete)" in logs
@pytest.mark.parametrize(
"explicitly_enabled, package_json_missing, expected",
[
(False, False, True),
(True, False, False),
(True, True, True),
(False, True, True),
],
)
def test_skip(
self,
caplog: LogCaptureFixture,
expected: bool,
explicitly_enabled: bool,
mocker: MockerFixture,
package_json_missing: bool,
runway_context: RunwayContext,
tmp_path: Path,
) -> None:
"""Test skip."""
caplog.set_level(logging.INFO, logger=MODULE)
mocker.patch.object(
CloudDevelopmentKit,
"package_json_missing",
return_value=package_json_missing,
)
assert (
CloudDevelopmentKit(
runway_context,
explicitly_enabled=explicitly_enabled,
module_root=tmp_path,
).skip
is expected
)
if package_json_missing:
assert "skipped; package.json" in "\n".join(caplog.messages)
elif not explicitly_enabled:
assert "skipped; environment required but not defined" in "\n".join(
caplog.messages
)
class TestCloudDevelopmentKitOptions:
"""Test CloudDevelopmentKitOptions."""
def test___init__(self) -> None:
"""Test __init__."""
data = RunwayCdkModuleOptionsDataModel(build_steps=["test"])
obj = CloudDevelopmentKitOptions(data)
assert obj.build_steps == data.build_steps
assert obj.skip_npm_ci == data.skip_npm_ci
def test_parse_obj(self) -> None:
"""Test parse_obj."""
config = {"build_steps": ["test-cmd"], "skip_npm_ci": True, "key": "val"}
obj = CloudDevelopmentKitOptions.parse_obj(config)
assert isinstance(obj.data, RunwayCdkModuleOptionsDataModel)
assert obj.data.build_steps == config["build_steps"]
assert obj.data.skip_npm_ci == config["skip_npm_ci"]
assert "key" not in obj.data.dict()
|
tests/test_spatial_crop.py | dyollb/MONAI | 2,971 | 11153639 |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import SpatialCrop
from tests.utils import TEST_NDARRAYS, assert_allclose
TESTS = [
[{"roi_center": [1, 1, 1], "roi_size": [2, 2, 2]}, (3, 3, 3, 3), (3, 2, 2, 2)],
[{"roi_start": [0, 0, 0], "roi_end": [2, 2, 2]}, (3, 3, 3, 3), (3, 2, 2, 2)],
[{"roi_start": [0, 0], "roi_end": [2, 2]}, (3, 3, 3, 3), (3, 2, 2, 3)],
[{"roi_start": [0, 0, 0, 0, 0], "roi_end": [2, 2, 2, 2, 2]}, (3, 3, 3, 3), (3, 2, 2, 2)],
[{"roi_start": [0, 0, 0, 0, 0], "roi_end": [8, 8, 8, 2, 2]}, (3, 3, 3, 3), (3, 3, 3, 3)],
[{"roi_start": [1, 0, 0], "roi_end": [1, 8, 8]}, (3, 3, 3, 3), (3, 0, 3, 3)],
[{"roi_slices": [slice(s, e) for s, e in zip([-1, -2, 0], [None, None, 2])]}, (3, 3, 3, 3), (3, 1, 2, 2)],
]
TEST_ERRORS = [[{"roi_slices": [slice(s, e, 2) for s, e in zip([-1, -2, 0], [None, None, 2])]}]]
class TestSpatialCrop(unittest.TestCase):
@parameterized.expand(TESTS)
def test_shape(self, input_param, input_shape, expected_shape):
input_data = np.random.randint(0, 2, size=input_shape)
results = []
for p in TEST_NDARRAYS:
for q in TEST_NDARRAYS + (None,):
input_param_mod = {
k: q(v) if k != "roi_slices" and q is not None else v for k, v in input_param.items()
}
im = p(input_data)
result = SpatialCrop(**input_param_mod)(im)
self.assertEqual(type(im), type(result))
if isinstance(result, torch.Tensor):
self.assertEqual(result.device, im.device)
self.assertTupleEqual(result.shape, expected_shape)
results.append(result)
if len(results) > 1:
assert_allclose(results[0], results[-1], type_test=False)
@parameterized.expand(TEST_ERRORS)
def test_error(self, input_param):
with self.assertRaises(ValueError):
SpatialCrop(**input_param)
if __name__ == "__main__":
unittest.main()
|
homeassistant/components/tellduslive/binary_sensor.py | MrDelik/core | 30,023 | 11153680 | """Support for binary sensors using Tellstick Net."""
from homeassistant.components import binary_sensor, tellduslive
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .entry import TelldusLiveEntity
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up tellduslive sensors dynamically."""
async def async_discover_binary_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveSensor(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(
binary_sensor.DOMAIN, tellduslive.DOMAIN
),
async_discover_binary_sensor,
)
class TelldusLiveSensor(TelldusLiveEntity, BinarySensorEntity):
"""Representation of a Tellstick sensor."""
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.is_on
|
data/datasets/joints_dataset.py | yihui-he2020/epipolar-transformers | 360 | 11153685 |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>), modified by <NAME>
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import copy
import random
import numpy as np
import os.path as osp
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from data.transforms.image import get_affine_transform
from data.transforms.image import affine_transform
from data.transforms.keypoints2d import Heatmapcreator
from vision.multiview import coord2pix
from core import cfg
from .base_dataset import BaseDataset
class JointsDataset(BaseDataset):
def __init__(self, root, subset, is_train, transform=None):
self.heatmapcreator = Heatmapcreator(
cfg.KEYPOINT.HEATMAP_SIZE,
cfg.KEYPOINT.SIGMA,
cfg.BACKBONE.DOWNSAMPLE)
self.is_train = is_train
self.subset = subset
self.root = root
self.data_format = cfg.DATASETS.DATA_FORMAT
self.scale_factor = cfg.DATASETS.SCALE_FACTOR
self.rotation_factor = cfg.DATASETS.ROT_FACTOR
self.image_size = cfg.DATASETS.IMAGE_SIZE #NETWORK.IMAGE_SIZE
self.heatmap_size = cfg.KEYPOINT.HEATMAP_SIZE #NETWORK.HEATMAP_SIZE
self.sigma = cfg.KEYPOINT.SIGMA #NETWORK.SIGMA
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
self.db = []
self.num_joints = cfg.KEYPOINT.NUM_PTS
self.union_joints = {
0: 'root',
1: 'rhip',
2: 'rkne',
3: 'rank',
4: 'lhip',
5: 'lkne',
6: 'lank',
7: 'belly',
8: 'thorax', #new
9: 'neck',
10: 'upper neck', #new
11: 'nose',
12: 'head',
13: 'head top', #new
14: 'lsho',
15: 'lelb',
16: 'lwri',
17: 'rsho',
18: 'relb',
19: 'rwri'
}
#mask
# np.array([0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 9 , 11, 12, 14, 15, 16, 17, 18, 19])
#self.actual_joints = {
# 0: 'root',
# 1: 'rhip',
# 2: 'rkne',
# 3: 'rank',
# 4: 'lhip',
# 5: 'lkne',
# 6: 'lank',
# 7: 'belly',
# 8: 'neck',
# 9: 'nose',
# 10: 'head',
# 11: 'lsho',
# 12: 'lelb',
# 13: 'lwri',
# 14: 'rsho',
# 15: 'relb',
# 16: 'rwri'
#}
self.actual_joints = {}
self.u2a_mapping = {}
if cfg.DATALOADER.BENCHMARK:
from utils.timer import Timer
self.timer = Timer()
self.timer0 = Timer()
if cfg.VIS.H36M:
self.checked = []
# def compute_distorted_meshgrid(self, image, fx, fy, cx, cy, k, p):
# h, w = image.shape[:2]
# print('h ', h, 'w', w, 'cx', cx, 'cy', cy, 'fx', fx, 'fy', fy, 'p', p, 'k',k)
# grid_x = (np.arange(w, dtype=np.float32) - cx) / fx
# grid_y = (np.arange(h, dtype=np.float32) - cy) / fy
# meshgrid = np.stack(np.meshgrid(grid_x, grid_y), axis=2).reshape(-1, 2)
# r2 = meshgrid[:, 0] ** 2 + meshgrid[:, 1] ** 2
# radial = meshgrid * (1 + k[0] * r2 + k[1] * r2**2 + k[2] * r2**3).reshape(-1, 1)
# tangential_1 = p.reshape(1, 2) * np.broadcast_to(meshgrid[:, 0:1] * meshgrid[:, 1:2], (len(meshgrid), 2))
# tangential_2 = p[::-1].reshape(1, 2) * (meshgrid**2 + np.broadcast_to(r2.reshape(-1, 1), (len(meshgrid), 2)))
# meshgrid = radial + tangential_1 + tangential_2
# # move back to screen coordinates
# meshgrid *= np.array([fx, fy]).reshape(1, 2)
# meshgrid += np.array([cx, cy]).reshape(1, 2)
# # cache (save) distortion maps
# meshgrid_int16 = cv2.convertMaps(meshgrid.reshape((h, w, 2)), None, cv2.CV_16SC2)
# image_undistorted = cv2.remap(image, *meshgrid_int16, cv2.INTER_CUBIC)
# #meshgrid_int16 = meshgrid.reshape(h, w, 2)
# #image_undistorted = cv2.remap(image, meshgrid_int16, cv2.INTER_CUBIC)
# return image_undistorted
def get_mapping(self):
union_keys = list(self.union_joints.keys())
union_values = list(self.union_joints.values())
mapping = {k: '*' for k in union_keys}
for k, v in self.actual_joints.items():
idx = union_values.index(v)
key = union_keys[idx]
mapping[key] = k
return mapping
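    # Sketch of what get_mapping returns (illustrative values only): if a subclass sets
    # actual_joints = {0: 'root', 1: 'rhip', 2: 'rkne'}, the mapping comes back as
    # {0: 0, 1: 1, 2: 2, 3: '*', ..., 19: '*'}; every union joint the dataset does not
    # provide stays marked '*' and is left zero-filled by do_mapping below.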
def do_mapping(self):
mapping = self.u2a_mapping
for item in self.db:
joints = item['joints_2d']
joints_vis = item['joints_vis']
njoints = len(mapping)
joints_union = np.zeros(shape=(njoints, 2))
joints_union_vis = np.zeros(shape=(njoints, 3))
for i in range(njoints):
if mapping[i] != '*':
index = int(mapping[i])
joints_union[i] = joints[index]
joints_union_vis[i] = joints_vis[index]
item['joints_2d'] = joints_union
item['joints_vis'] = joints_union_vis
def _get_db(self):
raise NotImplementedError
def get_key_str(self, datum):
return 's_{:02}_act_{:02}_subact_{:02}_imgid_{:06}'.format(
datum['subject'], datum['action'], datum['subaction'],
datum['image_id'])
def evaluate(self, preds, *args, **kwargs):
raise NotImplementedError
def __len__(self):
return len(self.db)
def isdamaged(self, db_rec):
#damaged seq
#'Greeting-2', 'SittingDown-2', 'Waiting-1'
if db_rec['subject'] == 9:
if db_rec['action'] != 5 or db_rec['subaction'] != 2:
if db_rec['action'] != 10 or db_rec['subaction'] != 2:
if db_rec['action'] != 13 or db_rec['subaction'] != 1:
return False
else:
return False
return True
def __getitem__(self, idx):
if cfg.DATALOADER.BENCHMARK: self.timer0.tic()
db_rec = copy.deepcopy(self.db[idx])
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
if cfg.VIS.H36M:
#seq = (db_rec['subject'], db_rec['action'], db_rec['subaction'])
#if not seq in self.checked:
# print(seq)
# print(self.isdamaged(db_rec))
# self.checked.append(seq)
#else:
# return np.ones(2)
print(db_rec['image'])
# print(db_rec['image'])
if self.data_format == 'undistoredzip':
image_dir = 'undistoredimages.zip@'
elif self.data_format == 'zip':
image_dir = 'images.zip@'
else:
image_dir = ''
image_file = osp.join(self.root, db_rec['source'], image_dir, 'images',
db_rec['image'])
if 'zip' in self.data_format:
from utils import zipreader
data_numpy = zipreader.imread(
image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
else:
data_numpy = cv2.imread(
image_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# crop image from 1002 x 1000 to 1000 x 1000
data_numpy = data_numpy[:1000]
assert data_numpy.shape == (1000, 1000, 3), data_numpy.shape
joints = db_rec['joints_2d'].copy()
joints_3d = db_rec['joints_3d'].copy()
joints_3d_camera = db_rec['joints_3d_camera'].copy()
joints_3d_camera_normed = joints_3d_camera - joints_3d_camera[0]
keypoint_scale = np.linalg.norm(joints_3d_camera_normed[8] - joints_3d_camera_normed[0])
joints_3d_camera_normed /= keypoint_scale
if cfg.DATALOADER.BENCHMARK:
assert joints.shape[0] == cfg.KEYPOINT.NUM_PTS, joints.shape[0]
#assert db_rec['joints_3d'].shape[0] == cfg.KEYPOINT.NUM_PTS,db_rec['joints_3d'].shape[0]
center = np.array(db_rec['center']).copy()
joints_vis = db_rec['joints_vis'].copy()
scale = np.array(db_rec['scale']).copy()
#undistort
camera = db_rec['camera']
R = camera['R'].copy()
rotation = 0
K = np.array([
[float(camera['fx']), 0, float(camera['cx'])],
[0, float(camera['fy']), float(camera['cy'])],
[0, 0, 1.],
])
T = camera['T'].copy()
world3d = (R.T @ joints_3d_camera.T + T).T
Rt = np.zeros((3, 4))
Rt[:, :3] = R
Rt[:, 3] = -R @ T.squeeze()
# Rt[:, :3] = R.T
# Rt[:, 3] = T.squeeze()
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
if cfg.VIS.H36M:
if not np.isclose(world3d, joints_3d).all():
print('world3d difference')
print(world3d)
print('joints_3d')
print(joints_3d)
from IPython import embed
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig = plt.figure(1)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
ax1.imshow(data_numpy[..., ::-1])
ax1.set_title('raw')
#0.058 s
distCoeffs = np.array([float(i) for i in [camera['k'][0], camera['k'][1], camera['p'][0], camera['p'][1], camera['k'][2]]])
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
if self.data_format != 'undistoredzip':
data_numpy = cv2.undistort(data_numpy, K, distCoeffs)
#0.30 s
if cfg.DATALOADER.BENCHMARK: print('timer0', self.timer0.toc())
if cfg.DATALOADER.BENCHMARK: self.timer.tic()
if cfg.VIS.H36M:
ax1.scatter(joints[:, 0], joints[:, 1], color='green')
imagePoints, _ = cv2.projectPoints(joints_3d[:, None, :], (0,0,0), (0,0,0), K, distCoeffs)
imagePoints = imagePoints.squeeze()
ax1.scatter(imagePoints[:, 0], imagePoints[:, 1], color='yellow')
from vision.multiview import project_point_radial
camera = db_rec['camera']
f = (K[0, 0] + K[1, 1])/2.
c = K[:2, 2].reshape((2, 1))
iccv19Points = project_point_radial(joints_3d_camera, f, c, camera['k'], camera['p'])
ax1.scatter(iccv19Points[:, 0], iccv19Points[:, 1], color='blue')
# trans1 = get_affine_transform(center, scale, rotation, self.image_size)
# box1 = affine_transform(np.array([[0, 0], [999, 999]]), trans1)
# print(box1)
# rect1 = patches.Rectangle(box1[0],box1[1][0] - box1[0][0],box1[1][1] - box1[0][1],linewidth=1,edgecolor='r',facecolor='none')
# ax1.add_patch(rect1)
# print(joints, joints.shape, center.shape)
joints = cv2.undistortPoints(joints[:, None, :], K, distCoeffs, P=K).squeeze()
center = cv2.undistortPoints(np.array(center)[None, None, :], K, distCoeffs, P=K).squeeze()
#data_numpy = self.compute_distorted_meshgrid(data_numpy ,
# float(camera['fx']),
# float(camera['fy']),
# float(camera['cx']),
# float(camera['cy']),
# np.array([float(i) for i in camera['k']]),
# np.array([float(i) for i in camera['p']]))
if self.is_train:
sf = self.scale_factor
rf = self.rotation_factor
scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
rotation = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
if random.random() <= 0.6 else 0
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
if cfg.VIS.H36M:
# print(joints.shape, center.shape)
# print(trans)
ax2.imshow(data_numpy[..., ::-1])
projected2d = K.dot(joints_3d_camera.T)
projected2d[:2] = projected2d[:2] / projected2d[-1]
ax1.scatter(projected2d[0], projected2d[1], color='red')
ax2.scatter(joints[:, 0], joints[:, 1], color='green')
ax2.scatter(projected2d[0], projected2d[1], color='red')
# box1 = affine_transform(np.array([[0, 0], [999, 999]]), trans)
# rect1 = patches.Rectangle(box1[0],box1[1][0] - box1[0][0],box1[1][1] - box1[0][1],linewidth=1,edgecolor='r',facecolor='none')
# ax2.add_patch(rect1)
ax2.set_title('undistort')
#input = data_numpy
trans = get_affine_transform(center, scale, rotation, self.image_size)
cropK = np.concatenate((trans, np.array([[0., 0., 1.]])), 0).dot(K)
KRT = cropK.dot(Rt)
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
input = cv2.warpAffine(
data_numpy,
trans, (int(self.image_size[0]), int(self.image_size[1])),
flags=cv2.INTER_LINEAR)
# 0.31 s
for i in range(self.num_joints):
if joints_vis[i, 0] > 0.0:
joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
if (np.min(joints[i, :2]) < 0 or
joints[i, 0] >= self.image_size[0] or
joints[i, 1] >= self.image_size[1]):
joints_vis[i, :] = 0
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
if cfg.VIS.H36M:
ax3.imshow(input[..., ::-1])
# ax3.scatter(joints[:, 0], joints[:, 1])
# projected2d = KRT.dot(np.concatenate((db_rec['joints_3d'], np.ones( (len(db_rec['joints_3d']), 1))), 1).T)
ax3.scatter(joints[:, 0], joints[:, 1])
ax3.set_title('cropped')
ax4.imshow(input[..., ::-1])
# ax4.scatter(joints[:, 0], joints[:, 1])
# projected2d = KRT.dot(np.concatenate((db_rec['joints_3d'], np.ones( (len(db_rec['joints_3d']), 1))), 1).T)
projected2d = cropK.dot(joints_3d_camera.T)
projected2d[:2] = projected2d[:2] / projected2d[-1]
#ax4.scatter(joints[:, 0], joints[:, 1], color='green')
#ax4.scatter(projected2d[0], projected2d[1], color='red')
ax4.scatter(joints[-2:, 0], joints[-2:, 1], color='green')
ax4.scatter(projected2d[0, -2:], projected2d[1, -2:], color='red')
ax4.set_title('cropped, project 3d to 2d')
if self.transform:
input = self.transform(input)
target = self.heatmapcreator.get(joints)
target = target.reshape((-1, target.shape[1], target.shape[2]))
target_weight = joints_vis[:, 0, None]
## inaccurate heatmap
#target, target_weight = self.generate_target(joints, joints_vis)
# target = torch.from_numpy(target).float()
# target_weight = torch.from_numpy(target_weight)
if cfg.VIS.H36M:
#ax5.imshow(target.max(0)[0])
#ax5.scatter(coord2pix(joints[:, 0], 4), coord2pix(joints[:, 1], 4), color='green')
from modeling.backbones.basic_batch import find_tensor_peak_batch
# pred_joints, _ = find_tensor_peak_batch(target, self.sigma, cfg.BACKBONE.DOWNSAMPLE)
# ax5.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='blue')
# ax6.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='blue')
heatmap_by_creator = self.heatmapcreator.get(joints)
heatmap_by_creator = heatmap_by_creator.reshape((-1, heatmap_by_creator.shape[1], heatmap_by_creator.shape[2]))
ax6.imshow(heatmap_by_creator.max(0))
ax6.scatter(coord2pix(joints[:, 0], 4), coord2pix(joints[:, 1], 4), color='green')
# pred_joints, _ = find_tensor_peak_batch(torch.from_numpy(heatmap_by_creator).float(), self.sigma, cfg.BACKBONE.DOWNSAMPLE)
# print('creator found', pred_joints)
# ax5.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='red')
# ax6.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='red')
plt.show()
ret = {
'heatmap': target,
'visibility':target_weight,
'KRT': KRT,
'points-2d': joints,
'points-3d': world3d.astype(np.double) if 'lifting' not in cfg.DATASETS.TASK else world3d,
'camera-points-3d': joints_3d_camera,
'normed-points-3d': joints_3d_camera_normed,
'scale': keypoint_scale,
'action' : torch.tensor([db_rec['action']]),
'img-path': db_rec['image'],
}
if cfg.DATASETS.TASK not in ['lifting', 'lifting_direct', 'lifting_rot']:
ret['img'] = input
ret['K'] = cropK
ret['RT'] = Rt
if cfg.VIS.MULTIVIEWH36M:
ret['T'] = T
ret['R'] = R
ret['original_image'] = data_numpy
if cfg.KEYPOINT.TRIANGULATION == 'rpsm' and not self.is_train:
ret['origK'] = K
ret['crop_center'] = center
ret['crop_scale'] = scale
if cfg.DATALOADER.BENCHMARK: print('timer1', self.timer.toc())
return ret
# meta = {
# 'scale': scale,
# 'center': center,
# 'rotation': rotation,
# 'joints_2d': db_rec['joints_2d'],
# 'joints_2d_transformed': joints,
# 'joints_vis': joints_vis,
# 'source': db_rec['source']
# }
#return input, target, target_weight, meta
def generate_target(self, joints_3d, joints_vis):
target, weight = self.generate_heatmap(joints_3d, joints_vis)
return target, weight
def generate_heatmap(self, joints, joints_vis):
'''
:param joints: [num_joints, 3]
:param joints_vis: [num_joints, 3]
:return: target, target_weight(1: visible, 0: invisible)
'''
target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
target_weight[:, 0] = joints_vis[:, 0]
target = np.zeros(
(self.num_joints, self.heatmap_size[1], self.heatmap_size[0]),
dtype=np.float32)
tmp_size = self.sigma * 3
for joint_id in range(self.num_joints):
feat_stride = np.zeros(2)
feat_stride[0] = self.image_size[0] / self.heatmap_size[0]
feat_stride[1] = self.image_size[1] / self.heatmap_size[1]
mu_x = int(coord2pix(joints[joint_id][0], feat_stride[0]) + 0.5)
mu_y = int(coord2pix(joints[joint_id][1], feat_stride[1]) + 0.5)
ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
or br[0] < 0 or br[1] < 0:
target_weight[joint_id] = 0
continue
size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * self.sigma**2))
g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return target, target_weight
|
proxypool/conf.py | Lemon2go/ProxyPool | 359 | 11153701 | # Redis Host
HOST = 'localhost'
# Redis PORT
PORT = 6379
# Name of the Redis set that stores the proxy pool
POOL_NAME = 'proxies'
# Lower and upper size thresholds of the pool
POOL_LOWER_THRESHOLD = 10
POOL_UPPER_THRESHOLD = 40
# Periods of the two scheduler processes
VALID_CHECK_CYCLE = 600
POOL_LEN_CHECK_CYCLE = 20
# headers
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8'
}
|
backUp/jd_spider_requests.py | lllrrr2/jd-3 | 354 | 11153703 | <filename>backUp/jd_spider_requests.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Environment variables
export wy_spike_cookie=""  # cookie of the account used for the flash sale
export wy_spike_wy_eid="I2KGUFMH75SR5GXA65CVOTSKXJRPUIXVJREX23DWXYHUC7PSCXD5277TIQSFTHYRRB4VPW4CVBVMD5FRGT7HEE4W6U" # eid
export wy_spike_fp="e7f3d2960def70d91633928c492950bb" # fp
export wy_spike_sku_id=""  # sku_id of the product to buy
export wy_spike_payment_pwd=""  # payment password; the Python script cannot encrypt it, so read the code yourself if you are worried and do not use this if you cannot follow it
'''
# Values can also be filled in here in the script; they are not needed when the environment variables are set, and the environment variables take priority if both are set.
wy_spike_cookie=""  # cookie of the account used for the flash sale
wy_spike_wy_eid=""  # eid
wy_spike_fp=""  # fp
wy_spike_sku_id=""  # sku_id of the product to buy
wy_spike_payment_pwd=""  # payment password; the Python script cannot encrypt it, so read the code yourself if you are worried and do not use this if you cannot follow it
import os
import sys
sys.path.append(os.path.abspath('../../tmp'))
import random
import re
import time
import functools
import json
import pickle
from concurrent.futures import ProcessPoolExecutor
try:
    import requests
except Exception as e:
    print(str(e) + "\nMissing the requests module, please run: pip3 install requests\n")
requests.packages.urllib3.disable_warnings()
class SKException(Exception):
    """Raised when the seckill flow cannot continue; defined here because no import provides it."""
    pass
# 13-digit (millisecond) timestamp
def gettimestamp():
    return str(int(time.time() * 1000))
# read a configuration value: environment variable first, then config.sh, then the in-script value
def get_env(env):
try:
if env in os.environ:
a=os.environ[env]
elif '/ql' in os.path.abspath(os.path.dirname(__file__)):
try:
a=v4_env(env,'/ql/config/config.sh')
except:
a=eval(env)
elif '/jd' in os.path.abspath(os.path.dirname(__file__)):
try:
a=v4_env(env,'/jd/config/config.sh')
except:
a=eval(env)
else:
a=eval(env)
except:
a=False
return a
# v4
def v4_env(env,paths):
b=re.compile(r'(?:export )?'+env+r' ?= ?[\"\'](.*?)[\"\']', re.I)
with open(paths, 'r') as f:
for line in f.readlines():
try:
c=b.match(line).group(1)
break
except:
pass
return c
USER_AGENTS = [
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.4; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2225.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 4.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
"Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2309.372 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.2117.157 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1866.237 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36 Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.517 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.62 Safari/537.36",
"Mozilla/5.0 (X11; CrOS i686 4319.74.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.2 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1467.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1500.55 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.90 Safari/537.36",
"Mozilla/5.0 (X11; NetBSD) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36",
"Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.60 Safari/537.17",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.15 (KHTML, like Gecko) Chrome/24.0.1295.0 Safari/537.15",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.14 (KHTML, like Gecko) Chrome/24.0.1292.0 Safari/537.14"
]
# pick a random user-agent
def ua():
return random.choice(USER_AGENTS)
# session
def SpiderSession():
cookie=get_env('wy_spike_cookie')
a=dict()
cookie=cookie.split(';')
pt_key=cookie[0].split('=')
pt_pin=cookie[1].split('=')
a[pt_key[0]]=pt_key[1]
a[pt_pin[0]]=pt_pin[1]
a = requests.utils.cookiejar_from_dict(a)
session=requests.session()
session.headers={
"user-agent": ua(),
"Accept": "text/html,application/xhtml+xml,application/xml;"
"q=0.9,image/webp,image/apng,*/*;"
"q=0.8,application/signed-exchange;"
"v=b3",
"Connection": "keep-alive"
}
session.cookies=a
return session
def parse_json(s):
begin = s.find('{')
end = s.rfind('}') + 1
return json.loads(s[begin:end])
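# parse_json strips a JSONP wrapper such as "jQuery1234567({...});" down to the inner JSON
# object. A minimal sketch of its use (the payload below is made up for illustration):
def _parse_json_example():
    wrapped = 'jQuery1234567({"url": "//divide.jd.com/user_routing?skuId=1"});'
    return parse_json(wrapped)  # -> {'url': '//divide.jd.com/user_routing?skuId=1'}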
class JdSeckill(object):
def __init__(self):
        # initialize instance state
self.sku_id = int(get_env('wy_spike_sku_id'))
self.seckill_num = 1
self.seckill_init_info = dict()
self.seckill_url = dict()
self.seckill_order_data = dict()
self.session = SpiderSession()
self.nick_name = None
# def get_sku_title(self):
# """获取商品名称"""
# url = f'https://item.jd.com/{self.sku_id}.html'
# resp = self.session.get(url).text
# x_data = etree.HTML(resp)
# sku_title = x_data.xpath('/html/head/title/text()')
# return sku_title[0]
def get_seckill_url(self):
"""获取商品的抢购链接
点击"抢购"按钮后,会有两次302跳转,最后到达订单结算页面
这里返回第一次跳转后的页面url,作为商品的抢购链接
:return: 商品的抢购链接
"""
url = 'https://itemko.jd.com/itemShowBtn'
payload = {
'callback': f'jQuery{random.randint(1000000, 9999999)}',
'skuId': self.sku_id,
'from': 'pc',
'_': gettimestamp(),
}
headers = {
'cookie': 'pt_key=app_openAAJhetAUADCCRtiN8iFDEMFcXRcPqFwg8s7ViToZwHZUtMpkxNEWvAYHd9bWJYyz63OpnoSSxVE;pt_pin=jd_6187eef06db59;',
'user-agent': ua(),
'Host': 'itemko.jd.com',
'Referer': f'https://item.jd.com/{self.sku_id}.html',
}
for n in range(9):
resp = self.session.get(url=url, headers=headers, params=payload)
resp_json = parse_json(resp.text)
print(resp_json)
if resp_json.get('url'):
# https://divide.jd.com/user_routing?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc
router_url = 'https:' + resp_json.get('url')
# https://marathon.jd.com/captcha.html?skuId=8654289&sn=c3f4ececd8461f0e4d7267e96a91e0e0&from=pc
seckill_url = router_url.replace(
'divide', 'marathon').replace(
'user_routing', 'captcha.html')
print(f"抢购链接获取成功: {seckill_url}")
return seckill_url
else:
print("抢购链接获取失败,正在重试")
def request_seckill_url(self):
"""访问商品的抢购链接(用于设置cookie等"""
# print('用户:{}'.format(self.get_username()))
# print('商品名称:{}'.format(self.get_sku_title()))
# self.timers.start()
self.seckill_url[self.sku_id] = self.get_seckill_url()
print('访问商品的抢购连接...')
headers = {
'user-agent': ua(),
'Host': 'marathon.jd.com',
'Referer': f'https://item.jd.com/{self.sku_id}.html',
}
self.session.get(
url=self.seckill_url.get(
self.sku_id),
headers=headers,
allow_redirects=False)
def request_seckill_checkout_page(self):
"""访问抢购订单结算页面"""
print('访问抢购订单结算页面...')
url = 'https://marathon.jd.com/seckill/seckill.action'
payload = {
'skuId': self.sku_id,
'num': self.seckill_num,
'rid': int(time.time())
}
headers = {
'user-agent': ua(),
'Host': 'marathon.jd.com',
'Referer': f'https://item.jd.com/{self.sku_id}.html',
}
self.session.get(url=url, params=payload, headers=headers, allow_redirects=False)
def _get_seckill_init_info(self):
"""获取秒杀初始化信息(包括:地址,发票,token)
:return: 初始化信息组成的dict
"""
print('获取秒杀初始化信息...')
url = f'https://trade.jd.com/shopping/dynamic/consignee/getConsigneeList.action?charset=UTF-8&callback=jQuery4383861&_={gettimestamp()}'
data = {
'sku': self.sku_id,
'num': self.seckill_num,
'isModifyAddress': 'false',
}
headers = {
'user-agent': ua(),
'Host': 'trade.jd.com',
}
resp = self.session.get(url=url, headers=headers)
resp_json = None
try:
resp_json = parse_json(resp.text)
except Exception:
            raise SKException(f'Seckill failed, response: {resp.text[0: 128]}')
return resp_json
def _get_seckill_order_data(self):
"""生成提交抢购订单所需的请求体参数
:return: 请求体参数组成的dict
"""
print('生成提交抢购订单所需参数...')
# 获取用户秒杀初始化信息
self.seckill_init_info[self.sku_id] = self._get_seckill_init_info()
init_info = self.seckill_init_info.get(self.sku_id)
default_address = init_info['addressList'][0] # 默认地址dict
invoice_info = init_info.get('invoiceInfo', {}) # 默认发票信息dict, 有可能不返回
token = init_info['token']
data = {
'skuId': self.sku_id,
'num': self.seckill_num,
'addressId': default_address['id'],
'yuShou': 'true',
'isModifyAddress': 'false',
'name': default_address['name'],
'provinceId': default_address['provinceId'],
'cityId': default_address['cityId'],
'countyId': default_address['countyId'],
'townId': default_address['townId'],
'addressDetail': default_address['addressDetail'],
'mobile': default_address['mobile'],
'mobileKey': default_address['mobileKey'],
'email': default_address.get('email', ''),
'postCode': '',
'invoiceTitle': invoice_info.get('invoiceTitle', -1),
'invoiceCompanyName': '',
'invoiceContent': invoice_info.get('invoiceContentType', 1),
'invoiceTaxpayerNO': '',
'invoiceEmail': '',
'invoicePhone': invoice_info.get('invoicePhone', ''),
'invoicePhoneKey': invoice_info.get('invoicePhoneKey', ''),
'invoice': 'true' if invoice_info else 'false',
'password': get_env('wy_spike_payment_pwd'),
'codTimeType': 3,
'paymentType': 4,
'areaCode': '',
'overseas': 0,
'phone': '',
            'eid': get_env('wy_spike_wy_eid'),
'fp': get_env('wy_spike_fp'),
'token': token,
'pru': ''
}
return data
def submit_seckill_order(self):
"""提交抢购(秒杀)订单
:return: 抢购结果 True/False
"""
url = 'https://marathon.jd.com/seckillnew/orderService/pc/submitOrder.action'
payload = {
'skuId': self.sku_id,
}
try:
self.seckill_order_data[self.sku_id] = self._get_seckill_order_data()
except Exception as e:
            print(f'Seckill failed: could not build the basic order info, the API returned: [{str(e)}]')
            return False
        print('Submitting the seckill order...')
headers = {
'user-agent': ua(),
'Host': 'marathon.jd.com',
'Referer': f'https://marathon.jd.com/seckill/seckill.action?skuId={self.sku_id}&num={self.seckill_num}&rid={int(time.time())}',
}
resp = self.session.post(
url=url,
params=payload,
data=self.seckill_order_data.get(
self.sku_id),
headers=headers)
resp_json = None
try:
resp_json = parse_json(resp.text)
except Exception as e:
            print(f'Seckill failed, response: {resp.text[0: 128]}')
return False
        # Response formats:
        # on failure (the errorMessage strings below are the server's Chinese messages):
# {'errorMessage': '很遗憾没有抢到,再接再厉哦。', 'orderId': 0, 'resultCode': 60074, 'skuId': 0, 'success': False}
# {'errorMessage': '抱歉,您提交过快,请稍后再提交订单!', 'orderId': 0, 'resultCode': 60017, 'skuId': 0, 'success': False}
# {'errorMessage': '系统正在开小差,请重试~~', 'orderId': 0, 'resultCode': 90013, 'skuId': 0, 'success': False}
        # on success:
# {"appUrl":"xxxxx","orderId":820227xxxxx,"pcUrl":"xxxxx","resultCode":0,"skuId":0,"success":true,"totalMoney":"xxxxx"}
if resp_json.get('success'):
order_id = resp_json.get('orderId')
total_money = resp_json.get('totalMoney')
pay_url = 'https:' + resp_json.get('pcUrl')
            print(f'Seckill succeeded. Order id: {order_id}, total price: {total_money}, desktop payment link: {pay_url}')
return True
else:
            print(f'Seckill failed, response: {resp_json}')
return False
def seckill_by_proc_pool(self, work_count=3):
"""
多进程进行抢购
work_count:进程数量
"""
with ProcessPoolExecutor(work_count) as pool:
for i in range(work_count):
pool.submit(self._seckill)
def _seckill(self):
"""
抢购
"""
for n in range(9):
try:
self.request_seckill_url()
while True:
self.request_seckill_checkout_page()
self.submit_seckill_order()
except Exception as e:
                print('An exception occurred during the seckill, will continue shortly!', e)
def main():
JdSeckill().seckill_by_proc_pool()
if __name__ == '__main__':
main()
|
etl/parsers/etw/Microsoft_Windows_Hyper_V_Hypervisor.py | IMULMUL/etl-parser | 104 | 11153718 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-Hyper-V-Hypervisor
GUID : 52fc89f8-995e-434c-a91e-199986449890
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=2, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_2_0(Etw):
pattern = Struct(
"SchedulerType" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=10, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_10_0(Etw):
pattern = Struct(
"Error" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=11, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_11_0(Etw):
pattern = Struct(
"Error" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=12, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_12_0(Etw):
pattern = Struct(
"ProcessorFeatures" / Int64ul,
"XsaveFeatures" / Int64ul,
"CLFlushSize" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=20, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_20_0(Etw):
pattern = Struct(
"ErrorCode" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=26, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_26_0(Etw):
pattern = Struct(
"BalStatus" / Int64ul,
"Error" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=34, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_34_0(Etw):
pattern = Struct(
"ImageName" / WString,
"Status" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=36, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_36_0(Etw):
pattern = Struct(
"ImageName" / WString
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=37, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_37_0(Etw):
pattern = Struct(
"ImageName" / WString
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=38, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_38_0(Etw):
pattern = Struct(
"BalStatus" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=39, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_39_0(Etw):
pattern = Struct(
"LoadOptions" / CString
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=40, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_40_0(Etw):
pattern = Struct(
"HypervisorVersion" / Int32ul,
"VersionSupported" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=46, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_46_0(Etw):
pattern = Struct(
"MSRIndex" / Int32ul,
"AllowedZeroes" / Int64ul,
"AllowedOnes" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=48, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_48_0(Etw):
pattern = Struct(
"Leaf" / Int32ul,
"Register" / Int32ul,
"FeaturesNeeded" / Int32ul,
"FeaturesSupported" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=63, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_63_0(Etw):
pattern = Struct(
"Phase" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=80, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_80_0(Etw):
pattern = Struct(
"NtStatus" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=86, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_86_0(Etw):
pattern = Struct(
"ExpectedVersion" / Int32ul,
"ActualVersion" / Int32ul,
"ExpectedFunctionTableSize" / Int32ul,
"ActualFunctionTableSize" / Int32ul,
"UpdateDllName" / WString
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=96, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_96_0(Etw):
pattern = Struct(
"CPU" / Int32ul,
"ErrorCode" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=97, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_97_0(Etw):
pattern = Struct(
"CPU" / Int32ul,
"LeafNumber" / Int64ul,
"Register" / Int64ul,
"BSPCpuidData" / Int64ul,
"APCpuidData" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=129, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_129_0(Etw):
pattern = Struct(
"HardwarePresent" / Int8ul,
"HardwareEnabled" / Int8ul,
"Policy" / Int64ul,
"EnabledFeatures" / Int64ul,
"InternalInfo" / Int64ul,
"Problems" / Int64ul,
"AdditionalInfo" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=144, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_144_0(Etw):
pattern = Struct(
"DeviceId" / Int64ul,
"PartitionId" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=145, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_145_0(Etw):
pattern = Struct(
"DeviceId" / Int64ul,
"PartitionId" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=146, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_146_0(Etw):
pattern = Struct(
"DeviceId" / Int64ul,
"PartitionId" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=147, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_147_0(Etw):
pattern = Struct(
"IoApicId" / Int8ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=148, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_148_0(Etw):
pattern = Struct(
"DeviceId" / Int64ul,
"UnitBaseAddress" / Int64ul,
"PartitionId" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=149, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_149_0(Etw):
pattern = Struct(
"DeviceId" / Int64ul,
"PartitionId" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=152, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_152_0(Etw):
pattern = Struct(
"ImageName" / WString,
"Status" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=153, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_153_0(Etw):
pattern = Struct(
"ImageName" / WString
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=154, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_154_0(Etw):
pattern = Struct(
"MaxDelta" / Int64sl,
"MinDelta" / Int64sl
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=155, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_155_0(Etw):
pattern = Struct(
"ProcessorFeatures" / Int64ul,
"XsaveFeatures" / Int64ul,
"CLFlushSize" / Int32ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=156, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_156_0(Etw):
pattern = Struct(
"NotAffectedRdclNo" / Int8ul,
"NotAffectedAtom" / Int8ul,
"CacheFlushSupported" / Int8ul,
"SmtEnabled" / Int8ul,
"ParentHypervisorFlushes" / Int8ul,
"DisabledLoadOption" / Int8ul,
"Enabled" / Int8ul,
"CacheFlushNeeded" / Int8ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=8451, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_8451_0(Etw):
pattern = Struct(
"Error" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=12550, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_12550_0(Etw):
pattern = Struct(
"Msr" / Int32ul,
"IsWrite" / Int8ul,
"MsrValue" / Int64ul,
"AccessStatus" / Int16ul,
"Pc" / Int64ul,
"ImageBase" / Int64ul,
"ImageChecksum" / Int32ul,
"ImageTimestamp" / Int32ul,
"ImageName" / CString
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=16641, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_16641_0(Etw):
pattern = Struct(
"PartitionId" / Int64ul
)
@declare(guid=guid("52fc89f8-995e-434c-a91e-199986449890"), event_id=16642, version=0)
class Microsoft_Windows_Hyper_V_Hypervisor_16642_0(Etw):
pattern = Struct(
"PartitionId" / Int64ul
)
|
tests/opytimizer/optimizers/evolutionary/test_iwo.py | anukaal/opytimizer | 528 | 11153775 | import numpy as np
from opytimizer.optimizers.evolutionary import iwo
from opytimizer.spaces import search
np.random.seed(0)
def test_iwo_params():
params = {
'min_seeds': 0,
'max_seeds': 5,
'e': 2,
'final_sigma': 0.001,
'init_sigma': 3
}
new_iwo = iwo.IWO(params=params)
assert new_iwo.min_seeds == 0
assert new_iwo.max_seeds == 5
assert new_iwo.e == 2
assert new_iwo.final_sigma == 0.001
assert new_iwo.init_sigma == 3
def test_iwo_params_setter():
new_iwo = iwo.IWO()
try:
new_iwo.min_seeds = 'a'
except:
new_iwo.min_seeds = 0
try:
new_iwo.min_seeds = -1
except:
new_iwo.min_seeds = 0
assert new_iwo.min_seeds == 0
try:
new_iwo.max_seeds = 'b'
except:
new_iwo.max_seeds = 2
try:
new_iwo.max_seeds = -1
except:
new_iwo.max_seeds = 2
assert new_iwo.max_seeds == 2
try:
new_iwo.e = 'c'
except:
new_iwo.e = 1.5
try:
new_iwo.e = -1
except:
new_iwo.e = 1.5
assert new_iwo.e == 1.5
try:
new_iwo.final_sigma = 'd'
except:
new_iwo.final_sigma = 1.5
try:
new_iwo.final_sigma = -1
except:
new_iwo.final_sigma = 1.5
assert new_iwo.final_sigma == 1.5
try:
new_iwo.init_sigma = 'e'
except:
new_iwo.init_sigma = 2.0
try:
new_iwo.init_sigma = -1
except:
new_iwo.init_sigma = 2.0
try:
new_iwo.init_sigma = 1.3
except:
new_iwo.init_sigma = 2.0
assert new_iwo.init_sigma == 2.0
try:
new_iwo.sigma = 'f'
except:
new_iwo.sigma = 1
assert new_iwo.sigma == 1
def test_iwo_spatial_dispersal():
new_iwo = iwo.IWO()
new_iwo._spatial_dispersal(1, 10)
assert new_iwo.sigma == 2.43019
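# Where the 2.43019 above comes from, assuming the usual IWO dispersal schedule
# sigma = ((n_iterations - iteration) / n_iterations) ** e * (init_sigma - final_sigma) + final_sigma
# with the defaults e=2, init_sigma=3 and final_sigma=0.001:
#   ((10 - 1) / 10) ** 2 * (3 - 0.001) + 0.001 = 0.81 * 2.999 + 0.001 = 2.43019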
def test_iwo_produce_offspring():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=2, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_iwo = iwo.IWO()
agent = new_iwo._produce_offspring(search_space.agents[0], square)
assert type(agent).__name__ == 'Agent'
def test_iwo_update():
def square(x):
return np.sum(x**2)
new_iwo = iwo.IWO()
new_iwo.min_seeds = 5
new_iwo.max_seeds = 20
search_space = search.SearchSpace(n_agents=5, n_variables=2,
lower_bound=[1, 1], upper_bound=[10, 10])
new_iwo.update(search_space, square, 1, 10)
|
tensorflow/contrib/bayesflow/python/ops/hmc_impl.py | ryorda/tensorflow-viennacl | 522 | 11153780 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hamiltonian Monte Carlo, a gradient-based MCMC algorithm.
@@chain
@@kernel
@@leapfrog_integrator
@@leapfrog_step
@@ais_chain
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = [
'chain',
'kernel',
'leapfrog_integrator',
'leapfrog_step',
'ais_chain'
]
def _make_potential_and_grad(target_log_prob_fn):
def potential_and_grad(x):
log_prob_result = -target_log_prob_fn(x)
grad_result = gradients_impl.gradients(math_ops.reduce_sum(log_prob_result),
x)[0]
return log_prob_result, grad_result
return potential_and_grad
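# Sign convention used throughout this file: the "potential" returned above is the negative
# target log-density, U(x) = -log p(x), together with its gradient. A NumPy-only sketch for
# a standard normal target (illustrative; it does not touch the TF graph code):
def _np_potential_and_grad_example(x):
  # for log p(x) = -0.5 * sum(x**2) + const, U(x) = 0.5 * sum(x**2) and dU/dx = x
  return 0.5 * np.sum(np.square(x)), x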
def chain(n_iterations, step_size, n_leapfrog_steps, initial_x,
target_log_prob_fn, event_dims=(), name=None):
"""Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)
algorithm that takes a series of gradient-informed steps to produce
a Metropolis proposal. This function samples from an HMC Markov
chain whose initial state is `initial_x` and whose stationary
distribution has log-density `target_log_prob_fn()`.
This function can update multiple chains in parallel. It assumes
that all dimensions of `initial_x` not specified in `event_dims` are
independent, and should therefore be updated independently. The
output of `target_log_prob_fn()` should sum log-probabilities across
all event dimensions. Slices along dimensions not in `event_dims`
may have different target distributions; this is up to
`target_log_prob_fn()`.
This function basically just wraps `hmc.kernel()` in a tf.scan() loop.
Args:
n_iterations: Integer number of Markov chain updates to run.
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`initial_x`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely.
When possible, it's often helpful to match per-variable step
sizes to the standard deviations of the target distribution in
each variable.
n_leapfrog_steps: Integer number of steps to run the leapfrog
integrator for. Total progress per HMC step is roughly
proportional to step_size * n_leapfrog_steps.
initial_x: Tensor of initial state(s) of the Markov chain(s).
target_log_prob_fn: Python callable which takes an argument like `initial_x`
and returns its (possibly unnormalized) log-density under the target
distribution.
event_dims: List of dimensions that should not be treated as
independent. This allows for multiple chains to be run independently
in parallel. Default is (), i.e., all dimensions are independent.
name: Python `str` name prefixed to Ops created by this function.
Returns:
acceptance_probs: Tensor with the acceptance probabilities for each
iteration. Has shape matching `target_log_prob_fn(initial_x)`.
chain_states: Tensor with the state of the Markov chain at each iteration.
Has shape `[n_iterations, initial_x.shape[0],...,initial_x.shape[-1]`.
#### Examples:
```python
# Sampling from a standard normal (note `log_joint()` is unnormalized):
def log_joint(x):
return tf.reduce_sum(-0.5 * tf.square(x))
chain, acceptance_probs = hmc.chain(1000, 0.5, 2, tf.zeros(10), log_joint,
event_dims=[0])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from a diagonal-variance Gaussian:
variances = tf.linspace(1., 3., 10)
def log_joint(x):
return tf.reduce_sum(-0.5 / variances * tf.square(x))
chain, acceptance_probs = hmc.chain(1000, 0.5, 2, tf.zeros(10), log_joint,
event_dims=[0])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from factor-analysis posteriors with known factors W:
# mu[i, j] ~ Normal(0, 1)
# x[i] ~ Normal(matmul(mu[i], W), I)
def log_joint(mu, x, W):
prior = -0.5 * tf.reduce_sum(tf.square(mu), 1)
x_mean = tf.matmul(mu, W)
likelihood = -0.5 * tf.reduce_sum(tf.square(x - x_mean), 1)
return prior + likelihood
chain, acceptance_probs = hmc.chain(1000, 0.1, 2,
tf.zeros([x.shape[0], W.shape[0]]),
lambda mu: log_joint(mu, x, W),
event_dims=[1])
# Discard first half of chain as warmup/burn-in
warmed_up = chain[500:]
mean_est = tf.reduce_mean(warmed_up, 0)
var_est = tf.reduce_mean(tf.square(warmed_up), 0) - tf.square(mean_est)
```
```python
# Sampling from the posterior of a Bayesian regression model.:
# Run 100 chains in parallel, each with a different initialization.
initial_beta = tf.random_normal([100, x.shape[1]])
chain, acceptance_probs = hmc.chain(1000, 0.1, 10, initial_beta,
log_joint_partial, event_dims=[1])
# Discard first halves of chains as warmup/burn-in
warmed_up = chain[500:]
# Averaging across samples within a chain and across chains
mean_est = tf.reduce_mean(warmed_up, [0, 1])
var_est = tf.reduce_mean(tf.square(warmed_up), [0, 1]) - tf.square(mean_est)
```
"""
with ops.name_scope(name, 'hmc_chain', [n_iterations, step_size,
n_leapfrog_steps, initial_x]):
initial_x = ops.convert_to_tensor(initial_x, name='initial_x')
non_event_shape = array_ops.shape(target_log_prob_fn(initial_x))
def body(a, _):
updated_x, acceptance_probs, log_prob, grad = kernel(
step_size, n_leapfrog_steps, a[0], target_log_prob_fn, event_dims,
a[2], a[3])
return updated_x, acceptance_probs, log_prob, grad
potential_and_grad = _make_potential_and_grad(target_log_prob_fn)
potential, grad = potential_and_grad(initial_x)
return functional_ops.scan(body, array_ops.zeros(n_iterations),
(initial_x, array_ops.zeros(non_event_shape),
-potential, -grad))[:2]
def ais_chain(n_iterations, step_size, n_leapfrog_steps, initial_x,
target_log_prob_fn, proposal_log_prob_fn, event_dims=(),
name=None):
"""Runs annealed importance sampling (AIS) to estimate normalizing constants.
This routine uses Hamiltonian Monte Carlo to sample from a series of
distributions that slowly interpolates between an initial "proposal"
distribution
`exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
and the target distribution
`exp(target_log_prob_fn(x) - target_log_normalizer)`,
accumulating importance weights along the way. The product of these
importance weights gives an unbiased estimate of the ratio of the
normalizing constants of the initial distribution and the target
distribution:
E[exp(w)] = exp(target_log_normalizer - proposal_log_normalizer).
Args:
n_iterations: Integer number of Markov chain updates to run. More
iterations means more expense, but smoother annealing between q
and p, which in turn means exponentially lower variance for the
normalizing constant estimator.
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`initial_x`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely.
When possible, it's often helpful to match per-variable step
sizes to the standard deviations of the target distribution in
each variable.
n_leapfrog_steps: Integer number of steps to run the leapfrog
integrator for. Total progress per HMC step is roughly
proportional to step_size * n_leapfrog_steps.
initial_x: Tensor of initial state(s) of the Markov chain(s). Must
be a sample from q, or results will be incorrect.
target_log_prob_fn: Python callable which takes an argument like `initial_x`
and returns its (possibly unnormalized) log-density under the target
distribution.
proposal_log_prob_fn: Python callable that returns the log density of the
initial distribution.
event_dims: List of dimensions that should not be treated as
independent. This allows for multiple chains to be run independently
in parallel. Default is (), i.e., all dimensions are independent.
name: Python `str` name prefixed to Ops created by this function.
Returns:
ais_weights: Tensor with the estimated weight(s). Has shape matching
`target_log_prob_fn(initial_x)`.
chain_states: Tensor with the state(s) of the Markov chain(s) the final
iteration. Has shape matching `initial_x`.
acceptance_probs: Tensor with the acceptance probabilities for the final
iteration. Has shape matching `target_log_prob_fn(initial_x)`.
#### Examples:
```python
# Estimating the normalizing constant of a log-gamma distribution:
def proposal_log_prob(x):
# Standard normal log-probability. This is properly normalized.
return tf.reduce_sum(-0.5 * tf.square(x) - 0.5 * np.log(2 * np.pi), 1)
def target_log_prob(x):
# Unnormalized log-gamma(2, 3) distribution.
# True normalizer is (lgamma(2) - 2 * log(3)) * x.shape[1]
return tf.reduce_sum(2. * x - 3. * tf.exp(x), 1)
# Run 100 AIS chains in parallel
initial_x = tf.random_normal([100, 20])
w, _, _ = hmc.ais_chain(1000, 0.2, 2, initial_x, target_log_prob,
proposal_log_prob, event_dims=[1])
log_normalizer_estimate = tf.reduce_logsumexp(w) - np.log(100)
```
```python
# Estimating the marginal likelihood of a Bayesian regression model:
base_measure = -0.5 * np.log(2 * np.pi)
def proposal_log_prob(x):
# Standard normal log-probability. This is properly normalized.
return tf.reduce_sum(-0.5 * tf.square(x) + base_measure, 1)
def regression_log_joint(beta, x, y):
# This function returns a vector whose ith element is log p(beta[i], y | x).
# Each row of beta corresponds to the state of an independent Markov chain.
log_prior = tf.reduce_sum(-0.5 * tf.square(beta) + base_measure, 1)
means = tf.matmul(beta, x, transpose_b=True)
log_likelihood = tf.reduce_sum(-0.5 * tf.square(y - means) +
base_measure, 1)
return log_prior + log_likelihood
def log_joint_partial(beta):
return regression_log_joint(beta, x, y)
# Run 100 AIS chains in parallel
initial_beta = tf.random_normal([100, x.shape[1]])
w, beta_samples, _ = hmc.ais_chain(1000, 0.1, 2, initial_beta,
log_joint_partial, proposal_log_prob,
event_dims=[1])
log_normalizer_estimate = tf.reduce_logsumexp(w) - np.log(100)
```
"""
with ops.name_scope(name, 'hmc_ais_chain',
[n_iterations, step_size, n_leapfrog_steps, initial_x]):
non_event_shape = array_ops.shape(target_log_prob_fn(initial_x))
beta_series = math_ops.linspace(0., 1., n_iterations+1)[1:]
def _body(a, beta): # pylint: disable=missing-docstring
def log_prob_beta(x):
return ((1 - beta) * proposal_log_prob_fn(x) +
beta * target_log_prob_fn(x))
last_x = a[0]
w = a[2]
w += (1. / n_iterations) * (target_log_prob_fn(last_x) -
proposal_log_prob_fn(last_x))
# TODO(b/66917083): There's an opportunity for gradient reuse here.
updated_x, acceptance_probs, _, _ = kernel(step_size, n_leapfrog_steps,
last_x, log_prob_beta,
event_dims)
return updated_x, acceptance_probs, w
x, acceptance_probs, w = functional_ops.scan(
_body, beta_series, (initial_x, array_ops.zeros(non_event_shape),
array_ops.zeros(non_event_shape)))
return w[-1], x[-1], acceptance_probs[-1]
def kernel(step_size, n_leapfrog_steps, x, target_log_prob_fn, event_dims=(),
x_log_prob=None, x_grad=None, name=None):
"""Runs one iteration of Hamiltonian Monte Carlo.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)
algorithm that takes a series of gradient-informed steps to produce
a Metropolis proposal. This function applies one step of HMC to
randomly update the variable `x`.
This function can update multiple chains in parallel. It assumes
that all dimensions of `x` not specified in `event_dims` are
independent, and should therefore be updated independently. The
output of `target_log_prob_fn()` should sum log-probabilities across
all event dimensions. Slices along dimensions not in `event_dims`
may have different target distributions; for example, if
`event_dims == (1,)`, then `x[0, :]` could have a different target
distribution from x[1, :]. This is up to `target_log_prob_fn()`.
Args:
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`x`. Larger step sizes lead to faster progress, but
too-large step sizes make rejection exponentially more likely.
When possible, it's often helpful to match per-variable step
sizes to the standard deviations of the target distribution in
each variable.
n_leapfrog_steps: Integer number of steps to run the leapfrog
integrator for. Total progress per HMC step is roughly
proportional to step_size * n_leapfrog_steps.
x: Tensor containing the value(s) of the random variable(s) to update.
target_log_prob_fn: Python callable which takes an argument like `initial_x`
and returns its (possibly unnormalized) log-density under the target
distribution.
event_dims: List of dimensions that should not be treated as
independent. This allows for multiple chains to be run independently
in parallel. Default is (), i.e., all dimensions are independent.
x_log_prob (optional): Tensor containing the cached output of a previous
call to `target_log_prob_fn()` evaluated at `x` (such as that provided by
a previous call to `kernel()`). Providing `x_log_prob` and
`x_grad` saves one gradient computation per call to `kernel()`.
x_grad (optional): Tensor containing the cached gradient of
`target_log_prob_fn()` evaluated at `x` (such as that provided by
a previous call to `kernel()`). Providing `x_log_prob` and
`x_grad` saves one gradient computation per call to `kernel()`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
updated_x: The updated variable(s) x. Has shape matching `initial_x`.
acceptance_probs: Tensor with the acceptance probabilities for the final
iteration. This is useful for diagnosing step size problems etc. Has
shape matching `target_log_prob_fn(initial_x)`.
new_log_prob: The value of `target_log_prob_fn()` evaluated at `updated_x`.
new_grad: The value of the gradient of `target_log_prob_fn()` evaluated at
`updated_x`.
#### Examples:
```python
# Tuning acceptance rates:
target_accept_rate = 0.631
def target_log_prob(x):
# Standard normal
return tf.reduce_sum(-0.5 * tf.square(x))
initial_x = tf.zeros([10])
initial_log_prob = target_log_prob(initial_x)
initial_grad = tf.gradients(initial_log_prob, initial_x)[0]
# Algorithm state
x = tf.Variable(initial_x, name='x')
step_size = tf.Variable(1., name='step_size')
last_log_prob = tf.Variable(initial_log_prob, name='last_log_prob')
last_grad = tf.Variable(initial_grad, name='last_grad')
# Compute updates
new_x, acceptance_prob, log_prob, grad = hmc.kernel(step_size, 3, x,
target_log_prob,
event_dims=[0],
x_log_prob=last_log_prob)
x_update = tf.assign(x, new_x)
log_prob_update = tf.assign(last_log_prob, log_prob)
grad_update = tf.assign(last_grad, grad)
step_size_update = tf.assign(step_size,
tf.where(acceptance_prob > target_accept_rate,
step_size * 1.01, step_size / 1.01))
adaptive_updates = [x_update, log_prob_update, grad_update, step_size_update]
sampling_updates = [x_update, log_prob_update, grad_update]
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Warm up the sampler and adapt the step size
for i in xrange(500):
sess.run(adaptive_updates)
# Collect samples without adapting step size
samples = np.zeros([500, 10])
for i in xrange(500):
x_val, _ = sess.run([new_x, sampling_updates])
samples[i] = x_val
```
```python
# Empirical-Bayes estimation of a hyperparameter by MCMC-EM:
# Problem setup
N = 150
D = 10
x = np.random.randn(N, D).astype(np.float32)
true_sigma = 0.5
true_beta = true_sigma * np.random.randn(D).astype(np.float32)
y = x.dot(true_beta) + np.random.randn(N).astype(np.float32)
def log_prior(beta, log_sigma):
return tf.reduce_sum(-0.5 / tf.exp(2 * log_sigma) * tf.square(beta) -
log_sigma)
def regression_log_joint(beta, log_sigma, x, y):
# This function returns log p(beta | log_sigma) + log p(y | x, beta).
means = tf.matmul(tf.expand_dims(beta, 0), x, transpose_b=True)
means = tf.squeeze(means)
log_likelihood = tf.reduce_sum(-0.5 * tf.square(y - means))
return log_prior(beta, log_sigma) + log_likelihood
def log_joint_partial(beta):
return regression_log_joint(beta, log_sigma, x, y)
# Our estimate of log(sigma)
log_sigma = tf.Variable(0., name='log_sigma')
# The state of the Markov chain
beta = tf.Variable(tf.random_normal([x.shape[1]]), name='beta')
new_beta, _, _, _ = hmc.kernel(0.1, 5, beta, log_joint_partial,
event_dims=[0])
beta_update = tf.assign(beta, new_beta)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
with tf.control_dependencies([beta_update]):
log_sigma_update = optimizer.minimize(-log_prior(beta, log_sigma),
var_list=[log_sigma])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
log_sigma_history = np.zeros(1000)
for i in xrange(1000):
log_sigma_val, _ = sess.run([log_sigma, log_sigma_update])
log_sigma_history[i] = log_sigma_val
# Should converge to something close to true_sigma
plt.plot(np.exp(log_sigma_history))
```
"""
with ops.name_scope(name, 'hmc_kernel', [step_size, n_leapfrog_steps, x]):
potential_and_grad = _make_potential_and_grad(target_log_prob_fn)
x_shape = array_ops.shape(x)
m = random_ops.random_normal(x_shape)
kinetic_0 = 0.5 * math_ops.reduce_sum(math_ops.square(m), event_dims)
if (x_log_prob is not None) and (x_grad is not None):
log_potential_0, grad_0 = -x_log_prob, -x_grad # pylint: disable=invalid-unary-operand-type
else:
if x_log_prob is not None:
logging.warn('x_log_prob was provided, but x_grad was not,'
' so x_log_prob was not used.')
if x_grad is not None:
logging.warn('x_grad was provided, but x_log_prob was not,'
' so x_grad was not used.')
log_potential_0, grad_0 = potential_and_grad(x)
new_x, new_m, log_potential_1, grad_1 = leapfrog_integrator(
step_size, n_leapfrog_steps, x, m, potential_and_grad, grad_0)
kinetic_1 = 0.5 * math_ops.reduce_sum(math_ops.square(new_m), event_dims)
# TODO(mhoffman): It seems like there may be an opportunity for nans here.
# I'm delaying addressing this because we're going to refactor this part
# to use the more general Metropolis abstraction anyway.
acceptance_probs = math_ops.exp(math_ops.minimum(0., log_potential_0 -
log_potential_1 +
kinetic_0 - kinetic_1))
accepted = math_ops.cast(
random_ops.random_uniform(array_ops.shape(acceptance_probs)) <
acceptance_probs, np.float32)
new_log_prob = (-log_potential_0 * (1. - accepted) -
log_potential_1 * accepted)
# TODO(b/65738010): This should work, but it doesn't for now.
# reduced_shape = math_ops.reduced_shape(x_shape, event_dims)
reduced_shape = array_ops.shape(math_ops.reduce_sum(x, event_dims,
keep_dims=True))
accepted = array_ops.reshape(accepted, reduced_shape)
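    # Per-chain Metropolis step: where accepted == 1 the proposal is kept,
    # where accepted == 0 the previous position and gradient are kept.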
new_x = x * (1. - accepted) + new_x * accepted
new_grad = -grad_0 * (1. - accepted) - grad_1 * accepted
return new_x, acceptance_probs, new_log_prob, new_grad
def leapfrog_integrator(step_size, n_steps, initial_position, initial_momentum,
potential_and_grad, initial_grad, name=None):
"""Applies `n_steps` steps of the leapfrog integrator.
This just wraps `leapfrog_step()` in a `tf.while_loop()`, reusing
gradient computations where possible.
Args:
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`initial_position`. Larger step sizes lead to faster progress, but
too-large step sizes lead to larger discretization error and
worse energy conservation.
n_steps: Number of steps to run the leapfrog integrator.
initial_position: Tensor containing the value(s) of the position variable(s)
to update.
initial_momentum: Tensor containing the value(s) of the momentum variable(s)
to update.
potential_and_grad: Python callable that takes a position tensor like
`initial_position` and returns the potential energy and its gradient at
that position.
initial_grad: Tensor with the value of the gradient of the potential energy
at `initial_position`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
updated_position: Updated value of the position.
updated_momentum: Updated value of the momentum.
new_potential: Potential energy of the new position. Has shape matching
`potential_and_grad(initial_position)`.
new_grad: Gradient from potential_and_grad() evaluated at the new position.
Has shape matching `initial_position`.
Example: Simple quadratic potential.
```python
def potential_and_grad(position):
return tf.reduce_sum(0.5 * tf.square(position)), position
position = tf.placeholder(np.float32)
momentum = tf.placeholder(np.float32)
potential, grad = potential_and_grad(position)
new_position, new_momentum, new_potential, new_grad = hmc.leapfrog_integrator(
0.1, 3, position, momentum, potential_and_grad, grad)
sess = tf.Session()
position_val = np.random.randn(10)
momentum_val = np.random.randn(10)
potential_val, grad_val = sess.run([potential, grad],
{position: position_val})
positions = np.zeros([100, 10])
for i in xrange(100):
position_val, momentum_val, potential_val, grad_val = sess.run(
[new_position, new_momentum, new_potential, new_grad],
{position: position_val, momentum: momentum_val})
positions[i] = position_val
# Should trace out sinusoidal dynamics.
plt.plot(positions[:, 0])
```
"""
def leapfrog_wrapper(step_size, x, m, grad, l):
x, m, _, grad = leapfrog_step(step_size, x, m, potential_and_grad, grad)
return step_size, x, m, grad, l + 1
def counter_fn(a, b, c, d, counter): # pylint: disable=unused-argument
return counter < n_steps
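  # The loop state is (step_size, position, momentum, grad, counter);
  # counter_fn terminates the loop after n_steps leapfrog steps.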
with ops.name_scope(name, 'leapfrog_integrator',
[step_size, n_steps, initial_position, initial_momentum,
initial_grad]):
_, new_x, new_m, new_grad, _ = control_flow_ops.while_loop(
counter_fn, leapfrog_wrapper, [step_size, initial_position,
initial_momentum, initial_grad,
array_ops.constant(0)], back_prop=False)
# We're counting on the runtime to eliminate this redundant computation.
new_potential, new_grad = potential_and_grad(new_x)
return new_x, new_m, new_potential, new_grad
def leapfrog_step(step_size, position, momentum, potential_and_grad, grad,
name=None):
"""Applies one step of the leapfrog integrator.
Assumes a simple quadratic kinetic energy function: 0.5 * ||momentum||^2.
Args:
step_size: Scalar step size or array of step sizes for the
leapfrog integrator. Broadcasts to the shape of
`position`. Larger step sizes lead to faster progress, but
too-large step sizes lead to larger discretization error and
worse energy conservation.
position: Tensor containing the value(s) of the position variable(s)
to update.
momentum: Tensor containing the value(s) of the momentum variable(s)
to update.
potential_and_grad: Python callable that takes a position tensor like
`position` and returns the potential energy and its gradient at that
position.
grad: Tensor with the value of the gradient of the potential energy
at `position`.
name: Python `str` name prefixed to Ops created by this function.
Returns:
updated_position: Updated value of the position.
updated_momentum: Updated value of the momentum.
new_potential: Potential energy of the new position. Has shape matching
`potential_and_grad(position)`.
new_grad: Gradient from potential_and_grad() evaluated at the new position.
Has shape matching `position`.
Example: Simple quadratic potential.
```python
def potential_and_grad(position):
# Simple quadratic potential
return tf.reduce_sum(0.5 * tf.square(position)), position
position = tf.placeholder(np.float32)
momentum = tf.placeholder(np.float32)
potential, grad = potential_and_grad(position)
new_position, new_momentum, new_potential, new_grad = hmc.leapfrog_step(
0.1, position, momentum, potential_and_grad, grad)
sess = tf.Session()
position_val = np.random.randn(10)
momentum_val = np.random.randn(10)
potential_val, grad_val = sess.run([potential, grad],
{position: position_val})
positions = np.zeros([100, 10])
for i in xrange(100):
position_val, momentum_val, potential_val, grad_val = sess.run(
[new_position, new_momentum, new_potential, new_grad],
{position: position_val, momentum: momentum_val})
positions[i] = position_val
# Should trace out sinusoidal dynamics.
plt.plot(positions[:, 0])
```
"""
with ops.name_scope(name, 'leapfrog_step', [step_size, position, momentum,
grad]):
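    # Leapfrog update: half step on the momentum, full step on the position,
    # then a second half step on the momentum using the gradient at the new position.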
momentum -= 0.5 * step_size * grad
position += step_size * momentum
potential, grad = potential_and_grad(position)
momentum -= 0.5 * step_size * grad
return position, momentum, potential, grad
|
setup.py | phrfpeixoto/python-redis-rate-limit | 105 | 11153785 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('requirements.txt') as f:
requires = f.readlines()
setup(
name='python-redis-rate-limit',
version='0.0.7',
description=u'Python Rate Limiter based on Redis.',
long_description=readme,
author=u'<NAME>',
author_email=u'<EMAIL>',
url=u'https://github.com/evoluxbr/python-redis-rate-limit',
license=u'MIT',
packages=find_packages(exclude=('tests', 'docs')),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License'
],
install_requires=requires
)
|
homeassistant/components/forecast_solar/energy.py | MrDelik/core | 30,023 | 11153796 | <filename>homeassistant/components/forecast_solar/energy.py
"""Energy platform."""
from __future__ import annotations
from homeassistant.core import HomeAssistant
from .const import DOMAIN
async def async_get_solar_forecast(
hass: HomeAssistant, config_entry_id: str
) -> dict[str, dict[str, float | int]] | None:
"""Get solar forecast for a config entry ID."""
if (coordinator := hass.data[DOMAIN].get(config_entry_id)) is None:
return None
return {
"wh_hours": {
timestamp.isoformat(): val
for timestamp, val in coordinator.data.wh_hours.items()
}
}
|
src/pretalx/common/migrations/0003_activitylog_is_orga_action.py | hrchu/pretalx | 418 | 11153845 | <filename>src/pretalx/common/migrations/0003_activitylog_is_orga_action.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-05 13:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("common", "0002_auto_20170429_1018"),
]
operations = [
migrations.AddField(
model_name="activitylog",
name="is_orga_action",
field=models.BooleanField(default=False),
),
]
|
examples/web_cookies.py | loven-doo/aiohttp | 10,338 | 11153849 | #!/usr/bin/env python3
"""Example for aiohttp.web basic server with cookies.
"""
from pprint import pformat
from typing import NoReturn
from aiohttp import web
tmpl = """\
<html>
<body>
<a href="/login">Login</a><br/>
<a href="/logout">Logout</a><br/>
<pre>{}</pre>
</body>
</html>"""
async def root(request: web.Request) -> web.StreamResponse:
resp = web.Response(content_type="text/html")
resp.text = tmpl.format(pformat(request.cookies))
return resp
async def login(request: web.Request) -> NoReturn:
exc = web.HTTPFound(location="/")
exc.set_cookie("AUTH", "secret")
raise exc
async def logout(request: web.Request) -> NoReturn:
exc = web.HTTPFound(location="/")
exc.del_cookie("AUTH")
raise exc
def init() -> web.Application:
app = web.Application()
app.router.add_get("/", root)
app.router.add_get("/login", login)
app.router.add_get("/logout", logout)
return app
web.run_app(init())
|
Sources/Workflows/SearchKippt/kippt/clips.py | yagosys/AlfredWorkflow.com | 2,177 | 11153866 | import requests
import json
class Clips:
"""Clips class
Handles the clips endpoint of the Kippt API.
"""
def __init__(self, kippt):
""" Instantiates a Clips object.
Parameters:
kippt - KipptAPI object
"""
self.kippt = kippt
def all(self, **args):
""" Return all Clips.
"""
limit = args['limit'] if 'limit' in args else 20
offset = args['offset'] if 'offset' in args else 0
r = requests.get(
"https://kippt.com/api/clips?limit=%s&offset=%s" % (limit, offset),
headers=self.kippt.header
)
return (r.json())
def feed(self, **args):
""" Return the Clip feed.
"""
limit = args['limit'] if 'limit' in args else 20
offset = args['offset'] if 'offset' in args else 0
r = requests.get(
"https://kippt.com/api/clips/feed?limit=%s&offset=%s" % (limit, offset),
headers=self.kippt.header
)
return (r.json())
def favorites(self, **args):
""" Return favorite clips.
"""
limit = args['limit'] if 'limit' in args else 20
offset = args['offset'] if 'offset' in args else 0
r = requests.get(
"https://kippt.com/api/clips/favorites?limit=%s&offset=%s" % (limit, offset),
headers=self.kippt.header
)
return (r.json())
def create(self, url, **args):
""" Create a new Kippt Clip.
Parameters:
- url (Required)
- args Dictionary of other fields
Accepted fields can be found here:
https://github.com/kippt/api-documentation/blob/master/objects/clip.md
"""
# Merge our url as a parameter and JSONify it.
data = json.dumps(dict({'url': url}, **args))
r = requests.post(
"https://kippt.com/api/clips",
headers=self.kippt.header,
data=data
)
return (r.json())
def search(self, query, **args):
""" Search for a clip.
Parameters:
- query String we are searching for.
"""
limit = args['limit'] if 'limit' in args else 20
offset = args['offset'] if 'offset' in args else 0
r = requests.get(
"https://kippt.com/api/clips/search?q=%s&limit=%s&offset=%s" % (query, limit, offset),
headers=self.kippt.header
)
return (r.json())
def clip(self, id):
""" Returns a Clip object.
"""
return Clip(self.kippt, id)
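# A rough usage sketch of the wrapper above; `kippt_api` stands in for any object
# exposing a `header` dict with authentication headers, and the clip id and extra
# fields below are illustrative values only:
#
#   clips = Clips(kippt_api)
#   clips.create('https://example.com', title='An example clip')
#   print(clips.clip(1234).content())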
class Clip:
"""Clip class
Handles individual clip requests.
"""
def __init__(self, kippt, id):
""" Instantiates a clip object given a KipptAPI object, and a clip ID.
"""
self.kippt = kippt
self.id = id
# GET Requests
def content(self):
""" Retrieve the Clip object.
"""
r = requests.get(
"https://kippt.com/api/clips/%s" % (self.id),
headers=self.kippt.header
)
return (r.json())
def comments(self, **args):
""" Retrieve comments on a clip.
"""
limit = args['limit'] if 'limit' in args else 20
offset = args['offset'] if 'offset' in args else 0
r = requests.get(
"https://kippt.com/api/clips/%s/comments?limit=%s&offset=%s" % (self.id, limit, offset),
headers=self.kippt.header
)
return (r.json())
def likes(self, **args):
""" Retrieve likes of a clip.
"""
limit = args['limit'] if 'limit' in args else 20
offset = args['offset'] if 'offset' in args else 0
r = requests.get(
"https://kippt.com/api/clips/%s/likes?limit=%s&offset=%s" % (self.id, limit, offset),
headers=self.kippt.header
)
return (r.json())
# PUT & POST Requests
def update(self, **args):
""" Updates a Clip.
Parameters:
- args Dictionary of other fields
Accepted fields can be found here:
https://github.com/kippt/api-documentation/blob/master/objects/clip.md
"""
# JSONify our data.
data = json.dumps(args)
r = requests.put(
"https://kippt.com/api/clips/%s" % (self.id),
headers=self.kippt.header,
data=data)
return (r.json())
def like(self):
""" Like a clip.
"""
r = requests.post(
"https://kippt.com/api/clips/%s/likes" % (self.id),
headers=self.kippt.header
)
return (r.json())
def favorite(self):
""" Favorite a clip.
"""
r = requests.post(
"https://kippt.com/api/clips/%s/favorite" % (self.id),
headers=self.kippt.header
)
return (r.json())
def comment(self, body):
""" Comment on a clip.
Parameters:
- body (Required)
"""
# Merge our url as a parameter and JSONify it.
data = json.dumps({'body': body})
r = requests.post(
"https://kippt.com/api/clips/%s/comments" (self.id),
headers=self.kippt.header,
data=data
)
return (r.json())
# DELETE Requests
def delete(self):
""" Delete a clip.
"""
r = requests.delete(
"https://kippt.com/api/clips/%s" (self.id),
headers=self.kippt.header
)
return (r.json())
def unfavorite(self):
""" Unfavorite a clip.
"""
r = requests.delete(
"https://kippt.com/api/clips/%s/favorite" % (self.id),
headers=self.kippt.header
)
return (r.json())
def unlike(self):
""" Unlike a clip.
"""
r = requests.delete(
"https://kippt.com/api/clips/%s/likes" % (self.id),
headers=self.kippt.header)
return (r.json())
def uncomment(self, comment_id):
""" Remove a comment on a clip.
Parameters:
- comment_id ID of the comment to be removed.
"""
r = requests.delete(
"https://kippt.com/api/clips/%s/comments/%s" % (self.id, comment_id),
headers=self.kippt.header
)
return (r.json())
|
alipay/aop/api/domain/AlipayAccountInstfundAccountSyncModel.py | antopen/alipay-sdk-python-all | 213 | 11153868 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InstAccountDTO import InstAccountDTO
class AlipayAccountInstfundAccountSyncModel(object):
def __init__(self):
self._inst_account = None
@property
def inst_account(self):
return self._inst_account
@inst_account.setter
def inst_account(self, value):
if isinstance(value, InstAccountDTO):
self._inst_account = value
else:
self._inst_account = InstAccountDTO.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.inst_account:
if hasattr(self.inst_account, 'to_alipay_dict'):
params['inst_account'] = self.inst_account.to_alipay_dict()
else:
params['inst_account'] = self.inst_account
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayAccountInstfundAccountSyncModel()
if 'inst_account' in d:
o.inst_account = d['inst_account']
return o
|
pytest-virtualenv/pytest_virtualenv.py | manahl/pytest-plugins | 282 | 11153898 | """ Python virtual environment fixtures
"""
import os
import pathlib
import re
import shutil
import subprocess
import sys
from enum import Enum
import importlib_metadata as metadata
import pkg_resources
from pytest import yield_fixture
from pytest_shutil.workspace import Workspace
from pytest_shutil import run, cmdline
from pytest_fixture_config import Config, yield_requires_config
class PackageVersion(Enum):
LATEST = 1
CURRENT = 2
class FixtureConfig(Config):
    __slots__ = ('virtualenv_executable',)
# Default values for system resource locations - patch this to change defaults
# Can be a string or list of them
DEFAULT_VIRTUALENV_FIXTURE_EXECUTABLE = [sys.executable, '-m', 'virtualenv']
CONFIG = FixtureConfig(
virtualenv_executable=os.getenv('VIRTUALENV_FIXTURE_EXECUTABLE', DEFAULT_VIRTUALENV_FIXTURE_EXECUTABLE),
)
@yield_fixture(scope='function')
@yield_requires_config(CONFIG, ['virtualenv_executable'])
def virtualenv():
""" Function-scoped virtualenv in a temporary workspace.
Methods
-------
run() : run a command using this virtualenv's shell environment
run_with_coverage() : run a command in this virtualenv, collecting coverage
install_package() : install a package in this virtualenv
installed_packages() : return a dict of installed packages
Attributes
----------
virtualenv (`path.path`) : Path to this virtualenv's base directory
python (`path.path`) : Path to this virtualenv's Python executable
pip (`path.path`) : Path to this virtualenv's pip executable
.. also inherits all attributes from the `workspace` fixture
"""
venv = VirtualEnv()
yield venv
venv.teardown()
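# A minimal usage sketch for the fixture above; it assumes pytest collects this
# module and that the `six` distribution is reachable from the configured index:
#
#   def test_can_install_package(virtualenv):
#       virtualenv.install_package('six')
#       assert 'six' in virtualenv.installed_packages()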
class PackageEntry(object):
# TODO: base this off of setuptools Distribution class or something not home-grown
PACKAGE_TYPES = (ANY, DEV, SRC, REL) = ('ANY', 'DEV', 'SRC', 'REL')
def __init__(self, name, version, source_path=None):
self.name = name
self.version = version
self.source_path = source_path
@property
def issrc(self):
return ("dev" in self.version and
self.source_path is not None and
not self.source_path.endswith(".egg"))
@property
def isrel(self):
return not self.isdev
@property
def isdev(self):
return ('dev' in self.version and
(not self.source_path or self.source_path.endswith(".egg")))
def match(self, package_type):
if package_type is self.ANY:
return True
elif package_type is self.REL:
if self.isrel:
return True
elif package_type is self.DEV:
if self.isdev:
return True
elif package_type is self.SRC:
if self.issrc:
return True
return False
class VirtualEnv(Workspace):
"""
Creates a virtualenv in a temporary workspace, cleans up on exit.
Attributes
----------
python : `str`
path to the python exe
virtualenv : `str`
path to the virtualenv base dir
env : 'list'
environment variables used in creation of virtualenv
delete_workspace: `None or bool`
If True then the workspace will be deleted
If False then the workspace will be kept
If None (default) then the workspace will be deleted if workspace is also None, but it will be kept otherwise
"""
# TODO: update to use pip, remove distribute
def __init__(self, env=None, workspace=None, name='.env', python=None, args=None, delete_workspace=None):
if delete_workspace is None:
delete_workspace = workspace is None
Workspace.__init__(self, workspace, delete_workspace)
self.virtualenv = self.workspace / name
self.args = args or []
if sys.platform == 'win32':
# In virtualenv on windows "Scripts" folder is used instead of "bin".
self.python = self.virtualenv / 'Scripts' / 'python.exe'
self.pip = self.virtualenv / 'Scripts' / 'pip.exe'
self.coverage = self.virtualenv / 'Scripts' / 'coverage.exe'
else:
self.python = self.virtualenv / 'bin' / 'python'
self.pip = self.virtualenv / "bin" / "pip"
self.coverage = self.virtualenv / 'bin' / 'coverage'
if env is None:
self.env = dict(os.environ)
else:
self.env = dict(env) # ensure we take a copy just in case there's some modification
self.env['VIRTUAL_ENV'] = str(self.virtualenv)
self.env['PATH'] = str(self.python.dirname()) + ((os.path.pathsep + self.env["PATH"])
if "PATH" in self.env else "")
if 'PYTHONPATH' in self.env:
del(self.env['PYTHONPATH'])
self.virtualenv_cmd = CONFIG.virtualenv_executable
if isinstance(self.virtualenv_cmd, str):
cmd = [self.virtualenv_cmd]
else:
cmd = list(self.virtualenv_cmd)
cmd.extend(['-p', python or cmdline.get_real_python_executable()])
cmd.extend(self.args)
cmd.append(str(self.virtualenv))
self.run(cmd)
self._importlib_metadata_installed = False
def run(self, args, **kwargs):
"""
Add our cleaned shell environment into any subprocess execution
"""
if 'env' not in kwargs:
kwargs['env'] = self.env
return super(VirtualEnv, self).run(args, **kwargs)
def run_with_coverage(self, *args, **kwargs):
"""
Run a python script using coverage, run within this virtualenv.
Assumes the coverage module is already installed.
Parameters
----------
args:
Args passed into `pytest_shutil.run.run_with_coverage`
kwargs:
Keyword arguments to pass to `pytest_shutil.run.run_with_coverage`
"""
if 'env' not in kwargs:
kwargs['env'] = self.env
coverage = [str(self.python), str(self.coverage)]
return run.run_with_coverage(*args, coverage=coverage, **kwargs)
def install_package(self, pkg_name, version=PackageVersion.LATEST, installer="pip", installer_command="install"):
"""
Install a given package name. If it's already setup in the
test runtime environment, it will use that.
:param pkg_name: `str`
Name of the package to be installed
:param version: `str` or `PackageVersion`
If PackageVersion.LATEST then installs the latest version of the package from upstream
If PackageVersion.CURRENT then installs the same version that's installed in the current virtual environment
            that's running the tests. If the package is an egg-link, then copy it over. If the
package is not in the parent, then installs the latest version
If the value is a string, then it will be used as the version to install
:param installer: `str`
The installer used to install packages, `pip` by default
        :param installer_command: `str`
The command passed to the installed, `install` by default. So the resulting default install command is
`<venv>/Scripts/pip.exe install` on windows and `<venv>/bin/pip install` elsewhere
"""
if sys.platform == 'win32':
# In virtualenv on windows "Scripts" folder is used instead of "bin".
installer = str(self.virtualenv / 'Scripts' / installer + '.exe')
else:
installer = str(self.virtualenv / 'bin' / installer)
if not self.debug:
installer += ' -q'
if version == PackageVersion.LATEST:
self.run(
"{python} {installer} {installer_command} {spec}".format(
python=self.python, installer=installer, installer_command=installer_command, spec=pkg_name
)
)
elif version == PackageVersion.CURRENT:
dist = next(
iter([dist for dist in metadata.distributions() if _normalize(dist.name) == _normalize(pkg_name)]), None
)
if dist:
egg_link = _get_egg_link(dist.name)
if egg_link:
self._install_editable_package(egg_link, dist)
else:
spec = "{pkg_name}=={version}".format(pkg_name=pkg_name, version=dist.version)
self.run(
"{python} {installer} {installer_command} {spec}".format(
python=self.python, installer=installer, installer_command=installer_command, spec=spec
)
)
else:
self.run(
"{python} {installer} {installer_command} {spec}".format(
python=self.python, installer=installer, installer_command=installer_command, spec=pkg_name
)
)
else:
spec = "{pkg_name}=={version}".format(pkg_name=pkg_name, version=version)
self.run(
"{python} {installer} {installer_command} {spec}".format(
python=self.python, installer=installer, installer_command=installer_command, spec=spec
)
)
def installed_packages(self, package_type=None):
"""
Return a package dict with
key = package name, value = version (or '')
"""
# Lazily install importlib_metadata in the underlying virtual environment
self._install_importlib_metadata()
if package_type is None:
package_type = PackageEntry.ANY
elif package_type not in PackageEntry.PACKAGE_TYPES:
raise ValueError('invalid package_type parameter (%s)' % str(package_type))
res = {}
code = "import importlib_metadata as metadata\n"\
"for i in metadata.distributions(): print(i.name + ' ' + i.version + ' ' + str(i.locate_file('')))"
lines = self.run([self.python, "-c", code], capture=True).split('\n')
for line in [i.strip() for i in lines if i.strip()]:
name, version, location = line.split()
res[name] = PackageEntry(name, version, location)
return res
def _install_importlib_metadata(self):
if not self._importlib_metadata_installed:
self.install_package("importlib_metadata", version=PackageVersion.CURRENT)
self._importlib_metadata_installed = True
def _install_editable_package(self, egg_link, package):
python_dir = "python{}.{}".format(sys.version_info.major, sys.version_info.minor)
shutil.copy(egg_link, self.virtualenv / "lib" / python_dir / "site-packages" / egg_link.name)
easy_install_pth_path = self.virtualenv / "lib" / python_dir / "site-packages" / "easy-install.pth"
with open(easy_install_pth_path, "a") as pth, open(egg_link) as egg_link:
pth.write(egg_link.read())
pth.write("\n")
for spec in package.requires:
if not _is_extra_requirement(spec):
dependency = next(pkg_resources.parse_requirements(spec), None)
if dependency and (not dependency.marker or dependency.marker.evaluate()):
self.install_package(dependency.name, version=PackageVersion.CURRENT)
def _normalize(name):
return re.sub(r"[-_.]+", "-", name).lower()
def _get_egg_link(pkg_name):
for path in sys.path:
egg_link = pathlib.Path(path) / (pkg_name + ".egg-link")
if egg_link.is_file():
return egg_link
return None
def _is_extra_requirement(spec):
return any(x.replace(" ", "").startswith("extra==") for x in spec.split(";"))
|
BruteScan/migrations/0002_remove_bruteresult_result_flag_bruteresult_password_and_more.py | b0bac/ApolloScanner | 289 | 11153902 | <filename>BruteScan/migrations/0002_remove_bruteresult_result_flag_bruteresult_password_and_more.py
# Generated by Django 4.0.1 on 2022-03-09 12:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BruteScan', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='bruteresult',
name='result_flag',
),
migrations.AddField(
model_name='bruteresult',
name='password',
field=models.CharField(db_column='password', default='', max_length=32, verbose_name='口令'),
),
migrations.AddField(
model_name='bruteresult',
name='username',
field=models.CharField(db_column='username', default='', max_length=32, verbose_name='账号'),
),
]
|
examples/tutorial/visuals/T02_measurements.py | 3DAlgoLab/vispy | 2,617 | 11153923 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Tutorial: Creating Visuals
==========================
02. Making physical measurements
--------------------------------
In the last tutorial we created a simple Visual subclass that draws a
rectangle. In this tutorial, we will make two additions:
1. Draw a rectangular border instead of a solid rectangle
2. Make the border a fixed pixel width, even when displayed inside a
user-zoomable ViewBox.
The border is made by drawing a line_strip with 10 vertices::
1--------------3
| |
| 2------4 | [ note that points 9 and 10 are
| | | | the same as points 1 and 2 ]
| 8------6 |
| |
7--------------5
In order to ensure that the border has a fixed width in pixels, we need to
adjust the spacing between the inner and outer rectangles whenever the user
changes the zoom of the ViewBox.
How? Recall that each
time the visual is drawn, it is given a TransformSystem instance that carries
information about the size of logical and physical pixels relative to the
visual [link to TransformSystem documentation]. Essentially, we have 4
coordinate systems:
Visual -> Document -> Framebuffer -> Render
The user specifies the position and size of the rectangle in Visual
coordinates, and in [tutorial 1] we used the vertex shader to convert directly
from Visual coordinates to render coordinates. In this tutorial we will
convert first to document coordinates, then make the adjustment for the border
width, then convert the remainder of the way to render coordinates.
Let's say, for example that the user specifies the box width to be 20, and the
border width to be 5. To draw the border correctly, we cannot simply
add/subtract 5 from the inner rectangle coordinates; if the user zooms
in by a factor of 2 then the border would become 10 px wide.
Another way to say this is that a vector with length=1 in Visual coordinates
does not _necessarily_ have a length of 1 pixel on the canvas. Instead, we must
make use of the Document coordinate system, in which a vector of length=1
does correspond to 1 pixel.
There are a few ways we could make this measurement of pixel length. Here's
how we'll do it in this tutorial:
1. Begin with vertices for a rectangle with border width 0 (that is, vertex
1 is the same as vertex 2, 3=4, and so on).
2. In the vertex shader, first map the vertices to the document coordinate
system using the visual->document transform.
3. Add/subtract the line width from the mapped vertices.
4. Map the rest of the way to render coordinates with a second transform:
document->framebuffer->render.
Note that this problem _cannot_ be solved using a simple scale factor! It is
necessary to use these transformations in order to draw correctly when there
is rotation or anisotropic scaling involved.
"""
from vispy import app, gloo, visuals, scene
import numpy as np
vertex_shader = """
void main() {
// First map the vertex to document coordinates
vec4 doc_pos = $visual_to_doc(vec4($position, 0, 1));
// Also need to map the adjustment direction vector, but this is tricky!
// We need to adjust separately for each component of the vector:
vec4 adjusted;
if ( $adjust_dir.x == 0. ) {
// If this is an outer vertex, no adjustment for line weight is needed.
// (In fact, trying to make the adjustment would result in no
// triangles being drawn, hence the if/else block)
adjusted = doc_pos;
}
else {
        // Inner vertices must be adjusted for line width, but this is
// surprisingly tricky given that the rectangle may have been scaled
// and rotated!
vec4 doc_x = $visual_to_doc(vec4($adjust_dir.x, 0, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
vec4 doc_y = $visual_to_doc(vec4(0, $adjust_dir.y, 0, 0)) -
$visual_to_doc(vec4(0, 0, 0, 0));
doc_x = normalize(doc_x);
doc_y = normalize(doc_y);
// Now doc_x + doc_y points in the direction we need in order to
// correct the line weight of _both_ segments, but the magnitude of
// that correction is wrong. To correct it we first need to
// measure the width that would result from using doc_x + doc_y:
vec4 proj_y_x = dot(doc_x, doc_y) * doc_x; // project y onto x
float cur_width = length(doc_y - proj_y_x); // measure current weight
// And now we can adjust vertex position for line width:
adjusted = doc_pos + ($line_width / cur_width) * (doc_x + doc_y);
}
// Finally map the remainder of the way to render coordinates
gl_Position = $doc_to_render(adjusted);
}
"""
fragment_shader = """
void main() {
gl_FragColor = $color;
}
"""
class MyRectVisual(visuals.Visual):
"""Visual that draws a rectangular outline.
Parameters
----------
x : float
x coordinate of rectangle origin
y : float
y coordinate of rectangle origin
w : float
width of rectangle
h : float
height of rectangle
weight : float
width of border (in px)
"""
def __init__(self, x, y, w, h, weight=4.0):
visuals.Visual.__init__(self, vertex_shader, fragment_shader)
# 10 vertices for 8 triangles (using triangle_strip) forming a
# rectangular outline
self.vert_buffer = gloo.VertexBuffer(np.array([
[x, y],
[x, y],
[x+w, y],
[x+w, y],
[x+w, y+h],
[x+w, y+h],
[x, y+h],
[x, y+h],
[x, y],
[x, y],
], dtype=np.float32))
# Direction each vertex should move to correct for line width
# (the length of this vector will be corrected in the shader)
self.adj_buffer = gloo.VertexBuffer(np.array([
[0, 0],
[1, 1],
[0, 0],
[-1, 1],
[0, 0],
[-1, -1],
[0, 0],
[1, -1],
[0, 0],
[1, 1],
], dtype=np.float32))
self.shared_program.vert['position'] = self.vert_buffer
self.shared_program.vert['adjust_dir'] = self.adj_buffer
self.shared_program.vert['line_width'] = weight
self.shared_program.frag['color'] = (1, 0, 0, 1)
self.set_gl_state(cull_face=False)
self._draw_mode = 'triangle_strip'
def _prepare_transforms(self, view):
# Set the two transforms required by the vertex shader:
tr = view.transforms
view_vert = view.view_program.vert
view_vert['visual_to_doc'] = tr.get_transform('visual', 'document')
view_vert['doc_to_render'] = tr.get_transform('document', 'render')
# As in the previous tutorial, we auto-generate a Visual+Node class for use
# in the scenegraph.
MyRect = scene.visuals.create_visual_node(MyRectVisual)
# Finally we will test the visual by displaying in a scene.
canvas = scene.SceneCanvas(keys='interactive', show=True)
# This time we add a ViewBox to let the user zoom/pan
view = canvas.central_widget.add_view()
view.camera = 'panzoom'
view.camera.rect = (0, 0, 800, 800)
# ..and add the rects to the view instead of canvas.scene
rects = [MyRect(100, 100, 200, 300, parent=view.scene),
MyRect(500, 100, 200, 300, parent=view.scene)]
# Again, rotate one rectangle to ensure the transforms are working as we
# expect.
tr = visuals.transforms.MatrixTransform()
tr.rotate(25, (0, 0, 1))
rects[1].transform = tr
# Add some text instructions
text = scene.visuals.Text("Drag right mouse button to zoom.", color='w',
anchor_x='left', parent=view, pos=(20, 30))
# ..and optionally start the event loop
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1:
app.run()
|
kaolin/ops/spc/points.py | tovacinni/kaolin | 3,747 | 11153924 | <gh_stars>1000+
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'points_to_morton',
'morton_to_points',
'points_to_corners',
'coords_to_trilinear',
'unbatched_points_to_octree',
'quantize_points'
]
import torch
from kaolin import _C
def quantize_points(x, level):
r"""Quantize [-1, 1] float coordinates in to [0, (2^level)-1] integer coords.
If a point is out of the range [-1, 1] it will be clipped to it.
Args:
x (torch.FloatTensor): floating point coordinates,
            must be of last dimension 3.
level (int): Level of the grid
    Returns:
        (torch.ShortTensor): Quantized 3D points, of the same shape as x.
"""
res = 2 ** level
qpts = torch.floor(torch.clamp(res * (x + 1.0) / 2.0, 0, res - 1.)).short()
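    # For example, with level=1 (res=2): x=-1.0 -> 0, x=0.0 -> 1, x=1.0 -> 1 (clipped to res - 1).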
return qpts
def unbatched_points_to_octree(points, level, sorted=False):
r"""Convert (quantized) 3D points to an octree.
This function assumes that the points are all in the same frame of reference
of [0, 2^level]. Note that SPC.points does not satisfy this constraint.
Args:
points (torch.ShortTensor):
The Quantized 3d points. This is not exactly like SPC points hierarchies
as this is only the data for a specific level.
level (int): Max level of octree, and the level of the points.
sorted (bool): True if the points are unique and sorted in morton order.
Returns:
(torch.ByteTensor): the generated octree,
of shape :math:`(2^\text{level}, 2^\text{level}, 2^\text{level})`.
"""
if not sorted:
unique = torch.unique(points.contiguous(), dim=0).contiguous()
morton = torch.sort(points_to_morton(unique).contiguous())[0]
points = morton_to_points(morton.contiguous())
return _C.ops.spc.points_to_octree(points.contiguous(), level)
def points_to_morton(points):
r"""Convert (quantized) 3D points to morton codes.
Args:
points (torch.ShortTensor):
Quantized 3D points. This is not exactly like SPC points hierarchies
as this is only the data for a specific level,
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.LongTensor):
The morton code of the points,
of shape :math:`(\text{num_points})`
Examples:
>>> inputs = torch.tensor([
... [0, 0, 0],
... [0, 0, 1],
... [0, 0, 2],
... [0, 0, 3],
... [0, 1, 0]], device='cuda', dtype=torch.int16)
>>> points_to_morton(inputs)
tensor([0, 1, 8, 9, 2], device='cuda:0')
"""
shape = list(points.shape)[:-1]
points = points.reshape(-1, 3)
return _C.ops.spc.points_to_morton_cuda(points.contiguous()).reshape(*shape)
def morton_to_points(morton):
r"""Convert morton codes to points.
Args:
morton (torch.LongTensor): The morton codes of quantized 3D points,
of shape :math:`(\text{num_points})`.
Returns:
        (torch.ShortTensor):
            The quantized point coordinates,
of shape :math:`(\text{num_points}, 3)`.
Examples:
>>> inputs = torch.tensor([0, 1, 8, 9, 2], device='cuda')
>>> morton_to_points(inputs)
tensor([[0, 0, 0],
[0, 0, 1],
[0, 0, 2],
[0, 0, 3],
[0, 1, 0]], device='cuda:0', dtype=torch.int16)
"""
shape = list(morton.shape)
shape.append(3)
morton = morton.reshape(-1)
return _C.ops.spc.morton_to_points_cuda(morton.contiguous()).reshape(*shape)
def points_to_corners(points):
r"""Calculates the corners of the points assuming each point is the 0th bit corner.
Args:
points (torch.ShortTensor): Quantized 3D points,
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.ShortTensor): Quantized 3D new points,
of shape :math:`(\text{num_points}, 8, 3)`.
Examples:
>>> inputs = torch.tensor([
... [0, 0, 0],
... [0, 2, 0]], device='cuda', dtype=torch.int16)
>>> points_to_corners(inputs)
tensor([[[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]],
<BLANKLINE>
[[0, 2, 0],
[0, 2, 1],
[0, 3, 0],
[0, 3, 1],
[1, 2, 0],
[1, 2, 1],
[1, 3, 0],
[1, 3, 1]]], device='cuda:0', dtype=torch.int16)
"""
shape = list(points.shape)
shape.insert(-1, 8)
return _C.ops.spc.points_to_corners_cuda(points.contiguous()).reshape(*shape)
def coords_to_trilinear(coords, points):
r"""Calculates the coefficients for trilinear interpolation.
To interpolate with the coefficients, do:
``torch.sum(features * coeffs, dim=-1)``
with ``features`` of shape :math:`(\text{num_points}, 8)`
Args:
coords (torch.FloatTensor): Floating point 3D points,
of shape :math:`(\text{num_points}, 3)`.
points (torch.ShortTensor): Quantized 3D points (the 0th bit of the voxel x is in),
of shape :math:`(\text{num_points}, 3)`.
Returns:
(torch.FloatTensor): The trilinear interpolation coefficients,
of shape :math:`(\text{num_points}, 8)`.
"""
shape = list(points.shape)
shape[-1] = 8
points = points.reshape(-1, 3)
coords = coords.reshape(-1, 3)
return _C.ops.spc.coords_to_trilinear_cuda(coords.contiguous(), points.contiguous()).reshape(*shape)
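# A possible interpolation sketch following the docstring above; the `features`
# tensor of shape (num_points, 8) holding per-corner values is assumed here and
# is not provided by this module:
#
#   coeffs = coords_to_trilinear(coords, points) # (num_points, 8)
#   interpolated = torch.sum(features * coeffs, dim=-1)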
|
JetMETAnalysis/Configuration/python/JetMETAnalysis_OutputModules_cff.py | ckamtsikis/cmssw | 852 | 11153933 | <reponame>ckamtsikis/cmssw
# The following comments couldn't be translated into the new config version:
#sumETOutputModuleAODSIM &
import FWCore.ParameterSet.Config as cms
from JetMETAnalysis.METSkims.metHigh_OutputModule_cfi import *
from JetMETAnalysis.METSkims.metLow_OutputModule_cfi import *
#include "JetMETAnalysis/METSkims/data/sumET_OutputModule.cfi"
#from JetMETAnalysis.JetSkims.onejet_OutputModule_cfi import *
#from JetMETAnalysis.JetSkims.photonjets_OutputModule_cfi import *
#include "JetMETAnalysis/JetSkims/data/dijetbalance_OutputModule.cfi"
JetMETAnalysisOutput = cms.Sequence(metHighOutputModuleFEVTSIM+metLowOutputModuleAODSIM)#+onejetOutputModuleAODSIM+photonjetsOutputModuleAODSIM)
|
ex39_sql_create/test.py | techieguy007/learn-more-python-the-hard-way-solutions | 466 | 11153940 | <filename>ex39_sql_create/test.py<gh_stars>100-1000
class Person(object):
def __init__(self, first_name,
last_name, age, pets):
self.first_name = first_name
self.last_name = last_name
self.age = age
self.pets = pets
class Pet(object):
def __init__(self, name, breed,
age, dead):
self.name = name
self.breed = breed
self.age = age
self.dead = dead
self.owners = []
# simulate insert
fluffy = Pet('Fluffy', 'Unicorn', 12, False)
gigantor = Pet('Gigantor', 'Robot', 2, False)
pete = Person("Zed", "Shaw", 43, [fluffy, gigantor])
fluffy.owners.append(pete)
gigantor.owners.append(pete)
DB = {
'person': [ pete ],
'pet': [fluffy, gigantor],
}
alive_pets = [pet for pet in DB['pet'] if not pet.dead]
print(alive_pets)
|
examples/streamlit/08_sentence_embedding_manifolds.py | milyiyo/nlu | 480 | 11153946 | import nlu
text= """You can visualize any of the 100 + Sentence Embeddings
with 10+ dimension reduction algorithms
and view the results in 3D, 2D, and 1D
which can be colored by various classifier labels!
"""
nlu.enable_streamlit_caching() # Optional caching the models, recommended
nlu.load('embed_sentence.bert').viz_streamlit_sentence_embed_manifold(text)
|
tester/test_model/test_reply2user.py | bukun/TorCMS | 243 | 11153949 | <reponame>bukun/TorCMS
# -*- coding:utf-8 -*-
from torcms.model.reply2user_model import MReply2User
from torcms.model.reply_model import MReply
from torcms.model.user_model import MUser
class TestMReply2User():
def setup(self):
        print('setup runs before each test case in this class')
self.user = MUser()
self.reply = MReply()
self.r2u = MReply2User()
self.username = 'adminsadfl'
self.password = '<PASSWORD>'
self.user_uid = '12345'
self.reply_uid = '65412'
def add_user(self, **kwargs):
name = kwargs.get('user_name', self.username)
post_data = {
'user_name': name,
            'user_pass': kwargs.get('user_pass', self.password),
'user_email': kwargs.get('user_email', '<EMAIL>'),
}
self.user.create_user(post_data)
aa = self.user.get_by_name(name)
self.user_uid = aa.uid
def add_reply(self, **kwargs):
p_d = {
'post_id': 'gtyu',
'user_name': self.username,
'user_id': self.user_uid,
'category': '0',
'cnt_reply': kwargs.get('cnt_reply', 'kfjd速度很快很低'),
}
uid = self.reply.create_reply(p_d)
self.reply_uid = uid
self.r2u.create_reply(self.user_uid, uid)
def test_create_reply(self):
self.add_user()
self.add_reply()
self.r2u.create_reply(self.user_uid, self.reply_uid)
aa = self.r2u.get_voter_count(self.reply_uid)
assert aa >= 1
self.tearDown()
#
# def test_update(self):
# self.r2u.update()
def test_delete(self):
self.add_user()
self.add_reply()
self.r2u.create_reply(self.user_uid, self.reply_uid)
aa = self.r2u.get_voter_count(self.reply_uid)
assert aa >= 1
self.r2u.delete(self.reply_uid)
aa = self.r2u.get_voter_count(self.reply_uid)
assert aa == 0
self.tearDown()
def test_get_voter_count(self):
self.add_user()
self.add_reply()
self.r2u.create_reply(self.user_uid, self.reply_uid)
aa = self.r2u.get_voter_count(self.reply_uid)
assert aa >= 1
self.tearDown()
def tearDown(self):
print("function teardown")
tt = self.user.get_by_uid(self.user_uid)
if tt:
self.user.delete(tt.uid)
tt = self.reply.get_by_uid(self.reply_uid)
if tt:
self.reply.delete_by_uid(tt.uid)
self.r2u.delete(self.reply_uid)
|
v2/client_server/heartbeat.py | Dilepa/micropython-async | 443 | 11153979 | # flash.py Heartbeat code for simple uasyncio-based echo server
# Released under the MIT licence
# Copyright (c) <NAME> 2019
import uasyncio as asyncio
from sys import platform
async def heartbeat(tms):
if platform == 'pyboard': # V1.x or D series
from pyb import LED
led = LED(1)
elif platform == 'esp8266':
from machine import Pin
led = Pin(2, Pin.OUT, value=1)
elif platform == 'linux':
return # No LED
else:
raise OSError('Unsupported platform.')
while True:
if platform == 'pyboard':
led.toggle()
elif platform == 'esp8266':
led(not led())
await asyncio.sleep_ms(tms)
|
torch2trt/converters/dummy_converters.py | PogChamper/torch2trt | 3,363 | 11153985 | from torch2trt.torch2trt import *
def is_private(method):
method = method.split('.')[-1] # remove prefix
return method[0] == '_' and method[1] != '_'
def is_function_type(method):
fntype = eval(method + '.__class__.__name__')
return fntype == 'function' or fntype == 'builtin_function_or_method' or fntype == 'method_descriptor'
def get_methods(namespace):
methods = []
for method in dir(eval(namespace)):
full_method = namespace + '.' + method
if not is_private(full_method) and is_function_type(full_method):
methods.append(full_method)
return methods
TORCH_METHODS = []
TORCH_METHODS += get_methods('torch')
TORCH_METHODS += get_methods('torch.Tensor')
TORCH_METHODS += get_methods('torch.nn.functional')
for method in TORCH_METHODS:
@tensorrt_converter(method, is_real=False)
def warn_method(ctx):
print('Warning: Encountered known unsupported method %s' % ctx.method_str)
@tensorrt_converter('torch.Tensor.dim', is_real=False)
@tensorrt_converter('torch.Tensor.size', is_real=False)
def dont_warn(ctx):
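    # Shape queries such as Tensor.dim()/Tensor.size() produce no TensorRT layers,
    # so they are silently skipped instead of warned about.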
pass
|
src/app/integrations/s3.py | denkasyanov/education-backend | 151 | 11153989 | import boto3
from botocore.client import Config
from django.conf import settings
from django.utils.functional import cached_property
class AppS3:
"""App-specific methods for directly calling s3 API"""
@cached_property
def client(self):
session = boto3.session.Session()
return session.client(
's3',
region_name=settings.AWS_S3_REGION_NAME,
endpoint_url=settings.AWS_S3_ENDPOINT_URL,
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
config=Config(signature_version='s3'),
)
def get_presigned_url(self, object_id: str, expires: int):
return self.client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': settings.AWS_STORAGE_BUCKET_NAME,
'Key': object_id,
'ResponseContentDisposition': 'attachment',
},
ExpiresIn=expires,
)
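# Rough usage sketch (the object key below is a made-up example):
#
#   url = AppS3().get_presigned_url('courses/lesson-01.mp4', expires=3600)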
|
sagemaker_neo_compilation_jobs/mxnet_mnist/get_input.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 11153995 | <filename>sagemaker_neo_compilation_jobs/mxnet_mnist/get_input.py<gh_stars>1000+
import math
from tkinter import *
from tkinter import messagebox
import numpy as np
# track welcome status so we only display the welcome message once
welcome = False
# saved array size
width = 28.0
height = 28.0
# scale from array size to GUI size (for readability, ease of drawing/use)
scale = 10.0
# internal array data, initialize to zeros
array = np.zeros((int(width), int(height)))
def update(x, y):
"""
Update the internal array with given x/y coordinate
"""
global array
# compute location in array using scaling factor
real_x = math.floor(x / scale)
real_y = math.floor(y / scale)
# update array with value '1'
array[real_y][real_x] = 1
def paint(event):
"""
Event handler for mouse motion
Update the GUI with dots for given x/y coordinate
"""
global canvas
# compute size of dot based on scaling factor
x1, y1 = (event.x - scale), (event.y - scale)
x2, y2 = (event.x + scale), (event.y + scale)
# draw dot
canvas.create_oval(x1, y1, x2, y2, fill="black")
# update internal array
update(event.x, event.y)
def save(event):
"""
Event handler for mouse button release
Save the internal array to file
"""
global array
# save
np.save("input.npy", array)
# print array data to console (for understanding)
for y in range(int(height)):
s = ""
for x in range(int(width)):
s += str(int(array[y][x])) + " "
print(s)
# remind user of file name
print("saved to input.npy")
def clear(event):
"""
Event handler for mouse click
Clear internal array and drawing canvas to prepare for new digit
"""
global canvas
global array
# clear internal array
array = np.zeros((int(width), int(height)))
# clear drawing canvas
canvas.delete("all")
def focus(event):
"""
Event handler for gaining focus on window
Display welcome message the first time
"""
global welcome
# only display message once
if not welcome:
# open info pop up with instructions
messagebox.showinfo(
"Instructions",
"Click and drag to draw a digit as a single continuous stroke. Release the mouse to save 28x28 numpy array as 'input.npy' file on disk. Clicking and dragging again will reset and start over. Close the window to exit the python process",
parent=master,
)
# set flag to not repeat
welcome = True
#######################
# setup GUI
#######################
# setup window
try:
master = Tk()
except TclError as ex:
msg = ex.args[0]
if "display" in msg or "DISPLAY" in msg:
print(
"This script must be run in a terminal shell and on a machine with a GUI display like your local computer."
)
exit(1)
master.title("Draw a digit")
# register focus handler on window
master.bind("<FocusIn>", focus)
# setup drawing canvas
canvas = Canvas(master, width=width * scale, height=height * scale, background="gray75")
# if the user resizes the window, don't scale the canvas
canvas.pack(expand=NO, fill=NONE)
# register handlers for canvas
canvas.bind("<B1-Motion>", paint)
canvas.bind("<ButtonRelease>", save)
canvas.bind("<Button-1>", clear)
# message at bottom of window
message = Label(master, text="Press and Drag the mouse to draw")
message.pack(side=BOTTOM)
# main GUI loop
mainloop()
|
third_party/shaderc/src/utils/update_build_version.py | zipated/src | 2,151 | 11154001 | <reponame>zipated/src
#!/usr/bin/env python
# Copyright 2016 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Updates build-version.inc in the current directory, unless the update is
# identical to the existing content.
#
# Args: <shaderc_dir> <spirv-tools_dir> <glslang_dir>
#
# For each directory, there will be a line in build-version.inc containing that
# directory's "git describe" output enclosed in double quotes and appropriately
# escaped.
from __future__ import print_function
import datetime
import os.path
import subprocess
import sys
OUTFILE = 'build-version.inc'
def command_output(cmd, dir):
"""Runs a command in a directory and returns its standard output stream.
Captures the standard error stream.
Raises a RuntimeError if the command fails to launch or otherwise fails.
"""
p = subprocess.Popen(cmd,
cwd=dir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, _) = p.communicate()
if p.returncode != 0:
raise RuntimeError('Failed to run %s in %s' % (cmd, dir))
return stdout
def describe(dir):
"""Returns a string describing the current Git HEAD version as descriptively
as possible.
Runs 'git describe', or alternately 'git rev-parse HEAD', in dir. If
successful, returns the output; otherwise returns 'unknown hash, <date>'."""
try:
# decode() is needed here for Python3 compatibility. In Python2,
# str and bytes are the same type, but not in Python3.
# Popen.communicate() returns a bytes instance, which needs to be
# decoded into text data first in Python3. And this decode() won't
# hurt Python2.
return command_output(['git', 'describe'], dir).rstrip().decode()
except:
try:
return command_output(
['git', 'rev-parse', 'HEAD'], dir).rstrip().decode()
except:
return 'unknown hash, ' + datetime.date.today().isoformat()
def main():
if len(sys.argv) != 4:
print(
'usage: {0} <shaderc_dir> <spirv-tools_dir> <glslang_dir>'.format(sys.argv[0]))
sys.exit(1)
projects = ['shaderc', 'spirv-tools', 'glslang']
tags = [describe(p).replace('"', '\\"')
for p in sys.argv[1:]]
new_content = ''.join([
'"{} {}\\n"\n'.format(p, t)
for (p, t) in zip(projects, tags)])
if os.path.isfile(OUTFILE) and new_content == open(OUTFILE, 'r').read():
sys.exit(0)
open(OUTFILE, 'w').write(new_content)
if __name__ == '__main__':
main()
|
tests/numpy/signal.py | RoboticExplorationLab/micropython-ulab | 232 | 11154012 | <reponame>RoboticExplorationLab/micropython-ulab
import math
try:
from ulab import numpy as np
from ulab import scipy as spy
except ImportError:
import numpy as np
import scipy as spy
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float)
sos = np.array([[1, 2, 3, 1, 5, 6], [1, 2, 3, 1, 5, 6]],dtype=np.float)
result = spy.signal.sosfilt(sos, x)
ref_result = np.array([0.0000e+00, 1.0000e+00, -4.0000e+00, 2.4000e+01, -1.0400e+02, 4.4000e+02, -1.7280e+03, 6.5320e+03, -2.3848e+04, 8.4864e+04], dtype=np.float)
cmp_result = []
for p,q in zip(list(result), list(ref_result)):
cmp_result.append(math.isclose(p, q, rel_tol=1e-06, abs_tol=1e-06))
print(cmp_result)
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
sos = np.array([[1, 2, 3, 1, 5, 6], [1, 2, 3, 1, 5, 6]],dtype=np.float)
zi = np.array([[1, 2], [3, 4]],dtype=np.float)
y, zo = spy.signal.sosfilt(sos, x, zi=zi)
y_ref = np.array([ 4.00000e+00, -1.60000e+01, 6.30000e+01, -2.27000e+02, 8.03000e+02, -2.75100e+03, 9.27100e+03, -3.07750e+04, 1.01067e+05, -3.28991e+05], dtype=np.float)
zo_ref = np.array([[37242.0, 74835.],[1026187.0, 1936542.0]], dtype=np.float)
cmp_result = []
for p,q in zip(list(y), list(y_ref)):
cmp_result.append(math.isclose(p, q, rel_tol=1e-06, abs_tol=1e-06))
print(cmp_result)
cmp_result = []
for i in range(2):
temp = []
for j in range(2):
temp.append(math.isclose(zo[i][j], zo_ref[i][j], rel_tol=1E-9, abs_tol=1E-9))
cmp_result.append(temp)
print(cmp_result)
|
apps/web/migrations/0004_change_team_model_field_type.py | kaustubh-s1/EvalAI | 1,470 | 11154025 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-19 21:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("web", "0003_added_description_and_background_image_to_team_model")
]
operations = [
migrations.AlterField(
model_name="team",
name="email",
field=models.EmailField(blank=True, max_length=70, null=True),
),
migrations.AlterField(
model_name="team",
name="github_url",
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name="team",
name="linkedin_url",
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AlterField(
model_name="team",
name="personal_website",
field=models.CharField(blank=True, max_length=200, null=True),
),
]
|
tests/pytests/unit/states/postgresql/test_user.py | babs/salt | 9,425 | 11154050 | import pytest
import salt.modules.postgres as postgres
import salt.states.postgres_user as postgres_user
from tests.support.mock import create_autospec, patch
class ScramHash:
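    """Helper that compares equal to any SCRAM-SHA-256 hash string (the random
    salt makes an exact hash comparison impossible)."""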
def __eq__(self, other):
return other.startswith("SCRAM-SHA-256$4096:")
@pytest.fixture(name="db_args")
def fixture_db_args():
return {
"runas": None,
"host": None,
"port": None,
"maintenance_db": None,
"user": None,
"password": None,
}
@pytest.fixture(name="md5_pw")
def fixture_md5_pw():
    # 'md5' + md5('password' + 'username')
return "md55a231fcdb710d73268c4f44283487ba2"
@pytest.fixture(name="scram_pw")
def fixture_scram_pw():
# scram_sha_256('password')
return (
"SCRAM-SHA-256$4096:wLr5nqC+3F+r7FdQPnB+nA==$"
"0hn08ZdX8kirGaL4TM0j13digH9Wl365OOzCtAuF2pE=:"
"LzAh/MGUdjYkdbDzcOKpfGwa3WwPUsyGcY+TEnSpcto="
)
@pytest.fixture(name="existing_user")
def fixture_existing_user(md5_pw):
return {
"superuser": False,
"inherits privileges": True,
"can create roles": False,
"can create databases": False,
"can update system catalogs": None,
"can login": True,
"replication": False,
"connections": None,
"expiry time": None,
"defaults variables": "",
"password": <PASSWORD>,
"groups": [],
}
@pytest.fixture(name="test_mode")
def fixture_test_mode():
with patch.dict(postgres_user.__opts__, {"test": True}):
yield
@pytest.fixture(name="mocks")
def fixture_mocks():
return {
"postgres.role_get": create_autospec(postgres.role_get, return_value=None),
"postgres.user_exists": create_autospec(
postgres.user_exists, return_value=False
),
"postgres.user_create": create_autospec(
postgres.user_create, return_value=True
),
"postgres.user_update": create_autospec(
postgres.user_update, return_value=True
),
"postgres.user_remove": create_autospec(
postgres.user_remove, return_value=True
),
}
@pytest.fixture(autouse=True)
def setup_loader(mocks):
setup_loader_modules = {
postgres_user: {"__opts__": {"test": False}, "__salt__": mocks},
postgres: {"__opts__": {"test": False}},
}
with pytest.helpers.loader_mock(setup_loader_modules) as loader_mock:
yield loader_mock
# ==========
# postgres_user.present
# ==========
def test_present_create_basic(mocks, db_args):
assert postgres_user.present("username") == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="md5",
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=None,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
@pytest.mark.usefixtures("test_mode")
def test_present_create_basic_test(mocks, db_args):
assert postgres_user.present("username") == {
"name": "username",
"result": None,
"changes": {},
"comment": "User username is set to be created",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_exists_basic(mocks, existing_user, db_args):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username") == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_create_basic_error(mocks, db_args):
mocks["postgres.user_create"].return_value = False
assert postgres_user.present("username") == {
"name": "username",
"result": False,
"changes": {},
"comment": "Failed to create user username",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=True, **db_args
)
mocks["postgres.user_create"].assert_called_once()
mocks["postgres.user_update"].assert_not_called()
def test_present_change_option(mocks, existing_user, db_args):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", replication=True) == {
"name": "username",
"result": True,
"changes": {"username": {"replication": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="md5",
superuser=None,
login=None,
inherit=None,
replication=True,
rolepassword=None,
valid_until=None,
groups=None,
**db_args
)
def test_present_create_md5_password(mocks, md5_pw, db_args):
assert postgres_user.present("username", password="password", encrypted=True) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_scram_password(mocks, db_args):
assert postgres_user.present(
"username", password="password", encrypted="scram-sha-256"
) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="scram-sha-256",
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=ScramHash(),
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_plain_password(mocks, db_args):
assert postgres_user.present("username", password="password", encrypted=False) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=False,
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword="password",
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_md5_password_default_plain(mocks, monkeypatch, md5_pw, db_args):
monkeypatch.setattr(postgres, "_DEFAULT_PASSWORDS_ENCRYPTION", False)
test_present_create_md5_password(mocks, md5_pw, db_args)
def test_present_create_md5_password_default_encrypted(
mocks, monkeypatch, md5_pw, db_args
):
monkeypatch.setattr(postgres, "_DEFAULT_PASSWORDS_ENCRYPTION", True)
assert postgres_user.present("username", password="password") == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_create_md5_prehashed(mocks, md5_pw, db_args):
assert postgres_user.present("username", password=md5_pw, encrypted=True) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_md5_matches(mocks, existing_user):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", password="password", encrypted=True) == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_md5_matches_prehashed(mocks, existing_user, md5_pw):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", password=md5_pw, encrypted=True) == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_scram_matches(mocks, existing_user, scram_pw):
existing_user["password"] = scram_pw
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username", password="password", encrypted="scram-sha-256"
) == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_scram_matches_prehashed(mocks, existing_user, scram_pw):
existing_user["password"] = scram_pw
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username", password=scram_pw, encrypted="scram-sha-256"
) == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_update_md5_password(mocks, existing_user, md5_pw, db_args):
existing_user["password"] = "<PASSWORD>"
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", password="password", encrypted=True) == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
def test_present_refresh_scram_password(mocks, existing_user, scram_pw, db_args):
existing_user["password"] = scram_pw
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username",
password="password",
encrypted="scram-sha-256",
refresh_password=True,
) == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=False, **db_args
)
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="scram-sha-256",
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=ScramHash(),
valid_until=None,
groups=None,
**db_args
)
def test_present_update_error(mocks, existing_user):
existing_user["password"] = "<PASSWORD>"
mocks["postgres.role_get"].return_value = existing_user
mocks["postgres.user_update"].return_value = False
assert postgres_user.present("username", password="password", encrypted=True) == {
"name": "username",
"result": False,
"changes": {},
"comment": "Failed to update user username",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once()
def test_present_update_password_no_check(mocks, existing_user, md5_pw, db_args):
del existing_user["password"]
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username", password="password", encrypted=True, refresh_password=True
) == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once_with(
"username", return_password=False, **db_args
)
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
def test_present_create_default_password(mocks, md5_pw, db_args):
assert postgres_user.present(
"username", default_password="password", encrypted=True
) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
def test_present_create_unused_default_password(mocks, md5_pw, db_args):
assert postgres_user.present(
"username", password="password", default_password="<PASSWORD>", encrypted=True
) == {
"name": "username",
"result": True,
"changes": {"username": "Present"},
"comment": "The user username has been created",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted=True,
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
mocks["postgres.user_update"].assert_not_called()
def test_present_existing_default_password(mocks, existing_user):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username", default_password="<PASSWORD>", encrypted=True, refresh_password=True
) == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is already present",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_not_called()
def test_present_plain_to_scram(mocks, existing_user, db_args):
existing_user["password"] = "password"
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username", password="password", encrypted="scram-sha-256"
) == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="scram-sha-256",
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=ScramHash(),
valid_until=None,
groups=None,
**db_args
)
def test_present_plain_to_md5(mocks, existing_user, md5_pw, db_args):
existing_user["password"] = "password"
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", password="password", encrypted="md5") == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="md5",
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
def test_present_md5_to_scram(mocks, existing_user, db_args):
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present(
"username", password="password", encrypted="scram-sha-<PASSWORD>"
) == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="scram-sha-256",
superuser=None,
login=None,
inherit=None,
replication=None,
rolepassword=ScramHash(),
valid_until=None,
groups=None,
**db_args
)
def test_present_scram_to_md5(mocks, existing_user, scram_pw, md5_pw, db_args):
existing_user["password"] = scram_pw
mocks["postgres.role_get"].return_value = existing_user
assert postgres_user.present("username", password="password", encrypted="md5") == {
"name": "username",
"result": True,
"changes": {"username": {"password": True}},
"comment": "The user username has been updated",
}
mocks["postgres.role_get"].assert_called_once()
mocks["postgres.user_create"].assert_not_called()
mocks["postgres.user_update"].assert_called_once_with(
username="username",
createdb=None,
createroles=None,
encrypted="md5",
superuser=None,
login=None,
inherit=None,
replication=None,
        rolepassword=md5_pw,
valid_until=None,
groups=None,
**db_args
)
# ==========
# postgres_user.absent
# ==========
def test_absent_delete(mocks, db_args):
mocks["postgres.user_exists"].return_value = True
assert postgres_user.absent("username") == {
"name": "username",
"result": True,
"changes": {"username": "Absent"},
"comment": "User username has been removed",
}
mocks["postgres.user_exists"].assert_called_once_with("username", **db_args)
mocks["postgres.user_remove"].assert_called_once_with("username", **db_args)
@pytest.mark.usefixtures("test_mode")
def test_absent_test(mocks, db_args):
mocks["postgres.user_exists"].return_value = True
assert postgres_user.absent("username") == {
"name": "username",
"result": None,
"changes": {},
"comment": "User username is set to be removed",
}
mocks["postgres.user_exists"].assert_called_once_with("username", **db_args)
mocks["postgres.user_remove"].assert_not_called()
def test_absent_already(mocks, db_args):
mocks["postgres.user_exists"].return_value = False
assert postgres_user.absent("username") == {
"name": "username",
"result": True,
"changes": {},
"comment": "User username is not present, so it cannot be removed",
}
mocks["postgres.user_exists"].assert_called_once_with("username", **db_args)
mocks["postgres.user_remove"].assert_not_called()
def test_absent_error(mocks):
mocks["postgres.user_exists"].return_value = True
mocks["postgres.user_remove"].return_value = False
assert postgres_user.absent("username") == {
"name": "username",
"result": False,
"changes": {},
"comment": "User username failed to be removed",
}
mocks["postgres.user_exists"].assert_called_once()
mocks["postgres.user_remove"].assert_called_once()
|
csaps/_sspumv.py | espdev/csaps | 111 | 11154086 |
# -*- coding: utf-8 -*-
"""
Univariate/multivariate cubic smoothing spline implementation
"""
import functools
from typing import Optional, Union, Tuple, List
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as la
from scipy.interpolate import PPoly
from ._base import ISplinePPForm, ISmoothingSpline
from ._types import UnivariateDataType, MultivariateDataType
from ._reshape import to_2d, prod
class SplinePPForm(ISplinePPForm[np.ndarray, int], PPoly):
"""The base class for univariate/multivariate spline in piecewise polynomial form
Piecewise polynomial in terms of coefficients and breakpoints.
Notes
-----
Inherited from :py:class:`scipy.interpolate.PPoly`
"""
__module__ = 'csaps'
@property
def breaks(self) -> np.ndarray:
return self.x
@property
def coeffs(self) -> np.ndarray:
return self.c
@property
def order(self) -> int:
return self.c.shape[0]
@property
def pieces(self) -> int:
return self.c.shape[1]
@property
def ndim(self) -> int:
"""Returns the number of spline dimensions
The number of dimensions is product of shape without ``shape[self.axis]``.
"""
shape = list(self.shape)
shape.pop(self.axis)
return prod(shape)
@property
def shape(self) -> Tuple[int]:
"""Returns the source data shape
"""
shape: List[int] = list(self.c.shape[2:])
shape.insert(self.axis, self.c.shape[1] + 1)
return tuple(shape)
def __repr__(self): # pragma: no cover
return (
f'{type(self).__name__}\n'
f' breaks: {self.breaks}\n'
f' coeffs shape: {self.coeffs.shape}\n'
f' data shape: {self.shape}\n'
f' axis: {self.axis}\n'
f' pieces: {self.pieces}\n'
f' order: {self.order}\n'
f' ndim: {self.ndim}\n'
)
class CubicSmoothingSpline(ISmoothingSpline[
SplinePPForm,
float,
UnivariateDataType,
int,
Union[bool, str]
]):
"""Cubic smoothing spline
The cubic spline implementation for univariate/multivariate data.
Parameters
----------
xdata : np.ndarray, sequence, vector-like
X input 1-D data vector (data sites: ``x1 < x2 < ... < xN``)
ydata : np.ndarray, vector-like, sequence[vector-like]
        Y input 1-D data vector or N-D array with shape[axis] equal to ``xdata`` size
    weights : [*Optional*] np.ndarray, list
        Weights 1-D vector with size equal to ``xdata`` size
smooth : [*Optional*] float
Smoothing parameter in range [0, 1] where:
- 0: The smoothing spline is the least-squares straight line fit
- 1: The cubic spline interpolant with natural condition
axis : [*Optional*] int
Axis along which ``ydata`` is assumed to be varying.
Meaning that for x[i] the corresponding values are np.take(ydata, i, axis=axis).
By default is -1 (the last axis).
normalizedsmooth : [*Optional*] bool
If True, the smooth parameter is normalized such that results are invariant to xdata range
and less sensitive to nonuniformity of weights and xdata clumping
.. versionadded:: 1.1.0
"""
__module__ = 'csaps'
def __init__(self,
xdata: UnivariateDataType,
ydata: MultivariateDataType,
weights: Optional[UnivariateDataType] = None,
smooth: Optional[float] = None,
axis: int = -1,
normalizedsmooth: bool = False):
x, y, w, shape, axis = self._prepare_data(xdata, ydata, weights, axis)
coeffs, smooth = self._make_spline(x, y, w, smooth, shape, normalizedsmooth)
spline = SplinePPForm.construct_fast(coeffs, x, axis=axis)
self._smooth = smooth
self._spline = spline
def __call__(self,
x: UnivariateDataType,
nu: Optional[int] = None,
extrapolate: Optional[Union[bool, str]] = None) -> np.ndarray:
"""Evaluate the spline for given data
Parameters
----------
x : 1-d array-like
Points to evaluate the spline at.
nu : [*Optional*] int
Order of derivative to evaluate. Must be non-negative.
extrapolate : [*Optional*] bool or 'periodic'
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu is None:
nu = 0
return self._spline(x, nu=nu, extrapolate=extrapolate)
@property
def smooth(self) -> float:
"""Returns the smoothing factor
Returns
-------
smooth : float
Smoothing factor in the range [0, 1]
"""
return self._smooth
@property
def spline(self) -> SplinePPForm:
"""Returns the spline description in `SplinePPForm` instance
Returns
-------
spline : SplinePPForm
The spline representation in :class:`SplinePPForm` instance
"""
return self._spline
@staticmethod
def _prepare_data(xdata, ydata, weights, axis):
xdata = np.asarray(xdata, dtype=np.float64)
ydata = np.asarray(ydata, dtype=np.float64)
if xdata.ndim > 1:
raise ValueError("'xdata' must be a vector")
if xdata.size < 2:
raise ValueError("'xdata' must contain at least 2 data points.")
axis = ydata.ndim + axis if axis < 0 else axis
if ydata.shape[axis] != xdata.size:
raise ValueError(
f"'ydata' data must be a 1-D or N-D array with shape[{axis}] "
f"that is equal to 'xdata' size ({xdata.size})")
# Rolling axis for using its shape while constructing coeffs array
shape = np.rollaxis(ydata, axis).shape
# Reshape ydata N-D array to 2-D NxM array where N is the data
# dimension and M is the number of data points.
ydata = to_2d(ydata, axis)
if weights is None:
weights = np.ones_like(xdata)
else:
weights = np.asarray(weights, dtype=np.float64)
if weights.size != xdata.size:
                raise ValueError('Weights vector size must be equal to xdata size')
return xdata, ydata, weights, shape, axis
@staticmethod
def _compute_smooth(a, b):
"""
The calculation of the smoothing spline requires the solution of a
linear system whose coefficient matrix has the form p*A + (1-p)*B, with
the matrices A and B depending on the data sites x. The default value
of p makes p*trace(A) equal (1 - p)*trace(B).
"""
def trace(m: sp.dia_matrix):
return m.diagonal().sum()
return 1. / (1. + trace(a) / (6. * trace(b)))
@staticmethod
def _normalize_smooth(x: np.ndarray, w: np.ndarray, smooth: Optional[float]):
"""
See the explanation here: https://github.com/espdev/csaps/pull/47
"""
span = np.ptp(x)
eff_x = 1 + (span ** 2) / np.sum(np.diff(x) ** 2)
eff_w = np.sum(w) ** 2 / np.sum(w ** 2)
k = 80 * (span ** 3) * (x.size ** -2) * (eff_x ** -0.5) * (eff_w ** -0.5)
s = 0.5 if smooth is None else smooth
p = s / (s + (1 - s) * k)
return p
@staticmethod
def _make_spline(x, y, w, smooth, shape, normalizedsmooth):
pcount = x.size
dx = np.diff(x)
if not all(dx > 0): # pragma: no cover
raise ValueError(
"Items of 'xdata' vector must satisfy the condition: x1 < x2 < ... < xN")
dy = np.diff(y, axis=1)
dy_dx = dy / dx
if pcount == 2:
            # The corner case for data with 2 points (1 break interval):
            # the result is a 2nd-order spline, i.e. linear interpolation
yi = y[:, 0][:, np.newaxis]
c_shape = (2, pcount - 1) + shape[1:]
c = np.vstack((dy_dx, yi)).reshape(c_shape)
p = 1.0
return c, p
# Create diagonal sparse matrices
diags_r = np.vstack((dx[1:], 2 * (dx[1:] + dx[:-1]), dx[:-1]))
r = sp.spdiags(diags_r, [-1, 0, 1], pcount - 2, pcount - 2)
dx_recip = 1. / dx
diags_qtw = np.vstack((dx_recip[:-1], -(dx_recip[1:] + dx_recip[:-1]), dx_recip[1:]))
diags_sqrw_recip = 1. / np.sqrt(w)
qtw = (sp.diags(diags_qtw, [0, 1, 2], (pcount - 2, pcount)) @
sp.diags(diags_sqrw_recip, 0, (pcount, pcount)))
qtw = qtw @ qtw.T
p = smooth
if normalizedsmooth:
p = CubicSmoothingSpline._normalize_smooth(x, w, smooth)
elif smooth is None:
p = CubicSmoothingSpline._compute_smooth(r, qtw)
pp = (6. * (1. - p))
# Solve linear system for the 2nd derivatives
a = pp * qtw + p * r
b = np.diff(dy_dx, axis=1).T
u = la.spsolve(a, b)
if u.ndim < 2:
u = u[np.newaxis]
if y.shape[0] == 1:
u = u.T
dx = dx[:, np.newaxis]
vpad = functools.partial(np.pad, pad_width=[(1, 1), (0, 0)], mode='constant')
d1 = np.diff(vpad(u), axis=0) / dx
d2 = np.diff(vpad(d1), axis=0)
diags_w_recip = 1. / w
w = sp.diags(diags_w_recip, 0, (pcount, pcount))
yi = y.T - (pp * w) @ d2
pu = vpad(p * u)
c1 = np.diff(pu, axis=0) / dx
c2 = 3. * pu[:-1, :]
c3 = np.diff(yi, axis=0) / dx - dx * (2. * pu[:-1, :] + pu[1:, :])
c4 = yi[:-1, :]
c_shape = (4, pcount - 1) + shape[1:]
c = np.vstack((c1, c2, c3, c4)).reshape(c_shape)
return c, p
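# A minimal usage sketch added for illustration (not part of the library):
# it only exercises the public API documented above -- build the smoothing
# spline from noisy 1-D samples and evaluate it on a denser grid. The sample
# data and the smooth value are arbitrary assumptions.
if __name__ == '__main__':  # pragma: no cover
    x = np.linspace(-5., 5., 25)
    y = np.exp(-(x / 2.5) ** 2) + (np.random.rand(25) - 0.5) * 0.3
    smoothing_spline = CubicSmoothingSpline(x, y, smooth=0.85)
    xi = np.linspace(-5., 5., 150)
    yi = smoothing_spline(xi)
    print(yi.shape)  # -> (150,)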
|
dp/cloud/python/magma/db_service/migrations/versions/6234f59dd8e4_.py | tenpercent/magma | 539 | 11154092 |
"""empty message
Revision ID: 6234f59dd8e4
Revises: b0cad5321c88
Create Date: 2022-02-09 10:14:55.021034
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '6234f59dd8e4'
down_revision = 'b0cad5321c88'
branch_labels = None
depends_on = None
def upgrade():
"""
Run upgrade
"""
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(
op.f('ix_domain_proxy_logs_cbsd_serial_number'),
'domain_proxy_logs', ['cbsd_serial_number'], unique=False,
)
op.create_index(
op.f('ix_domain_proxy_logs_created_date'),
'domain_proxy_logs', ['created_date'], unique=False,
)
op.create_index(
op.f('ix_domain_proxy_logs_fcc_id'),
'domain_proxy_logs', ['fcc_id'], unique=False,
)
op.create_index(
op.f('ix_domain_proxy_logs_response_code'),
'domain_proxy_logs', ['response_code'], unique=False,
)
op.create_index(
op.f('ix_domain_proxy_logs_log_name'),
'domain_proxy_logs', ['log_name'], unique=False,
)
# ### end Alembic commands ###
def downgrade():
"""
Run downgrade
"""
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(
op.f('ix_domain_proxy_logs_response_code'),
table_name='domain_proxy_logs',
)
op.drop_index(
op.f('ix_domain_proxy_logs_fcc_id'),
table_name='domain_proxy_logs',
)
op.drop_index(
op.f('ix_domain_proxy_logs_created_date'),
table_name='domain_proxy_logs',
)
op.drop_index(
op.f('ix_domain_proxy_logs_cbsd_serial_number'),
table_name='domain_proxy_logs',
)
op.drop_index(
op.f('ix_domain_proxy_logs_log_name'),
table_name='domain_proxy_logs',
)
# ### end Alembic commands ###
|
contrib/python/parso/py2/tests/normalizer_issue_files/allowed_syntax_python3.5.py | jochenater/catboost | 6,989 | 11154106 |
"""
Mostly allowed syntax in Python 3.5.
"""
async def foo():
await bar()
#: E901
yield from []
return
#: E901
return ''
# With decorator it's a different statement.
@bla
async def foo():
await bar()
#: E901
yield from []
return
#: E901
return ''
|
demos/unreal_proxies/two_drones/unreal_proxy.py | TUBSAPISS2018/DroneSimLab | 122 | 11154131 |
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import zmq,pickle,time
import struct
import config
from Wrappers import phandlers as ph
import numpy as np
import cv2
#needed texture objects in the unreal project
drone_texture_names=['/Game/TextureRenderTarget2D_0','/Game/TextureRenderTarget2D_1']
drone_textures_down_names=['/Game/TextureRenderTarget2D_Down']
drone_textures_depth_names=['/Game/TextureRenderTarget2D_depth']
#needed actors
drone_actors_names=['Parrot_Drone_0','Parrot_Drone_1']
context = zmq.Context()
show_cv=False
pub_cv=True
drone_subs=[]
for ind in range(config.n_drones):
socket_sub = context.socket(zmq.SUB)
_,port=config.zmq_pub_drone_fdm
drone_ip='172.17.0.%d'%(ind+2) #172.17.0.1 for the docker host and 172.17.0.2 for first drone etc...
addr='tcp://%s:%d'%(drone_ip,port)
print("connecting to",addr)
socket_sub.connect(addr)
socket_sub.setsockopt(zmq.SUBSCRIBE,config.topic_sitl_position_report)
drone_subs.append(socket_sub)
socket_pub = context.socket(zmq.PUB)
socket_pub.bind("tcp://%s:%d" % config.zmq_pub_unreal_proxy )
start=time.time()
def main_loop(gworld):
print('-- actors --')
for p in ph.GetActorsNames(gworld):
print(p)
print('-- textures --')
drone_textures=[]
for tn in drone_texture_names:
drone_textures.append(ph.GetTextureByName(tn))
drone_textures_down=[]
for tn in drone_textures_down_names:
drone_textures_down.append(ph.GetTextureByName(tn))
drone_textures_depth=[]
for tn in drone_textures_depth_names:
drone_textures_depth.append(ph.GetTextureByName(tn))
if not all(drone_textures):
print("Error, Could not find all textures")
while 1:
yield
drone_actors=[]
for drn in drone_actors_names:
drone_actors.append(ph.FindActorByName(gworld,drn))
if not all(drone_actors):
print("Error, Could not find all drone actors")
while 1:
yield
    for _ in range(10):  # need to send it a few times, don't know why.
print('sending state main loop')
socket_pub.send_multipart([config.topic_unreal_state,b'main_loop'])
yield
drone_start_positions=[np.array(ph.GetActorLocation(drone_actor)) for drone_actor in drone_actors]
positions=[None for _ in range(config.n_drones)]
while 1:
for drone_index in range(config.n_drones):
socket_sub=drone_subs[drone_index]
drone_actor=drone_actors[drone_index]
while len(zmq.select([socket_sub],[],[],0)[0])>0:
topic, msg = socket_sub.recv_multipart()
positions[drone_index]=pickle.loads(msg)
#print('-----',positions[drone_index])
position=positions[drone_index]
if position is not None:
new_pos=drone_start_positions[drone_index]+np.array([position['posx'],position['posy'],position['posz']])*100 #turn to cm
#print('-----',drone_index,new_pos)
ph.SetActorLocation(drone_actor,new_pos)
ph.SetActorRotation(drone_actor,(position['roll'],position['pitch'],position['yaw']))
positions[drone_index]=None
yield
for drone_index in range(config.n_drones):
#img=cv2.resize(ph.GetTextureData(drone_textures[drone_index]),(1024,1024),cv2.INTER_LINEAR)
topics=[]
imgs=[]
img=ph.GetTextureData(drone_textures[drone_index])
topics.append(config.topic_unreal_drone_rgb_camera%drone_index)
imgs.append(img)
if drone_index<len(drone_textures_down):
img_down=ph.GetTextureData(drone_textures_down[drone_index])
topics.append(config.topic_unreal_drone_rgb_camera%drone_index+b'down')
imgs.append(img_down)
if drone_index<len(drone_textures_depth):
img_depth=ph.GetTextureData16f(drone_textures_depth[drone_index],channels=[0,1,2,3]) #depth data will be in A componnent
#img_depth=ph.GetTextureData(drone_textures_depth[drone_index],channels=[2]) #depth data will be in red componnent
topics.append(config.topic_unreal_drone_rgb_camera%drone_index+b'depth')
imgs.append(img_depth)
#topics=[config.topic_unreal_drone_rgb_camera%drone_index,
# config.topic_unreal_drone_rgb_camera%drone_index+b'down',
# config.topic_unreal_drone_rgb_camera%drone_index+b'depth']
#imgs=[ ph.GetTextureData(drone_textures[drone_index]),
# ph.GetTextureData(drone_textures_down[drone_index]),
# ph.GetTextureData(drone_textures_depth[drone_index],channels=[2])]
if pub_cv:
for topic,img in zip(topics,imgs):
#socket_pub.send_multipart([topic,pickle.dumps(img,2)])
#print('--->',img.shape)
socket_pub.send_multipart([topic,struct.pack('lll',*img.shape),img.tostring()])
#socket_pub.send_multipart([topic,pickle.dumps(img,-1)])
if show_cv:
cv2.imshow('drone camera %d'%drone_index,img)
cv2.waitKey(1)
def kill():
print('done!')
socket_pub.send_multipart([config.topic_unreal_state,b'kill'])
if show_cv:
cv2.destroyAllWindows()
for _ in range(10):
cv2.waitKey(10)
if __name__=="__main__":
while 1:
for drone_index in range(config.n_drones):
socket_sub=drone_subs[drone_index]
while len(zmq.select([socket_sub],[],[],0)[0])>0:
topic, msg = socket_sub.recv_multipart()
print("got ",topic)
|
gen_largest_num_frm_list.py | deepak5998/Py | 726 | 11154133 | # Write a function that given a list of non
# negative integers, arranges them such that
# they form the largest possible number. For
# example, given [50, 2, 1, 9], the largest
# formed number is 95021
from itertools import permutations
def generate_largest_number(arr):
gen_nums = []
for i in permutations(arr, len(arr)):
gen_nums.append("".join(map(str, i)))
return max(gen_nums)
arr = [54, 546, 548, 60]
generate_largest_number(arr)
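# Quick check added for illustration: the function returns the digits as a
# string (the max over all joined permutations), so print the result. The
# example from the problem statement above, [50, 2, 1, 9], should yield "95021".
print(generate_largest_number(arr))
print(generate_largest_number([50, 2, 1, 9]))  # expected: "95021"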
|
nodes/1.x/python/RevitLinkType.AttachmentType.py | andydandy74/ClockworkForDynamo | 147 | 11154138 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
links = UnwrapElement(IN[0])
def GetAttachmentType(link):
if hasattr(link, "AttachmentType"): return link.AttachmentType.ToString()
else: return None
if isinstance(IN[0], list): OUT = [GetAttachmentType(x) for x in links]
else: OUT = GetAttachmentType(links) |
pyfr/integrators/dual/phys/steppers.py | pv101/PyFR | 185 | 11154159 | # -*- coding: utf-8 -*-
import math
from pyfr.integrators.dual.phys.base import BaseDualIntegrator
class BaseDualStepper(BaseDualIntegrator):
pass
class BaseDIRKStepper(BaseDualStepper):
stepper_nregs = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.fsal:
self.b = self.a[-1]
self.c = [sum(row) for row in self.a]
@property
def stage_nregs(self):
return self.nstages
def step(self, t, dt):
for s, (sc, tc) in enumerate(zip(self.a, self.c)):
self.pseudointegrator.init_stage(s, sc, dt)
self.pseudointegrator.pseudo_advance(t + dt*tc)
self._finalize_step()
def _finalize_step(self):
if not self.fsal:
bcoeffs = [bt*self._dt for bt in self.b]
self.pseudointegrator.obtain_solution(bcoeffs)
self.pseudointegrator.store_current_soln()
class DualBackwardEulerStepper(BaseDIRKStepper):
stepper_name = 'backward-euler'
nstages = 1
fsal = True
a = [[1]]
class SDIRK33Stepper(BaseDIRKStepper):
stepper_name = 'sdirk33'
nstages = 3
fsal = True
_at = math.atan(0.5**1.5)/3
_a_lam = (3**0.5*math.sin(_at) - math.cos(_at))/2**0.5 + 1
a = [
[_a_lam],
[0.5*(1 - _a_lam), _a_lam],
[(4 - 1.5*_a_lam)*_a_lam - 0.25, (1.5*_a_lam - 5)*_a_lam + 1.25, _a_lam]
]
class SDIRK43Stepper(BaseDIRKStepper):
stepper_name = 'sdirk43'
nstages = 3
fsal = False
_a_lam = (3 + 2*3**0.5*math.cos(math.pi/18))/6
a = [
[_a_lam],
[0.5 - _a_lam, _a_lam],
[2*_a_lam, 1 - 4*_a_lam, _a_lam]
]
_b_rlam = 1/(6*(1 - 2*_a_lam)*(1 - 2*_a_lam))
b = [_b_rlam, 1 - 2*_b_rlam, _b_rlam]
|
samples/vsphere/vcenter/hcl/compatibility_data_update_sample.py | JKraftman/vsphere-automation-sdk-python | 589 | 11154188 |
#!/usr/bin/env python
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2019. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = '7.0+'
from com.vmware.esx.hcl_client import CompatibilityData
from samples.vsphere.vcenter.hcl.utils import get_configuration
from samples.vsphere.common import sample_cli, sample_util
class CompatibilityDataUpdateSample(object):
"""
Sample demonstrating vCenter HCL Compatibility Data Update Operation
Sample Prerequisites:
vCenter on linux platform
"""
def __init__(self):
parser = sample_cli.build_arg_parser()
args = sample_util.process_cli_args(parser.parse_args())
config = get_configuration(args.server, args.username,
args.password,
args.skipverification)
self.api_client = CompatibilityData(config)
def run(self):
"""
Calls the HCL Compatibility Data Update POST API to update the HCL Datastore on the vCenter
"""
data_update_info = self.api_client.update_task()
print("Compatibility Data Update Task ID : ", data_update_info.get_task_id())
def main():
"""
Entry point for the CompatibilityDataUpdateSample client
"""
dataUpdateSample = CompatibilityDataUpdateSample()
dataUpdateSample.run()
if __name__ == '__main__':
main()
|
capstone/capapi/tests/test_resources.py | ChefAndy/capstone | 134 | 11154218 | from elasticsearch.exceptions import RequestError
import pytest
from capapi.documents import CaseDocument
from capapi.resources import parallel_execute
@pytest.mark.django_db(databases=['capdb'])
def test_parallel_execute(three_cases, elasticsearch):
# run search in parallel
expected_ids = [str(three_cases[0].id), str(three_cases[1].id)]
results = parallel_execute(CaseDocument.search().filter('terms', id=expected_ids), desired_docs=3)
assert sorted(results) == expected_ids
# errors are raised
with pytest.raises(RequestError):
parallel_execute(CaseDocument.search().sort('invalid'), desired_docs=1)
|
doc/tutorials_src/model_selection.py | fatiando/verde | 415 | 11154237 | # Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
.. _model_selection:
Model Selection
===============
In :ref:`model_evaluation`, we saw how to check the performance of an
interpolator using cross-validation. We found that the default parameters for
:class:`verde.Spline` are not good for predicting our sample air temperature
data. Now, let's see how we can tune the :class:`~verde.Spline` to improve the
cross-validation performance.
Once again, we'll start by importing the required packages and loading our
sample data.
"""
import itertools
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import pyproj
import verde as vd
data = vd.datasets.fetch_texas_wind()
# Use Mercator projection because Spline is a Cartesian gridder
projection = pyproj.Proj(proj="merc", lat_ts=data.latitude.mean())
proj_coords = projection(data.longitude.values, data.latitude.values)
region = vd.get_region((data.longitude, data.latitude))
# The desired grid spacing in degrees
spacing = 15 / 60
###############################################################################
# Before we begin tuning, let's reiterate what the results were with the
# default parameters.
spline_default = vd.Spline()
score_default = np.mean(
vd.cross_val_score(spline_default, proj_coords, data.air_temperature_c)
)
spline_default.fit(proj_coords, data.air_temperature_c)
print("R² with defaults:", score_default)
###############################################################################
# Tuning
# ------
#
# :class:`~verde.Spline` has many parameters that can be set to modify the
# final result. Mainly the ``damping`` regularization parameter and the
# ``mindist`` "fudge factor" which smooths the solution. Would changing the
# default values give us a better score?
#
# We can answer these questions by changing the values in our ``spline`` and
# re-evaluating the model score repeatedly for different values of these
# parameters. Let's test the following combinations:
dampings = [None, 1e-4, 1e-3, 1e-2]
mindists = [5e3, 10e3, 50e3, 100e3]
# Use itertools to create a list with all combinations of parameters to test
parameter_sets = [
dict(damping=combo[0], mindist=combo[1])
for combo in itertools.product(dampings, mindists)
]
print("Number of combinations:", len(parameter_sets))
print("Combinations:", parameter_sets)
###############################################################################
# Now we can loop over the combinations and collect the scores for each
# parameter set.
spline = vd.Spline()
scores = []
for params in parameter_sets:
spline.set_params(**params)
score = np.mean(vd.cross_val_score(spline, proj_coords, data.air_temperature_c))
scores.append(score)
print(scores)
###############################################################################
# The largest score will yield the best parameter combination.
best = np.argmax(scores)
print("Best score:", scores[best])
print("Score with defaults:", score_default)
print("Best parameters:", parameter_sets[best])
###############################################################################
# **That is a nice improvement over our previous score!**
#
# This type of tuning is important and should always be performed when using a
# new gridder or a new dataset. However, the above implementation requires a
# lot of coding. Fortunately, Verde provides convenience classes that perform
# the cross-validation and tuning automatically when fitting a dataset.
###############################################################################
# Cross-validated gridders
# ------------------------
#
# The :class:`verde.SplineCV` class provides a cross-validated version of
# :class:`verde.Spline`. It has almost the same interface but does all of the
# above automatically when fitting a dataset. The only difference is that you
# must provide a list of ``damping`` and ``mindist`` parameters to try instead
# of only a single value:
spline = vd.SplineCV(
dampings=dampings,
mindists=mindists,
)
###############################################################################
# Calling :meth:`~verde.SplineCV.fit` will run a grid search over all parameter
# combinations to find the one that maximizes the cross-validation score.
spline.fit(proj_coords, data.air_temperature_c)
###############################################################################
# The estimated best ``damping`` and ``mindist``, as well as the
# cross-validation scores, are stored in class attributes:
print("Highest score:", spline.scores_.max())
print("Best damping:", spline.damping_)
print("Best mindist:", spline.mindist_)
###############################################################################
# The cross-validated gridder can be used like any other gridder (including in
# :class:`verde.Chain` and :class:`verde.Vector`):
grid = spline.grid(
region=region,
spacing=spacing,
projection=projection,
dims=["latitude", "longitude"],
data_names="temperature",
)
print(grid)
###############################################################################
# Like :func:`verde.cross_val_score`, :class:`~verde.SplineCV` can also run the
# grid search in parallel using `Dask <https://dask.org/>`__ by specifying the
# ``delayed`` attribute:
spline = vd.SplineCV(dampings=dampings, mindists=mindists, delayed=True)
###############################################################################
# Unlike :func:`verde.cross_val_score`, calling :meth:`~verde.SplineCV.fit`
# does **not** result in :func:`dask.delayed` objects. The full grid search is
# executed and the optimal parameters are found immediately.
spline.fit(proj_coords, data.air_temperature_c)
print("Best damping:", spline.damping_)
print("Best mindist:", spline.mindist_)
###############################################################################
# The one caveat is the that the ``scores_`` attribute will be a list of
# :func:`dask.delayed` objects instead because the scores are only computed as
# intermediate values in the scheduled computations.
print("Delayed scores:", spline.scores_)
###############################################################################
# Calling :func:`dask.compute` on the scores will calculate their values but
# will unfortunately run the entire grid search again. So using
# ``delayed=True`` is not recommended if you need the scores of each parameter
# combination.
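###############################################################################
# Purely as an illustration (a sketch added here, assuming Dask is importable,
# which it must be for ``delayed=True`` to work), the delayed scores could be
# materialized with :func:`dask.compute`. Keep in mind the caveat above: this
# re-runs the whole grid search.
import dask
computed_scores = dask.compute(*spline.scores_)
print("Computed scores:", computed_scores)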
###############################################################################
# The importance of tuning
# ------------------------
#
# To see the difference that tuning has on the results, we can make a grid
# with the best configuration and see how it compares to the default result.
grid_default = spline_default.grid(
region=region,
spacing=spacing,
projection=projection,
dims=["latitude", "longitude"],
data_names="temperature",
)
###############################################################################
# Let's plot our grids side-by-side:
mask = vd.distance_mask(
(data.longitude, data.latitude),
maxdist=3 * spacing * 111e3,
coordinates=vd.grid_coordinates(region, spacing=spacing),
projection=projection,
)
grid = grid.where(mask)
grid_default = grid_default.where(mask)
plt.figure(figsize=(14, 8))
for i, title, grd in zip(range(2), ["Defaults", "Tuned"], [grid_default, grid]):
ax = plt.subplot(1, 2, i + 1, projection=ccrs.Mercator())
ax.set_title(title)
pc = grd.temperature.plot.pcolormesh(
ax=ax,
cmap="plasma",
transform=ccrs.PlateCarree(),
vmin=data.air_temperature_c.min(),
vmax=data.air_temperature_c.max(),
add_colorbar=False,
add_labels=False,
)
plt.colorbar(pc, orientation="horizontal", aspect=50, pad=0.05).set_label("C")
ax.plot(
data.longitude, data.latitude, ".k", markersize=1, transform=ccrs.PlateCarree()
)
vd.datasets.setup_texas_wind_map(ax)
plt.show()
###############################################################################
# Notice that, for sparse data like these, **smoother models tend to be better
# predictors**. This is a sign that you should probably not trust many of the
# short wavelength features that we get from the defaults.
|
attic/win32/toga_win32/widgets/container.py | luizoti/toga | 1,261 | 11154261 | from toga_cassowary.widget import Container as CassowaryContainer
class Win32Container(object):
def add(self, widget):
pass
class Container(CassowaryContainer):
def _create_container(self):
# No impl is required for a container, but we need a placeholder
# to keep the cross-platform logic happy.
return Win32Container()
def _resize(self, width, height):
with self._layout_manager.layout(width, height):
for widget in self._layout_manager.children:
widget._resize()
def _set_app(self, app):
for child in self.children:
child.app = app
def _set_window(self, window):
for child in self.children:
child.window = window
child.startup()
@property
def _width_hint(self):
width = self._layout_manager.bounding_box.width.value
print("PREFERRED WIDTH", width)
return width, width
@property
def _height_hint(self):
height = self._layout_manager.bounding_box.height.value
print("PREFERRED HEIGHT", height)
return height, height
|
translator/utils.py | ttslr/gtos | 172 | 11154288 |
import torch
from torch import nn
import math
import numpy as np
def move_to_device(maybe_tensor, device):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.to(device)
elif isinstance(maybe_tensor, np.ndarray):
return torch.from_numpy(maybe_tensor).to(device).contiguous()
elif isinstance(maybe_tensor, dict):
return {
key: move_to_device(value, device)
for key, value in maybe_tensor.items()
}
elif isinstance(maybe_tensor, list):
return [move_to_device(x, device) for x in maybe_tensor]
else:
return maybe_tensor
def compute_f_by_tensor(input, target, mask):
input = input.view(-1).tolist()
target = target.view(-1).tolist()
mask = mask.view(-1).tolist()
tp, fp, tn, fn = 0., 0., 0., 0.
for i, t, m in zip(input, target, mask):
if m == 1:
continue
else:
if i == 1:
if t == 1:
tp +=1
else:
fp +=1
else:
if t == 1:
fn +=1
else:
tn +=1
if tp == 0:
return 0., 0., 0.
P = tp / (tp + fp)
R = tp / (tp + fn)
F = 2*P*R/(P+R)
return P, R, F
def gelu_fast(x):
if not hasattr(gelu_fast, "_a"):
gelu_fast._a = math.sqrt(2 / math.pi)
return 0.5 * x * (1 + torch.tanh(gelu_fast._a * (x + 0.044715 * torch.pow(x, 3))))
def gelu(x: torch.Tensor) -> torch.Tensor:
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def label_smoothed_nll_loss(log_probs, target, eps):
#log_probs: N x C
#target: N
nll_loss = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
if eps == 0.:
return nll_loss
smooth_loss = -log_probs.sum(dim=-1)
eps_i = eps / log_probs.size(-1)
loss = (1. - eps) * nll_loss + eps_i * smooth_loss
return loss
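# Minimal sanity check added for illustration (not part of the original
# module): with eps = 0 the label-smoothed loss reduces to the ordinary NLL
# loss, which is easy to verify on a tiny random batch.
if __name__ == '__main__':
    torch.manual_seed(0)
    logits = torch.randn(4, 7)  # N=4 samples, C=7 classes
    log_probs = torch.log_softmax(logits, dim=-1)
    target = torch.randint(0, 7, (4,))
    plain = label_smoothed_nll_loss(log_probs, target, 0.)
    smoothed = label_smoothed_nll_loss(log_probs, target, 0.1)
    assert torch.allclose(plain, nn.functional.nll_loss(log_probs, target, reduction='none'))
    print(plain.mean().item(), smoothed.mean().item())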
|
hkust-gmission/gmission/models/crowdsourcing.py | gmission/gmission | 251 | 11154296 | __author__ = 'chenzhao'
from base import *
# type = text / image / selection
class HIT(db.Model, BasicModelMixin):
__tablename__ = 'hit'
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(20))
title = db.Column(db.String(500))
description = db.Column(db.TEXT)
attachment_id = db.Column(db.Integer, db.ForeignKey('attachment.id'))
attachment = db.relationship('Attachment', foreign_keys=attachment_id)
campaign_id = db.Column(db.Integer, db.ForeignKey('campaign.id'))
campaign = db.relationship('Campaign', lazy='select')
credit = db.Column(db.Integer, default=10)
status = db.Column(db.String(20), default='open') # or closed
required_answer_count = db.Column(db.Integer, default=3)
min_selection_count = db.Column(db.Integer, default=1)
max_selection_count = db.Column(db.Integer, default=1)
begin_time = db.Column(db.DateTime, default=datetime.datetime.now)
end_time = db.Column(db.DateTime, default=lambda: datetime.datetime.now() + datetime.timedelta(days=1))
created_on = db.Column(db.DateTime, default=datetime.datetime.now)
location_id = db.Column(db.Integer, db.ForeignKey('location.id'), nullable=True)
location = db.relationship('Location', foreign_keys=location_id)
requester = db.relationship('User')
requester_id = db.Column(db.Integer, db.ForeignKey('user.id'))
selections = db.relationship('Selection', lazy='select')
answers = db.relationship('Answer', lazy='select')
def __unicode__(self):
        return '<%s,%s>' % (repr(self.id), self.title)
class Answer(db.Model, BasicModelMixin):
id = db.Column(db.Integer, primary_key=True)
hit = db.relationship('HIT', lazy='select')
hit_id = db.Column(db.Integer, db.ForeignKey('hit.id'))
brief = db.Column(db.String(100))
attachment_id = db.Column(db.Integer, db.ForeignKey('attachment.id'))
attachment = db.relationship('Attachment', lazy='immediate', foreign_keys=attachment_id)
type = db.Column(db.String(20))
accepted = db.Column(db.Boolean, default=False)
created_on = db.Column(db.DateTime, default=datetime.datetime.now)
location = db.relationship('Location', lazy='select')
location_id = db.Column(db.Integer, db.ForeignKey('location.id'))
worker = db.relationship('User', lazy='select')
worker_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __unicode__(self):
        return '<%d,%s,%s>' % (self.id, self.hit_id, self.brief)
class Selection(db.Model, BasicModelMixin):
id = db.Column(db.Integer, primary_key=True)
hit = db.relationship('HIT', lazy='select')
hit_id = db.Column(db.Integer, db.ForeignKey('hit.id'))
brief = db.Column(db.String(100))
created_on = db.Column(db.DateTime, default=datetime.datetime.now)
|
6-consumer/lessons/4-multiple-language-support/code/functions/smart-timer-trigger/text-to-speech/__init__.py | Iftu119/IoT-For-Beginners | 9,718 | 11154317 | import io
import os
import requests
import librosa
import soundfile as sf
import azure.functions as func
location = os.environ['SPEECH_LOCATION']
speech_key = os.environ['SPEECH_KEY']
def get_access_token():
headers = {
'Ocp-Apim-Subscription-Key': speech_key
}
token_endpoint = f'https://{location}.api.cognitive.microsoft.com/sts/v1.0/issuetoken'
response = requests.post(token_endpoint, headers=headers)
return str(response.text)
playback_format = 'riff-48khz-16bit-mono-pcm'
def main(req: func.HttpRequest) -> func.HttpResponse:
req_body = req.get_json()
language = req_body['language']
voice = req_body['voice']
text = req_body['text']
url = f'https://{location}.tts.speech.microsoft.com/cognitiveservices/v1'
headers = {
'Authorization': 'Bearer ' + get_access_token(),
'Content-Type': 'application/ssml+xml',
'X-Microsoft-OutputFormat': playback_format
}
ssml = f'<speak version=\'1.0\' xml:lang=\'{language}\'>'
ssml += f'<voice xml:lang=\'{language}\' name=\'{voice}\'>'
ssml += text
ssml += '</voice>'
ssml += '</speak>'
response = requests.post(url, headers=headers, data=ssml.encode('utf-8'))
raw_audio, sample_rate = librosa.load(io.BytesIO(response.content), sr=48000)
resampled = librosa.resample(raw_audio, sample_rate, 44100)
output_buffer = io.BytesIO()
sf.write(output_buffer, resampled, 44100, 'PCM_16', format='wav')
output_buffer.seek(0)
return func.HttpResponse(output_buffer.read(), status_code=200)
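# Example request body this function expects (added for illustration; the
# voice name below is an assumption -- any voice available for the configured
# Speech region and language works):
#
# {
#     "language": "en-GB",
#     "voice": "en-GB-LibbyNeural",
#     "text": "Your timer is done."
# }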
|
osbrain/address.py | RezaBehzadpour/osbrain | 176 | 11154329 | """
Implementation of address-related features.
"""
from ipaddress import ip_address
import zmq
from .common import unique_identifier
def address_to_host_port(addr):
"""
Try to convert an address to a (host, port) tuple.
Parameters
----------
addr : str, SocketAddress
Returns
-------
tuple
A (host, port) tuple formed with the corresponding data.
"""
if addr is None:
return (None, None)
# Try the most common case (well-defined types)
try:
return _common_address_to_host_port(addr)
except TypeError:
pass
# Try to do something anyway
if hasattr(addr, 'host') and hasattr(addr, 'port'):
return (addr.host, addr.port)
raise ValueError('Unsupported address type "%s"!' % type(addr))
def _common_address_to_host_port(addr):
"""
Try to convert an address to a (host, port) tuple.
This function is meant to be used with well-known types. For a more
general case, use the `address_to_host_port` function instead.
Parameters
----------
addr : str, SocketAddress, AgentAddress
Returns
-------
tuple
A (host, port) tuple formed with the corresponding data.
"""
if isinstance(addr, SocketAddress):
return (addr.host, addr.port)
if isinstance(addr, AgentAddress):
return (addr.address.host, addr.address.port)
if isinstance(addr, str):
aux = addr.split(':')
if len(aux) == 1:
port = None
else:
port = int(aux[-1])
host = aux[0]
return (host, port)
raise TypeError('Unsupported address type "%s"!' % type(addr))
def guess_kind(kind):
"""
Guess if a kind string is an AgentAddressKind or AgentChannelKind.
Parameters
----------
kind : str
The AgentAddressKind or AgentChannelKind in string format.
Returns
----------
AgentAddressKind or AgentChannelKind
The actual kind type.
"""
try:
return AgentAddressKind(kind)
except ValueError:
return AgentChannelKind(kind)
class AgentAddressTransport(str):
"""
Agent's address transport class. It can be 'tcp', 'ipc' or 'inproc'.
"""
def __new__(cls, value):
if value not in ['tcp', 'ipc', 'inproc']:
raise ValueError('Invalid address transport "%s"!' % value)
return super().__new__(cls, value)
class AgentAddressRole(str):
"""
Agent's address role class. It can either be ``'server'`` or ``'client'``.
"""
def __new__(cls, value):
if value not in ['server', 'client']:
raise ValueError('Invalid address role "%s"!' % value)
return super().__new__(cls, value)
def twin(self):
"""
Get the twin role of the current one. ``'server'`` would be the twin
of ``'client'`` and viceversa.
Returns
-------
AgentAddressRole
The twin role.
"""
if self == 'server':
return self.__class__('client')
return self.__class__('server')
class AgentAddressKind(str):
"""
Agent's address kind class.
This kind represents the communication pattern being used by the agent
address: REP, PULL, PUB...
"""
TWIN = {
'REQ': 'REP',
'REP': 'REQ',
'PUSH': 'PULL',
'PULL': 'PUSH',
'PUB': 'SUB',
'SUB': 'PUB',
'PULL_SYNC_PUB': 'PUSH_SYNC_SUB',
'PUSH_SYNC_SUB': 'PULL_SYNC_PUB',
}
ZMQ_KIND_CONVERSION = {
'REQ': zmq.REQ,
'REP': zmq.REP,
'PUSH': zmq.PUSH,
'PULL': zmq.PULL,
'PUB': zmq.PUB,
'SUB': zmq.SUB,
'PULL_SYNC_PUB': zmq.PULL,
'PUSH_SYNC_SUB': zmq.PUSH,
}
REQUIRE_HANDLER = ('REP', 'PULL', 'SUB', 'PULL_SYNC_PUB')
def __new__(cls, kind):
if kind not in cls.TWIN.keys():
raise ValueError('Invalid address kind "%s"!' % kind)
return super().__new__(cls, kind)
def zmq(self):
"""
Get the equivalent ZeroMQ socket kind.
Returns
-------
int
"""
return self.ZMQ_KIND_CONVERSION[self]
def requires_handler(self):
"""
Whether the Agent's address kind requires a handler or not.
A socket which processes incoming messages would require a
handler (i.e. 'REP', 'PULL', 'SUB'...).
Returns
-------
bool
"""
return self in self.REQUIRE_HANDLER
def twin(self):
"""
Get the twin kind of the current one.
        ``REQ`` would be the twin of ``REP`` and vice versa, ``PUB`` would be
        the twin of ``SUB`` and vice versa, etc.
Returns
-------
AgentAddressKind
The twin kind of the current one.
"""
return self.__class__(self.TWIN[self])
class AgentAddressSerializer(str):
"""
Agent's address serializer class.
Each communication channel will have a serializer.
    Note that for ``raw`` message passing, everything must be in bytes, and the
programmer is the one responsible for converting data to bytes.
Parameters
----------
serializer_type : str
Serializer type (i.e.: 'raw', 'pickle', 'cloudpickle', 'dill', 'json').
"""
SERIALIZER_SIMPLE = ('raw',)
SERIALIZER_SEPARATOR = ('pickle', 'cloudpickle', 'dill', 'json')
def __new__(cls, value):
if value not in cls.SERIALIZER_SIMPLE + cls.SERIALIZER_SEPARATOR:
raise ValueError('Invalid serializer type %s!' % value)
return super().__new__(cls, value)
def __init__(self, value):
self.requires_separator = value in self.SERIALIZER_SEPARATOR
class SocketAddress(object):
"""
    Socket address information consisting of the host and port.
Parameters
----------
host : str, ipaddress.IPv4Address
IP address.
port : int
Port number.
Attributes
----------
host : ipaddress.IPv4Address
IP address.
port : int
Port number.
"""
def __init__(self, host, port):
assert isinstance(
port, int
), 'Incorrect parameter port on SocketAddress; expecting type int.'
self.host = str(ip_address(host))
self.port = port
def __repr__(self):
"""
Return the string representation of the SocketAddress.
Returns
-------
representation : str
"""
return '%s:%s' % (self.host, self.port)
def __hash__(self):
return hash(self.host) ^ hash(self.port)
def __eq__(self, other):
if not isinstance(other, SocketAddress):
return False
return self.host == other.host and self.port == other.port
class AgentAddress:
"""
    Agent address information consisting of the transport protocol, address,
kind and role.
Parameters
----------
transport : str, AgentAddressTransport
Agent transport protocol.
address : str
Agent address.
kind : str, AgentAddressKind
Agent kind.
role : str, AgentAddressRole
Agent role.
serializer : str
Agent serializer type.
Attributes
----------
transport : str, AgentAddressTransport
Agent transport protocol.
address : str, SocketAddress
Agent address.
kind : AgentAddressKind
Agent kind.
role : AgentAddressRole
Agent role.
serializer : AgentAddressSerializer
Agent serializer.
"""
def __init__(self, transport, address, kind, role, serializer):
if transport == 'tcp':
address = SocketAddress(*address_to_host_port(address))
self.transport = AgentAddressTransport(transport)
self.address = address
self.kind = AgentAddressKind(kind)
self.role = AgentAddressRole(role)
self.serializer = AgentAddressSerializer(serializer)
def __repr__(self):
"""
Return the string representation of the AgentAddress.
Returns
-------
representation : str
"""
return 'AgentAddress(%s, %s, %s, %s, %s)' % (
self.transport,
self.address,
self.kind,
self.role,
self.serializer,
)
def __hash__(self):
return (
hash(self.transport)
^ hash(self.address)
^ hash(self.kind)
^ hash(self.role)
^ hash(self.serializer)
)
def __eq__(self, other):
if not isinstance(other, AgentAddress):
return False
return (
self.transport == other.transport
and self.address == other.address
and self.kind == other.kind
and self.role == other.role
and self.serializer == other.serializer
)
def twin(self):
"""
Return the twin address of the current one.
While the `host` and `port` are kept for the twin, the `kind` and
`role` change to their corresponding twins, according to the
rules defined in the respective classes.
Returns
-------
AgentAddress
The twin address of the current one.
"""
kind = self.kind.twin()
role = self.role.twin()
return self.__class__(
self.transport, self.address, kind, role, self.serializer
)
class AgentChannelKind(str):
"""
Agent's channel kind class.
This kind represents the communication pattern being used by the agent
channel: ASYNC_REP, STREAM...
"""
TWIN = {
'ASYNC_REP': 'ASYNC_REQ',
'ASYNC_REQ': 'ASYNC_REP',
'SYNC_PUB': 'SYNC_SUB',
'SYNC_SUB': 'SYNC_PUB',
}
def __new__(cls, kind):
if kind not in cls.TWIN.keys():
raise ValueError('Invalid channel kind "%s"!' % kind)
return super().__new__(cls, kind)
def twin(self):
"""
Get the twin kind of the current one.
        ``REQ`` would be the twin of ``REP`` and vice versa, ``PUB`` would be
        the twin of ``SUB`` and vice versa, etc.
Returns
-------
AgentChannelKind
"""
return self.__class__(self.TWIN[self])
class AgentChannel:
"""
Agent channel information.
Channels are communication means with sender and receiver in both sides
    (i.e.: PULL+PUB - PUSH+SUB or PULL+PUSH - PUSH+PULL).
Parameters
----------
kind : AgentChannelKind
Agent kind.
sender : str
First AgentAddress.
receiver : str
Second AgentAddress.
Attributes
----------
kind : AgentChannelKind
Agent kind.
sender : str
First AgentAddress.
receiver : str
Second AgentAddress.
"""
def __init__(self, kind, receiver, sender, twin_uuid=None):
self.kind = AgentChannelKind(kind)
self.receiver = receiver
self.sender = sender
self.transport = receiver.transport if receiver else sender.transport
self.serializer = (
receiver.serializer if receiver else sender.serializer
)
self.uuid = unique_identifier()
self.twin_uuid = twin_uuid
# Set up pairs
if sender:
self.sender.channel = self
if receiver:
self.receiver.channel = self
def __repr__(self):
"""
Return the string representation of the AgentChannel.
Returns
-------
representation : str
"""
return 'AgentChannel(kind=%s, receiver=%s, sender=%s)' % (
self.kind,
self.receiver,
self.sender,
)
def __hash__(self):
return hash(self.kind) ^ hash(self.receiver) ^ hash(self.sender)
def __eq__(self, other):
if not isinstance(other, AgentChannel):
return False
return (
self.kind == other.kind
and self.receiver == other.receiver
and self.sender == other.sender
)
def twin(self):
"""
Get the twin channel of the current one.
Returns
-------
AgentChannel
The twin channel.
"""
kind = self.kind.twin()
sender = self.receiver.twin() if self.receiver is not None else None
receiver = self.sender.twin() if self.sender is not None else None
return self.__class__(
kind=kind, receiver=receiver, sender=sender, twin_uuid=self.uuid
)
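# A minimal usage sketch (illustration only; not part of the original module).
# It builds a TCP client address and derives the address of its server-side
# peer via the kind/role twin rules defined above.
if __name__ == '__main__':
    client = AgentAddress('tcp', '127.0.0.1:5555', 'PUSH', 'client', 'pickle')
    server = client.twin()
    print(client)  # AgentAddress(tcp, 127.0.0.1:5555, PUSH, client, pickle)
    print(server)  # AgentAddress(tcp, 127.0.0.1:5555, PULL, server, pickle)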
|
tests/files/crawl_settings.py | dimichxp/grab | 2,266 | 11154345 | GRAB_SPIDER_CONFIG = {
'global': {
'spider_modules': ['tests.script_crawl'],
},
}
|
pmdarima/utils/tests/test_array.py | tuomijal/pmdarima | 736 | 11154394 |
from pmdarima.utils.array import diff, diff_inv, c, is_iterable, as_series, \
check_exog
from pmdarima.utils import get_callable
from numpy.testing import assert_array_equal, assert_array_almost_equal
import pytest
import pandas as pd
import numpy as np
x = np.arange(5)
m = np.array([10, 5, 12, 23, 18, 3, 2, 0, 12]).reshape(3, 3).T
X = pd.DataFrame.from_records(
np.random.RandomState(2).rand(4, 4),
columns=['a', 'b', 'c', 'd']
)
# need some infinite values in X for testing check_exog
X_nan = X.copy()
X_nan.loc[0, 'a'] = np.nan
X_inf = X.copy()
X_inf.loc[0, 'a'] = np.inf
# for diffinv
x_mat = (np.arange(9) + 1).reshape(3, 3).T
def test_diff():
# test vector for lag = (1, 2), diff = (1, 2)
assert_array_equal(diff(x, lag=1, differences=1), np.ones(4))
assert_array_equal(diff(x, lag=1, differences=2), np.zeros(3))
assert_array_equal(diff(x, lag=2, differences=1), np.ones(3) * 2)
assert_array_equal(diff(x, lag=2, differences=2), np.zeros(1))
# test matrix for lag = (1, 2), diff = (1, 2)
assert_array_equal(diff(m, lag=1, differences=1),
np.array([[-5, -5, -2], [7, -15, 12]]))
assert_array_equal(diff(m, lag=1, differences=2),
np.array([[12, -10, 14]]))
assert_array_equal(diff(m, lag=2, differences=1), np.array([[2, -20, 10]]))
assert diff(m, lag=2, differences=2).shape[0] == 0
@pytest.mark.parametrize(
'arr,lag,differences,xi,expected', [
# VECTORS -------------------------------------------------------------
# > x = c(0, 1, 2, 3, 4)
# > diffinv(x, lag=1, differences=1)
# [1] 0 0 1 3 6 10
pytest.param(x, 1, 1, None, [0, 0, 1, 3, 6, 10]),
# > diffinv(x, lag=1, differences=2)
# [1] 0 0 0 1 4 10 20
pytest.param(x, 1, 2, None, [0, 0, 0, 1, 4, 10, 20]),
# > diffinv(x, lag=2, differences=1)
# [1] 0 0 0 1 2 4 6
pytest.param(x, 2, 1, None, [0, 0, 0, 1, 2, 4, 6]),
# > diffinv(x, lag=2, differences=2)
# [1] 0 0 0 0 0 1 2 5 8
pytest.param(x, 2, 2, None, [0, 0, 0, 0, 0, 1, 2, 5, 8]),
# This is a test of the intermediate stage when x == [1, 0, 3, 2]
pytest.param([1, 0, 3, 2], 1, 1, [0], [0, 1, 1, 4, 6]),
# This is an intermediate stage when x == [0, 1, 2, 3, 4]
pytest.param(x, 1, 1, [0], [0, 0, 1, 3, 6, 10]),
# MATRICES ------------------------------------------------------------
# > matrix(data=c(1, 2, 3, 4, 5, 6, 7, 8, 9), nrow=3, ncol=3)
# [,1] [,2] [,3]
# [1,] 1 4 7
# [2,] 2 5 8
# [3,] 3 6 9
# > diffinv(X, 1, 1)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 1 4 7
# [3,] 3 9 15
# [4,] 6 15 24
pytest.param(x_mat, 1, 1, None,
[[0, 0, 0],
[1, 4, 7],
[3, 9, 15],
[6, 15, 24]]),
# > diffinv(X, 1, 2)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 0 0 0
# [3,] 1 4 7
# [4,] 4 13 22
# [5,] 10 28 46
pytest.param(x_mat, 1, 2, None,
[[0, 0, 0],
[0, 0, 0],
[1, 4, 7],
[4, 13, 22],
[10, 28, 46]]),
# > diffinv(X, 2, 1)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 0 0 0
# [3,] 1 4 7
# [4,] 2 5 8
# [5,] 4 10 16
pytest.param(x_mat, 2, 1, None,
[[0, 0, 0],
[0, 0, 0],
[1, 4, 7],
[2, 5, 8],
[4, 10, 16]]),
# > diffinv(X, 2, 2)
# [,1] [,2] [,3]
# [1,] 0 0 0
# [2,] 0 0 0
# [3,] 0 0 0
# [4,] 0 0 0
# [5,] 1 4 7
# [6,] 2 5 8
# [7,] 5 14 23
pytest.param(x_mat, 2, 2, None,
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 4, 7],
[2, 5, 8],
[5, 14, 23]]),
]
)
def test_diff_inv(arr, lag, differences, xi, expected):
res = diff_inv(arr, lag=lag, differences=differences, xi=xi)
    expected = np.array(expected, dtype=float)  # the np.float alias is removed in recent NumPy
assert_array_equal(expected, res)
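def test_diff_roundtrip_sketch():
    # Added sketch (not part of the original suite): differencing and then
    # inverting with ``xi`` seeded from the first observation recovers the
    # original series, following the R semantics documented above.
    d = diff(x, lag=1, differences=1)                        # [1, 1, 1, 1]
    restored = diff_inv(d, lag=1, differences=1, xi=[x[0]])  # [0, 1, 2, 3, 4]
    assert_array_equal(restored, x)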
def test_concatenate():
assert_array_equal(c(1, np.zeros(3)), np.array([1.0, 0.0, 0.0, 0.0]))
assert_array_equal(c([1], np.zeros(3)), np.array([1.0, 0.0, 0.0, 0.0]))
assert_array_equal(c(1), np.ones(1))
assert c() is None
assert_array_equal(c([1]), np.ones(1))
def test_corner_in_callable():
# test the ValueError in the get-callable method
with pytest.raises(ValueError):
get_callable('fake-key', {'a': 1})
def test_corner():
# fails because lag < 1
with pytest.raises(ValueError):
diff(x=x, lag=0)
with pytest.raises(ValueError):
diff_inv(x=x, lag=0)
# fails because differences < 1
with pytest.raises(ValueError):
diff(x=x, differences=0)
with pytest.raises(ValueError):
diff_inv(x=x, differences=0)
# Passing in xi with the incorrect shape to a 2-d array
with pytest.raises(IndexError):
diff_inv(x=np.array([[1, 1], [1, 1]]), xi=np.array([[1]]))
def test_is_iterable():
assert not is_iterable("this string")
assert is_iterable(["this", "list"])
assert not is_iterable(None)
assert is_iterable(np.array([1, 2]))
def test_as_series():
assert isinstance(as_series([1, 2, 3]), pd.Series)
assert isinstance(as_series(np.arange(5)), pd.Series)
assert isinstance(as_series(pd.Series([1, 2, 3])), pd.Series)
@pytest.mark.parametrize(
'arr', [
np.random.rand(5),
pd.Series(np.random.rand(5)),
]
)
def test_check_exog_ndim_value_err(arr):
with pytest.raises(ValueError):
check_exog(arr)
@pytest.mark.parametrize('arr', [X_nan, X_inf])
def test_check_exog_infinite_value_err(arr):
with pytest.raises(ValueError):
check_exog(arr, force_all_finite=True)
# show it passes when False
assert check_exog(
arr, force_all_finite=False, dtype=None, copy=False) is arr
def test_exog_pd_dataframes():
# test with copy
assert check_exog(X, force_all_finite=True, copy=True).equals(X)
# test without copy
assert check_exog(X, force_all_finite=True, copy=False) is X
def test_exog_np_array():
X_np = np.random.RandomState(1).rand(5, 5)
# show works on a list
assert_array_almost_equal(X_np, check_exog(X_np.tolist()))
assert_array_almost_equal(X_np, check_exog(X_np))
|
qutip/tests/test_spmath.py | camponogaraviera/qutip | 1,205 | 11154404 | <gh_stars>1000+
import numpy as np
from numpy.testing import (run_module_suite, assert_,
assert_equal, assert_almost_equal)
import scipy.sparse as sp
from qutip.fastsparse import fast_csr_matrix, fast_identity
from qutip.random_objects import (rand_dm, rand_herm,
rand_ket, rand_unitary)
from qutip.cy.spmath import (zcsr_kron, zcsr_transpose, zcsr_adjoint,
zcsr_isherm)
def test_csr_kron():
"spmath: zcsr_kron"
num_test = 5
for _ in range(num_test):
ra = np.random.randint(2,100)
rb = np.random.randint(2,100)
A = rand_herm(ra,0.5).data
B = rand_herm(rb,0.5).data
As = A.tocsr(1)
Bs = B.tocsr(1)
C = sp.kron(As,Bs, format='csr')
D = zcsr_kron(A, B)
assert_almost_equal(C.data, D.data)
assert_equal(C.indices, D.indices)
assert_equal(C.indptr, D.indptr)
for _ in range(num_test):
ra = np.random.randint(2,100)
rb = np.random.randint(2,100)
A = rand_ket(ra,0.5).data
B = rand_herm(rb,0.5).data
As = A.tocsr(1)
Bs = B.tocsr(1)
C = sp.kron(As,Bs, format='csr')
D = zcsr_kron(A, B)
assert_almost_equal(C.data, D.data)
assert_equal(C.indices, D.indices)
assert_equal(C.indptr, D.indptr)
for _ in range(num_test):
ra = np.random.randint(2,100)
rb = np.random.randint(2,100)
A = rand_dm(ra,0.5).data
B = rand_herm(rb,0.5).data
As = A.tocsr(1)
Bs = B.tocsr(1)
C = sp.kron(As,Bs, format='csr')
D = zcsr_kron(A, B)
assert_almost_equal(C.data, D.data)
assert_equal(C.indices, D.indices)
assert_equal(C.indptr, D.indptr)
for _ in range(num_test):
ra = np.random.randint(2,100)
rb = np.random.randint(2,100)
A = rand_ket(ra,0.5).data
B = rand_ket(rb,0.5).data
As = A.tocsr(1)
Bs = B.tocsr(1)
C = sp.kron(As,Bs, format='csr')
D = zcsr_kron(A, B)
assert_almost_equal(C.data, D.data)
assert_equal(C.indices, D.indices)
assert_equal(C.indptr, D.indptr)
def test_zcsr_transpose():
"spmath: zcsr_transpose"
for k in range(50):
ra = np.random.randint(2,100)
A = rand_ket(ra,0.5).data
B = A.T.tocsr()
C = A.trans()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
for k in range(50):
ra = np.random.randint(2,100)
A = rand_herm(5,1.0/ra).data
B = A.T.tocsr()
C = A.trans()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
for k in range(50):
ra = np.random.randint(2,100)
A = rand_dm(ra,1.0/ra).data
B = A.T.tocsr()
C = A.trans()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
for k in range(50):
ra = np.random.randint(2,100)
A = rand_unitary(ra,1.0/ra).data
B = A.T.tocsr()
C = A.trans()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
def test_zcsr_adjoint():
"spmath: zcsr_adjoint"
for k in range(50):
ra = np.random.randint(2,100)
A = rand_ket(ra,0.5).data
B = A.conj().T.tocsr()
C = A.adjoint()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
for k in range(50):
ra = np.random.randint(2,100)
A = rand_herm(5,1.0/ra).data
B = A.conj().T.tocsr()
C = A.adjoint()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
for k in range(50):
ra = np.random.randint(2,100)
A = rand_dm(ra,1.0/ra).data
B = A.conj().T.tocsr()
C = A.adjoint()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
for k in range(50):
ra = np.random.randint(2,100)
A = rand_unitary(ra,1.0/ra).data
B = A.conj().T.tocsr()
C = A.adjoint()
x = np.all(B.data == C.data)
y = np.all(B.indices == C.indices)
z = np.all(B.indptr == C.indptr)
assert_(x*y*z)
def test_zcsr_mult():
"spmath: zcsr_mult"
for k in range(50):
A = rand_ket(10,0.5).data
B = rand_herm(10,0.5).data
C = A.tocsr(1)
D = B.tocsr(1)
ans1 = B*A
ans2 = D*C
ans2.sort_indices()
x = np.all(ans1.data == ans2.data)
y = np.all(ans1.indices == ans2.indices)
z = np.all(ans1.indptr == ans2.indptr)
assert_(x*y*z)
for k in range(50):
A = rand_ket(10,0.5).data
B = rand_ket(10,0.5).dag().data
C = A.tocsr(1)
D = B.tocsr(1)
ans1 = B*A
ans2 = D*C
ans2.sort_indices()
x = np.all(ans1.data == ans2.data)
y = np.all(ans1.indices == ans2.indices)
z = np.all(ans1.indptr == ans2.indptr)
assert_(x*y*z)
ans1 = A*B
ans2 = C*D
ans2.sort_indices()
x = np.all(ans1.data == ans2.data)
y = np.all(ans1.indices == ans2.indices)
z = np.all(ans1.indptr == ans2.indptr)
assert_(x*y*z)
for k in range(50):
A = rand_dm(10,0.5).data
B = rand_dm(10,0.5).data
C = A.tocsr(1)
D = B.tocsr(1)
ans1 = B*A
ans2 = D*C
ans2.sort_indices()
x = np.all(ans1.data == ans2.data)
y = np.all(ans1.indices == ans2.indices)
z = np.all(ans1.indptr == ans2.indptr)
assert_(x*y*z)
for k in range(50):
A = rand_dm(10,0.5).data
B = rand_herm(10,0.5).data
C = A.tocsr(1)
D = B.tocsr(1)
ans1 = B*A
ans2 = D*C
ans2.sort_indices()
x = np.all(ans1.data == ans2.data)
y = np.all(ans1.indices == ans2.indices)
z = np.all(ans1.indptr == ans2.indptr)
assert_(x*y*z)
def test_zcsr_isherm():
"spmath: zcsr_isherm"
N = 100
for kk in range(100):
A = rand_herm(N, 0.1)
B = rand_herm(N, 0.05) + 1j*rand_herm(N, 0.05)
assert_(zcsr_isherm(A.data))
assert_(zcsr_isherm(B.data)==0)
def test_zcsr_isherm_compare_implicit_zero():
"""
Regression test for gh-1350, comparing explicitly stored values in the
    matrix (but below the tolerance for allowable Hermiticity) to implicit zeros.
"""
tol = 1e-12
n = 10
base = sp.csr_matrix(np.array([[1, tol * 1e-3j], [0, 1]]))
base = fast_csr_matrix((base.data, base.indices, base.indptr), base.shape)
# If this first line fails, the zero has been stored explicitly and so the
# test is invalid.
assert base.data.size == 3
assert zcsr_isherm(base, tol=tol)
assert zcsr_isherm(base.T, tol=tol)
# A similar test if the structures are different, but it's not
# Hermitian.
base = sp.csr_matrix(np.array([[1, 1j], [0, 1]]))
base = fast_csr_matrix((base.data, base.indices, base.indptr), base.shape)
assert base.data.size == 3
assert not zcsr_isherm(base, tol=tol)
assert not zcsr_isherm(base.T, tol=tol)
# Catch possible edge case where it shouldn't be Hermitian, but faulty loop
# logic doesn't fully compare all rows.
base = sp.csr_matrix(np.array([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
], dtype=np.complex128))
base = fast_csr_matrix((base.data, base.indices, base.indptr), base.shape)
assert base.data.size == 1
assert not zcsr_isherm(base, tol=tol)
assert not zcsr_isherm(base.T, tol=tol)
# Pure diagonal matrix.
base = fast_identity(n)
base.data *= np.random.rand(n)
assert zcsr_isherm(base, tol=tol)
assert not zcsr_isherm(base * 1j, tol=tol)
# Larger matrices where all off-diagonal elements are below the absolute
# tolerance, so everything should always appear Hermitian, but with random
# patterns of non-zero elements. It doesn't matter that it isn't Hermitian
# if scaled up; everything is below absolute tolerance, so it should appear
    # so. We also set the diagonal to be larger than the tolerance to ensure
# isherm can't just compare everything to zero.
for density in np.linspace(0.2, 1, 21):
base = tol * 1e-2 * (np.random.rand(n, n) + 1j*np.random.rand(n, n))
# Mask some values out to zero.
base[np.random.rand(n, n) > density] = 0
np.fill_diagonal(base, tol * 1000)
nnz = np.count_nonzero(base)
base = sp.csr_matrix(base)
base = fast_csr_matrix((base.data, base.indices, base.indptr), (n, n))
assert base.data.size == nnz
assert zcsr_isherm(base, tol=tol)
assert zcsr_isherm(base.T, tol=tol)
# Similar test when it must be non-Hermitian. We set the diagonal to
# be real because we want to test off-diagonal implicit zeros, and
# having an imaginary first element would automatically fail.
nnz = 0
while nnz <= n:
# Ensure that we don't just have the real diagonal.
base = tol * 1000j*np.random.rand(n, n)
# Mask some values out to zero.
base[np.random.rand(n, n) > density] = 0
np.fill_diagonal(base, tol * 1000)
nnz = np.count_nonzero(base)
base = sp.csr_matrix(base)
base = fast_csr_matrix((base.data, base.indices, base.indptr), (n, n))
assert base.data.size == nnz
assert not zcsr_isherm(base, tol=tol)
assert not zcsr_isherm(base.T, tol=tol)
if __name__ == "__main__":
run_module_suite()
|
momentumnet-main/examples/drop_in_replacement_advanced.py | ZhuFanCheng/Thesis | 188 | 11154411 | <reponame>ZhuFanCheng/Thesis
"""
======================================================
From ResNets to Momentum ResNets 3)
======================================================
This illustrates on a more complex example how to replace an existing
ResNet with a MomentumNet.
<NAME>, <NAME>, <NAME>,
<NAME>. Momentum Residual Neural Networks.
Proceedings of the 38th International Conference
on Machine Learning, PMLR 139:9276-9287
""" # noqa
# Authors: <NAME>, <NAME>
# License: MIT
import torch
from momentumnet import transform_to_momentumnet
########################################
# We will use a Vision Transformer model
########################################
########################################################################
# From https://arxiv.org/abs/2010.11929
# Code adapted from https://github.com/lucidrains/vit-pytorch
from vit_pytorch import ViT
v = ViT(
image_size=256,
patch_size=32,
num_classes=1000,
dim=1024,
depth=6,
heads=16,
mlp_dim=2048,
dropout=0.1,
emb_dropout=0.1,
)
################################################
# We first rename transformer layer from v to be
# consistent with our forward rule
v.transformer = v.transformer.layers
###################################################
# We simply modify the transformer module to have a
# Sequential form
v_modules = []
for i, _ in enumerate(v.transformer):
for layer in v.transformer[i]:
v_modules.append(layer)
v.transformer = torch.nn.Sequential(*v_modules)
#################################################
# Now we can transform it to its momentum version
mv = transform_to_momentumnet(
v,
["transformer"],
gamma=0.9,
keep_first_layer=False,
use_backprop=False,
is_residual=True,
)
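######################################################################
# Sanity-check sketch (assumption: the transformed network keeps the
# drop-in forward interface of the original ViT). Feed a dummy batch
# and confirm that class logits of the expected shape come back.
img = torch.randn(1, 3, 256, 256)
preds = mv(img)
print(preds.shape)  # expected: torch.Size([1, 1000])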
|
tests/test_model.py | MughilM/luke | 467 | 11154415 | <filename>tests/test_model.py<gh_stars>100-1000
import functools
import operator
import pytest
import torch
from transformers import AutoConfig, AutoModel
from luke.model import EntityEmbeddings, LukeConfig, LukeModel
BERT_MODEL_NAME = "bert-base-uncased"
@pytest.fixture
def bert_config():
bert_config = AutoConfig.from_pretrained(BERT_MODEL_NAME)
bert_config.hidden_dropout_prob = 0.0
return bert_config
def _create_luke_config(bert_config, entity_vocab_size, entity_emb_size):
return LukeConfig(
entity_vocab_size=entity_vocab_size,
bert_model_name=BERT_MODEL_NAME,
entity_emb_size=entity_emb_size,
**bert_config.to_dict()
)
def test_entity_embedding(bert_config):
config = _create_luke_config(bert_config, 5, bert_config.hidden_size)
entity_embeddings = EntityEmbeddings(config)
entity_ids = torch.LongTensor([2, 3, 0])
position_ids = torch.LongTensor(
[
[0, 1] + [-1] * (config.max_position_embeddings - 2),
[3] + [-1] * (config.max_position_embeddings - 1),
[-1] * config.max_position_embeddings,
]
)
token_type_ids = torch.LongTensor([0, 1, 0])
emb = entity_embeddings(entity_ids, position_ids, token_type_ids)
assert emb.size() == (3, config.hidden_size)
for n, (entity_id, position_id_list, token_type_id) in enumerate(zip(entity_ids, position_ids, token_type_ids)):
entity_emb = entity_embeddings.entity_embeddings.weight[entity_id]
token_type_emb = entity_embeddings.token_type_embeddings.weight[token_type_id]
position_emb_list = [entity_embeddings.position_embeddings.weight[p] for p in position_id_list if p != -1]
if position_emb_list:
position_emb = functools.reduce(operator.add, position_emb_list) / len(position_emb_list)
target_emb = entity_embeddings.LayerNorm((entity_emb + position_emb + token_type_emb))
else:
target_emb = entity_embeddings.LayerNorm((entity_emb + token_type_emb))
assert torch.equal(emb[n], target_emb)
def test_load_bert_weights(bert_config):
bert_model = AutoModel.from_pretrained(BERT_MODEL_NAME)
bert_state_dict = bert_model.state_dict()
config = _create_luke_config(bert_config, 5, bert_config.hidden_size)
model = LukeModel(config)
model.load_bert_weights(bert_state_dict)
luke_state_dict = model.state_dict()
for key, tensor in bert_state_dict.items():
assert torch.equal(luke_state_dict[key], tensor)
|
terrascript/provider/hashicorp/google_beta.py | mjuenema/python-terrascript | 507 | 11154431 | # terrascript/provider/hashicorp/google_beta.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:17:17 UTC)
import terrascript
class google_beta(terrascript.Provider):
"""terraform-provider-google-beta"""
__description__ = "terraform-provider-google-beta"
__namespace__ = "hashicorp"
__name__ = "google-beta"
__source__ = "https://github.com/hashicorp/terraform-provider-google-beta"
__version__ = "3.85.0"
__published__ = "2021-09-20T20:18:42Z"
__tier__ = "official"
__all__ = ["google_beta"]
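# Usage sketch (follows the python-terrascript pattern of adding providers to a
# Terrascript document; the project/region values below are placeholders):
#     import terrascript
#     config = terrascript.Terrascript()
#     config += google_beta(project="my-project", region="us-central1")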
|
.modules/.recon-ng/modules/recon/domains-contacts/whois_pocs.py | termux-one/EasY_HaCk | 1,103 | 11154439 | from recon.core.module import BaseModule
from urlparse import urlparse
class Module(BaseModule):
meta = {
'name': 'Whois POC Harvester',
'author': '<NAME> (@LaNMaSteR53)',
'description': 'Uses the ARIN Whois RWS to harvest POC data from whois queries for the given domain. Updates the \'contacts\' table with the results.',
'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
}
def module_run(self, domains):
headers = {'Accept': 'application/json'}
for domain in domains:
self.heading(domain, level=0)
url = 'http://whois.arin.net/rest/pocs;domain=%s' % (domain)
self.verbose('URL: %s' % url)
resp = self.request(url, headers=headers)
if 'Your search did not yield any results.' in resp.text:
self.output('No contacts found.')
continue
handles = [x['@handle'] for x in resp.json['pocs']['pocRef']] if type(resp.json['pocs']['pocRef']) == list else [resp.json['pocs']['pocRef']['@handle']]
for handle in handles:
url = 'http://whois.arin.net/rest/poc/%s' % (handle)
self.verbose('URL: %s' % url)
resp = self.request(url, headers=headers)
poc = resp.json['poc']
emails = poc['emails']['email'] if type(poc['emails']['email']) == list else [poc['emails']['email']]
for email in emails:
fname = poc['firstName']['$'] if 'firstName' in poc else None
lname = poc['lastName']['$']
name = ' '.join([x for x in [fname, lname] if x])
email = email['$']
title = 'Whois contact'
city = poc['city']['$'].title()
state = poc['iso3166-2']['$'].upper() if 'iso3166-2' in poc else None
region = ', '.join([x for x in [city, state] if x])
country = poc['iso3166-1']['name']['$'].title()
if email.lower().endswith(domain.lower()):
self.add_contacts(first_name=fname, last_name=lname, email=email, title=title, region=region, country=country)
|
phrase2vec.py | bright1993ff66/emoji2vec | 173 | 11154443 | <filename>phrase2vec.py<gh_stars>100-1000
#!/usr/bin/env python
"""Wrapper for word2vec and emoji2vec models, so that we can query by entire phrase, rather than by
individual words.
"""
# External dependencies
import os.path
import gensim.models as gs
import numpy as np
# Authorship
__author__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
class Phrase2Vec:
"""Wrapper for the word2vec model and emoji2vec model, allowing us to compute phrases"""
def __init__(self, dim, w2v, e2v=None):
"""Constructor for the Phrase2Vec model
Args:
dim: Dimension of the vectors in word2vec and emoji2vec
w2v: Gensim object for word2vec
e2v: Gensim object for emoji2vec
"""
self.wordVecModel = w2v
if e2v is not None:
self.emojiVecModel = e2v
else:
self.emojiVecModel = dict()
self.dimension = dim
@classmethod
def from_word2vec_paths(cls, dim, w2v_path='/data/word2vec/GoogleNews-vectors-negative300.bin',
e2v_path=None):
"""Creates a Phrase2Vec object based on paths for w2v and e2v
Args:
dim: Dimension of the vectors in word2vec and emoji2vec
w2v_path: Path to word2vec vectors
e2v_path: Path to emoji2vec vectors
        Returns:
            A Phrase2Vec instance wrapping the loaded word2vec (and optional
            emoji2vec) vectors.
        """
if not os.path.exists(w2v_path):
print(str.format('{} not found. Either provide a different path, or download binary from '
'https://code.google.com/archive/p/word2vec/ and unzip', w2v_path))
w2v = gs.Word2Vec.load_word2vec_format(w2v_path, binary=True)
if e2v_path is not None:
e2v = gs.Word2Vec.load_word2vec_format(e2v_path, binary=True)
else:
e2v = dict()
return cls(dim, w2v, e2v)
def __getitem__(self, item):
"""Get the vector sum of all tokens in a phrase
Args:
item: Phrase to be converted into a vector sum
Returns:
phr_sum: Bag-of-words sum of the tokens in the phrase supplied
"""
tokens = item.split(' ')
phr_sum = np.zeros(self.dimension, np.float32)
for token in tokens:
if token in self.wordVecModel:
phr_sum += self.wordVecModel[token]
elif token in self.emojiVecModel:
phr_sum += self.emojiVecModel[token]
return phr_sum
def from_emoji(self, emoji_vec, top_n=10):
"""Get the top n closest tokens for a supplied emoji vector
Args:
emoji_vec: Emoji vector
top_n: number of results to return
Returns:
Closest n tokens for a supplied emoji_vec
"""
return self.wordVecModel.most_similar(positive=emoji_vec, negative=[], topn=top_n)
def __setitem__(self, key, value):
self.wordVecModel[key] = value
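if __name__ == '__main__':
    # Minimal self-contained sketch: plain dicts of toy 3-d vectors stand in
    # for the gensim word2vec/emoji2vec models, since __getitem__ only needs
    # membership tests and indexing.
    toy_w2v = {'i': np.array([1.0, 0.0, 0.0], dtype=np.float32),
               'love': np.array([0.0, 1.0, 0.0], dtype=np.float32)}
    toy_e2v = {'emoji_heart': np.array([0.0, 0.0, 1.0], dtype=np.float32)}
    p2v = Phrase2Vec(3, toy_w2v, toy_e2v)
    print(p2v['i love emoji_heart'])  # -> [1. 1. 1.]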
|
Dark Souls/print__stats/common.py | DazEB2/SimplePyScripts | 117 | 11154474 | <reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import requests
from bs4 import BeautifulSoup
def get_parsed_two_column_table_stats(url: str) -> [(str, str)]:
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
table = root.select_one('table')
items = []
for tr in table.select('tr'):
tds = tr.select('td')
if len(tds) != 3:
continue
title_node, description_node = tds[1:]
title = title_node.text.strip()
description = description_node.text.strip()
items.append((title, description))
items.sort(key=lambda x: x[0])
return items
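if __name__ == '__main__':
    # Usage sketch. The URL below is only a placeholder assumption: any page
    # whose first <table> lays out rows as three cells (icon, title,
    # description) works with the parser above.
    demo_url = 'https://darksouls.fandom.com/wiki/Stats'  # hypothetical example URL
    for title, description in get_parsed_two_column_table_stats(demo_url):
        print('{}: {}'.format(title, description))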
|
neural tensor network/ntn_model.py | kishormishra3/DeepLearn | 1,756 | 11154483 | # -*- coding: utf-8 -*-
import glob
import ntn_input
from ntn import *
import random
import keras
dimx,dimy = 100,100
corrupt_samples = 1
thres = 0.023
warnings.simplefilter("ignore")
'''def word2vec_embedding_layer(embedding_matrix):
#weights = np.load('Word2Vec_QA.syn0.npy')
layer = Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1], weights=[embedding_matrix])
return layer
try:
word = wordVec_model['word']
print 'using loaded model.....'
except:
wordVec_model = gen.models.KeyedVectors.load_word2vec_format("I:\\workspace\\neural network\\cornet_evals\\sick\\GoogleNews-vectors-negative300.bin.gz",binary=True)
'''
def contrastive_loss(y_true, y_pred):
margin = 1
return K.mean(y_true * K.square(y_pred) +
(1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
def max_margin(y_true, y_pred):
num_ex = y_pred.shape[0]/2
#print num_ex
y_pos = y_pred[:num_ex]
y_neg = y_pred[num_ex:]
return K.mean(K.maximum(0., 1. - y_pos + y_neg))
def data_to_indexed(data, entities, relations):
entity_to_index = {entities[i] : i for i in range(len(entities))}
relation_to_index = {relations[i] : i for i in range(len(relations))}
indexed_data = [(entity_to_index[data[i][0]], relation_to_index[data[i][1]],\
entity_to_index[data[i][2]]) for i in range(len(data))]
return indexed_data
def get_batch(batch_size, data, num_entities, corrupt_size):
random_indices = random.sample(range(len(data)), batch_size)
#data[i][0] = e1, data[i][1] = r, data[i][2] = e2, random=e3 (corrupted)
batch = [(data[i][0], data[i][1], data[i][2], random.randint(0, num_entities-1))\
for i in random_indices for j in range(corrupt_size)]
return batch
def split_batch(data_batch, num_relations):
batches = [[] for i in range(num_relations)]
for e1,r,e2,e3 in data_batch:
batches[r].append((e1,e2,e3))
return batches
def make_batch(e1,e2,rel,batch_size=100):
new_e1,new_e2,new_rel,labels = [],[],[],[]
split = batch_size/2
mid = (len(e1) - len(e1) % batch_size) / 2
for i in range(0,mid-1,split):
new_e1.extend(e1[i:i+split])
new_e2.extend(e2[i:i+split])
new_rel.extend(rel[i:i+split])
new_e1.extend(e1[mid+i:mid+i+split])
new_e2.extend(e2[mid+i:mid+i+split])
new_rel.extend(rel[mid+i:mid+i+split])
labels.extend([1]*split)
labels.extend([0]*split)
return new_e1,new_e2,new_rel,labels
def fill_entity(e1,e2,max_num):
for key in e1:
if len(e1[key])<max_num:
entity_len = len(e1[key])
train_samples = max_num - entity_len
#print entity_len, max_num
samples = []
for j in range(train_samples):
samples.append(random.randrange(0,entity_len))
for i in samples:
e1[key].append(e1[key][i])
e2[key].append(e2[key][i])
return e1,e2
def prepare_data(corrupt_samples):
raw_training_data = ntn_input.load_training_data(ntn_input.data_path)
raw_dev_data = ntn_input.load_dev_data(ntn_input.data_path)
print("Load entities and relations...")
entities_list = ntn_input.load_entities(ntn_input.data_path)
relations_list = ntn_input.load_relations(ntn_input.data_path)
#python list of (e1, R, e2) for entire training set in index form
indexed_training_data = data_to_indexed(raw_training_data, entities_list, relations_list)
indexed_dev_data = data_to_indexed(raw_dev_data, entities_list, relations_list)
print("Load embeddings...")
(init_word_embeds, entity_to_wordvec) = ntn_input.load_init_embeds(ntn_input.data_path)
num_entities = len(entities_list)
num_relations = len(relations_list)
e1,e2,labels_train,labels_dev,t1,t2 = {},{},[],[],{},{}
for i in indexed_training_data:
try:
e1[i[1]].append(init_word_embeds[i[0]])
e2[i[1]].append(init_word_embeds[i[2]])
except:
e1[i[1]] = []
e2[i[1]] = []
max_len_e1 = max([len(e1[i])for i in e1])
labels_train = [1]*max_len_e1
e1,e2 = fill_entity(e1,e2,max_len_e1)
for i in range(max_len_e1):
for j in range(corrupt_samples):
for k in range(11):
e1[k].append(init_word_embeds[indexed_training_data[i][0]])
e2[k].append(init_word_embeds[random.randrange(0,len(init_word_embeds))])
labels_train.append(0)
for i in indexed_dev_data:
try:
t1[i[1]].append(init_word_embeds[i[0]])
t2[i[1]].append(init_word_embeds[i[2]])
except:
t1[i[1]] = []
t2[i[1]] = []
max_len_t1 = max([len(t1[i])for i in t1])
labels_dev = [1]*max_len_t1
t1,t2 = fill_entity(t1,t2,max_len_t1)
for i in range(max_len_t1):
for j in range(corrupt_samples):
for k in range(11):
t1[k].append(init_word_embeds[indexed_dev_data[i][0]])
t2[k].append(init_word_embeds[random.randrange(0,len(init_word_embeds))])
labels_dev.append(0)
labels_train,labels_dev = np.array(labels_train),np.array(labels_dev)
new_lab_train, new_lab_dev = [],[]
for i in labels_train:
new_lab_train.append([i]*11)
    for j in labels_dev:  # dev labels are built from labels_dev
        new_lab_dev.append([j]*11)
return e1,e2,np.array(new_lab_train),t1,t2,np.array(new_lab_dev),num_relations
#e1,e2,labels_train,t1,t2,labels_dev,num_relations = prepare_data()
#if True:
def build_model(num_relations, tensor_slices):
Input_x, Input_y = [], []
#inpx = Input(shape=(dimx,))
#inpy = Input(shape=(dimy,))
for i in range(num_relations):
Input_x.append(Input(shape=(dimx,)))
Input_y.append(Input(shape=(dimy,)))
#ntn = {}
ntn = []
score = []
for i in range(num_relations):
name = 'ntn'+str(i)
#ntn[i] = ntn_layer(inp_size=dimx,out_size=4,name=name)([inpx,inpy])
ntn.append(ntn_layer(inp_size=dimx,out_size=tensor_slices)([Input_x[i],Input_y[i]]))
score.append(Dense(1,activation='sigmoid')(ntn[i]))
#merge_model = Merge(mode='concat')([ntn[i]for i in range(num_relations)])
#score = Dense(num_relations,activation='softmax')(merge_model)
all_inputs = [Input_x[i]for i in range(num_relations)]
all_inputs.extend([Input_y[i]for i in range(num_relations)])
model = Model(all_inputs,score)
model.compile(loss=contrastive_loss,optimizer='adam')
print("Build Model...")
return model
def aggregate(e1,e2,labels_train,t1,t2,labels_dev,num_relations):
e = [np.array(e1[key]) for key in e1]
e.extend(np.array(e2[key]) for key in e2)
t = [np.array(t1[key]) for key in t1]
t.extend(np.array(t2[key]) for key in t2)
labels_train = [labels_train[:,i]for i in range(num_relations)]
labels_dev = [labels_dev[:,i]for i in range(num_relations)]
return e, t, labels_train, labels_dev
'''model = build_model(num_relations)
model.fit(e,labels_train,
nb_epoch=10,
batch_size=100,verbose=2)
'''
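# End-to-end driver sketch (mirrors the commented-out snippets above; running
# it requires the data files expected by ntn_input):
#     e1, e2, labels_train, t1, t2, labels_dev, num_relations = prepare_data(corrupt_samples)
#     model = build_model(num_relations, tensor_slices=4)
#     e, t, labels_train, labels_dev = aggregate(e1, e2, labels_train,
#                                                t1, t2, labels_dev, num_relations)
#     model.fit(e, labels_train, nb_epoch=10, batch_size=100, verbose=2)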
|
clarifai/rest/grpc/proto/clarifai/api/cluster_pb2.py | Taik/clarifai-python | 322 | 11154489 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/clarifai/api/cluster.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/clarifai/api/cluster.proto',
package='clarifai.api',
syntax='proto3',
serialized_pb=_b('\n proto/clarifai/api/cluster.proto\x12\x0c\x63larifai.api\"\x15\n\x07\x43luster\x12\n\n\x02id\x18\x01 \x01(\tBZ\n\x1b\x63larifai2.internal.grpc.apiZ\x03\x61pi\xa2\x02\x04\x43\x41IP\xaa\x02\x16\x43larifai.Internal.GRPC\xc2\x02\x01_\xca\x02\x11\x43larifai\\Internalb\x06proto3')
)
_CLUSTER = _descriptor.Descriptor(
name='Cluster',
full_name='clarifai.api.Cluster',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='clarifai.api.Cluster.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=71,
)
DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict(
DESCRIPTOR = _CLUSTER,
__module__ = 'proto.clarifai.api.cluster_pb2'
# @@protoc_insertion_point(class_scope:clarifai.api.Cluster)
))
_sym_db.RegisterMessage(Cluster)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033clarifai2.internal.grpc.apiZ\003api\242\002\004CAIP\252\002\026Clarifai.Internal.GRPC\302\002\001_\312\002\021Clarifai\\Internal'))
# @@protoc_insertion_point(module_scope)
|
examples/csj/visualization/decode_ctc.py | sundogrd/tensorflow_end2end_speech_recognition | 351 | 11154518 | <filename>examples/csj/visualization/decode_ctc.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Decode the trained CTC outputs (CSJ corpus)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, abspath
import sys
import tensorflow as tf
import yaml
import argparse
sys.path.append(abspath('../../../'))
from experiments.csj.data.load_dataset_ctc import Dataset
from models.ctc.ctc import CTC
from utils.io.labels.character import Idx2char
from utils.io.labels.sparsetensor import sparsetensor2list
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=-1,
help='the epoch to restore')
parser.add_argument('--model_path', type=str,
help='path to the model to evaluate')
parser.add_argument('--beam_width', type=int, default=20,
help='beam_width (int, optional): beam width for beam search.' +
                    ' 1 disables beam search, which means greedy decoding.')
parser.add_argument('--eval_batch_size', type=int, default=1,
help='the size of mini-batch when evaluation. ' +
'If you set -1, batch size is the same as that when training.')
def do_decode(model, params, epoch, beam_width, eval_batch_size):
"""Decode the CTC outputs.
Args:
model: the model to restore
params (dict): A dictionary of parameters
epoch (int): the epoch to restore
beam_width (int): beam width for beam search.
            1 disables beam search, which means greedy decoding.
eval_batch_size (int): the size of mini-batch when evaluation
"""
# Load dataset
eval1_data = Dataset(
data_type='eval1', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'] if eval_batch_size == -
1 else eval_batch_size,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
shuffle=False)
eval2_data = Dataset(
data_type='eval2', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'] if eval_batch_size == -
1 else eval_batch_size,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
shuffle=False)
eval3_data = Dataset(
data_type='eval3', train_data_size=params['train_data_size'],
label_type=params['label_type'],
batch_size=params['batch_size'] if eval_batch_size == -
1 else eval_batch_size,
splice=params['splice'],
num_stack=params['num_stack'], num_skip=params['num_skip'],
shuffle=False)
with tf.name_scope('tower_gpu0'):
# Define placeholders
model.create_placeholders()
# Add to the graph each operation (including model definition)
_, logits = model.compute_loss(model.inputs_pl_list[0],
model.labels_pl_list[0],
model.inputs_seq_len_pl_list[0],
model.keep_prob_pl_list[0])
decode_op = model.decoder(logits,
model.inputs_seq_len_pl_list[0],
beam_width=beam_width)
# Create a saver for writing training checkpoints
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(model.save_path)
# If check point exists
if ckpt:
model_path = ckpt.model_checkpoint_path
if epoch != -1:
model_path = model_path.split('/')[:-1]
model_path = '/'.join(model_path) + \
'/model.ckpt-' + str(epoch)
saver.restore(sess, model_path)
print("Model restored: " + model_path)
else:
raise ValueError('There are not any checkpoints.')
# Visualize
decode(session=sess,
decode_op=decode_op,
model=model,
dataset=eval1_data,
label_type=params['label_type'],
train_data_size=params['train_data_size'],
is_test=True,
save_path=None)
# save_path=model.save_path)
decode(session=sess,
decode_op=decode_op,
model=model,
dataset=eval2_data,
label_type=params['label_type'],
train_data_size=params['train_data_size'],
is_test=True,
save_path=None)
# save_path=model.save_path)
decode(session=sess,
decode_op=decode_op,
model=model,
dataset=eval3_data,
label_type=params['label_type'],
train_data_size=params['train_data_size'],
is_test=True,
save_path=None)
# save_path=model.save_path)
def decode(session, decode_op, model, dataset, label_type,
train_data_size, is_test=True, save_path=None):
"""Visualize label outputs of CTC model.
Args:
session: session of training model
decode_op: operation for decoding
model: the model to evaluate
dataset: An instance of a `Dataset` class
        label_type (string): kanji or kana or kanji_divide or kana_divide
train_data_size (string): train_subset or train_fullset
is_test (bool, optional): set to True when evaluating by the test set
save_path (string, optional): path to save decoding results
"""
if 'kanji' in label_type:
map_file_path = '../metrics/mapping_files/' + \
label_type + '_' + train_data_size + '.txt'
elif 'kana' in label_type:
map_file_path = '../metrics/mapping_files/' + label_type + '.txt'
else:
raise TypeError
idx2char = Idx2char(map_file_path=map_file_path)
if save_path is not None:
sys.stdout = open(join(model.model_dir, 'decode.txt'), 'w')
for data, is_new_epoch in dataset:
# Create feed dictionary for next mini batch
inputs, labels_true, inputs_seq_len, input_names = data
feed_dict = {
model.inputs_pl_list[0]: inputs[0],
model.inputs_seq_len_pl_list[0]: inputs_seq_len[0],
model.keep_prob_pl_list[0]: 1.0
}
# Decode
batch_size = inputs[0].shape[0]
labels_pred_st = session.run(decode_op, feed_dict=feed_dict)
no_output_flag = False
try:
labels_pred = sparsetensor2list(
labels_pred_st, batch_size=batch_size)
except IndexError:
# no output
no_output_flag = True
# Visualize
for i_batch in range(batch_size):
print('----- wav: %s -----' % input_names[0][i_batch])
if is_test:
str_true = labels_true[0][i_batch][0]
else:
str_true = idx2char(labels_true[0][i_batch])
if no_output_flag:
str_pred = ''
else:
str_pred = idx2char(labels_pred[i_batch])
print('Ref: %s' % str_true)
print('Hyp: %s' % str_pred)
if is_new_epoch:
break
def main():
args = parser.parse_args()
# Load config file
with open(join(args.model_path, 'config.yml'), "r") as f:
        config = yaml.safe_load(f)  # plain yaml.load() requires an explicit Loader on newer PyYAML
params = config['param']
# Except for a blank label
if params['label_type'] == 'kana':
params['num_classes'] = 146
elif params['label_type'] == 'kana_divide':
params['num_classes'] = 147
elif params['label_type'] == 'kanji':
if params['train_data_size'] == 'train_subset':
params['num_classes'] = 2981
elif params['train_data_size'] == 'train_fullset':
params['num_classes'] = 3385
elif params['label_type'] == 'kanji_divide':
if params['train_data_size'] == 'train_subset':
params['num_classes'] = 2982
elif params['train_data_size'] == 'train_fullset':
params['num_classes'] = 3386
else:
raise TypeError
# Modle setting
model = CTC(encoder_type=params['encoder_type'],
input_size=params['input_size'],
splice=params['splice'],
num_stack=params['num_stack'],
num_units=params['num_units'],
num_layers=params['num_layers'],
num_classes=params['num_classes'],
lstm_impl=params['lstm_impl'],
use_peephole=params['use_peephole'],
parameter_init=params['weight_init'],
clip_grad_norm=params['clip_grad_norm'],
clip_activation=params['clip_activation'],
num_proj=params['num_proj'],
weight_decay=params['weight_decay'])
model.save_path = args.model_path
do_decode(model=model, params=params,
epoch=args.epoch, beam_width=args.beam_width,
eval_batch_size=args.eval_batch_size)
if __name__ == '__main__':
main()
# if input_names[0] not in ['A03M0106_0057', 'A03M0016_0014']:
# continue
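# Example invocation (sketch; the model path is a placeholder):
#   python decode_ctc.py --model_path /path/to/trained_ctc_model \
#       --epoch -1 --beam_width 20 --eval_batch_size 1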
|
segmappy/bin/plot_reconstructions.py | Oofs/segmap | 771 | 11154520 | <gh_stars>100-1000
from __future__ import print_function
from builtins import input
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
from sklearn import metrics
import ensure_segmappy_is_installed
from segmappy import Dataset
from segmappy.tools.import_export import load_segments_no_duplicates
from segmappy.tools.voxeltools import *
from mpl_toolkits.mplot3d import *
# run_2018-01-24T11:50:48.334206
originals, sids_originals = load_segments_no_duplicates(
"/tmp/online_matcher/", "run_2018-01-30T14:47:52.511763_segments.csv"
)
reconstructions, sids_reconstructions = load_segments_no_duplicates(
"/tmp/online_matcher/", "run_2018-01-30T14:47:52.511763_reconstructions.csv"
)
# order along z to solve rendering issue in matplotlib
reconstructions_ordered = []
for reconstruction in reconstructions:
reconstructions_ordered.append(reconstruction[reconstruction[:, 2].argsort()])
reconstructions = reconstructions_ordered
MIN_N_POINTS = 300
POINT_SIZE = 20
n_saved = 0
for i in range(len(originals)):
art3d.zalpha = lambda *args: args[0]
fig = plt.figure(1, frameon=False)
plt.clf()
original = originals[i]
reconstruction = reconstructions[i]
if original.shape[0] > MIN_N_POINTS:
seg = original
axes_min = np.array(np.min(seg, axis=0))
axes_max = np.array(np.max(seg, axis=0))
print("axes_min max before ", str(axes_min[2]), " ", str(axes_max[2]))
X = seg[:, 0]
Y = seg[:, 1]
Z = seg[:, 2]
max_range = np.array([X.max() - X.min(), Y.max() - Y.min()]).max() / 2.0
y_before = axes_max[2]
seg = reconstruction
axes_min_temp = np.minimum(axes_min, np.min(seg, axis=0))
axes_max_temp = np.maximum(axes_max, np.max(seg, axis=0))
if axes_max_temp[2] > y_before + 1:
seg[:, 2] = seg[:, 2] - (axes_max_temp[2] - y_before)
axes_min = np.minimum(axes_min, np.min(seg, axis=0))
axes_max = np.maximum(axes_max, np.max(seg, axis=0))
else:
axes_min = axes_min_temp
axes_max = axes_max_temp
print("axes_min max after ", str(axes_min[2]), " ", str(axes_max[2]))
X = seg[:, 0]
Y = seg[:, 1]
Z = seg[:, 2]
max_range = max(
max_range, np.array([X.max() - X.min(), Y.max() - Y.min()]).max() / 2.0
)
ax = fig.add_subplot(121, projection="3d")
seg = original
# marker='.', lw = 0,
ax.scatter(
seg[:, 0],
seg[:, 1],
seg[:, 2],
s=POINT_SIZE / max_range,
c=seg[:, 2],
edgecolors="none",
depthshade=False,
cmap="jet_r",
vmin=axes_min[2],
vmax=axes_max[2],
)
ax.set_xlim(axes_min[0], axes_max[0])
ax.set_ylim(axes_min[1], axes_max[1])
ax.set_zlim(axes_min[2], axes_max[2])
mid_x = (seg[:, 0].max() + seg[:, 0].min()) * 0.5
mid_y = (seg[:, 1].max() + seg[:, 1].min()) * 0.5
mid_z = (seg[:, 2].max() + seg[:, 2].min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_aspect(1)
ax.grid(b=False)
ax.patch.set_facecolor("black")
ax.set_axis_off()
plt.style.use("dark_background")
plt.setp(ax.get_xmajorticklabels(), visible=False)
plt.setp(ax.get_ymajorticklabels(), visible=False)
plt.setp(ax.get_zmajorticklabels(), visible=False)
ax_rec = fig.add_subplot(122, projection="3d")
seg = reconstruction
ax_rec.scatter(
seg[:, 0],
seg[:, 1],
seg[:, 2],
s=POINT_SIZE / max_range,
c=seg[:, 2],
edgecolors="none",
depthshade=True,
cmap="jet_r",
vmin=axes_min[2],
vmax=axes_max[2],
)
ax_rec.set_xlim(axes_min[0], axes_max[0])
ax_rec.set_ylim(axes_min[1], axes_max[1])
ax_rec.set_zlim(axes_min[2], axes_max[2])
mid_x = (seg[:, 0].max() + seg[:, 0].min()) * 0.5
mid_y = (seg[:, 1].max() + seg[:, 1].min()) * 0.5
mid_z = (seg[:, 2].max() + seg[:, 2].min()) * 0.5
ax_rec.set_xlim(mid_x - max_range, mid_x + max_range)
ax_rec.set_ylim(mid_y - max_range, mid_y + max_range)
ax_rec.set_zlim(mid_z - max_range, mid_z + max_range)
ax_rec.set_aspect(1)
ax_rec.grid(b=False)
ax_rec.patch.set_facecolor("black")
ax_rec.set_axis_off()
plt.setp(ax_rec.get_xmajorticklabels(), visible=False)
plt.setp(ax_rec.get_ymajorticklabels(), visible=False)
plt.setp(ax_rec.get_zmajorticklabels(), visible=False)
plt.style.use("dark_background")
plt.draw()
plt.pause(0.001)
command = input("Cmd: ")
while command != "":
if command == "c":
ax.azim = ax_rec.azim
ax.elev = ax_rec.elev
plt.draw()
plt.pause(0.001)
if command == "s":
plt.savefig("reconstructions/seg_" + str(n_saved) + ".pdf")
plt.savefig("reconstructions/seg_" + str(n_saved) + ".png")
n_saved = n_saved + 1
command = input("Cmd: ")
# ax.set_xlim(0, 35)
# ax.set_ylim(0, 35)
# ax.set_zlim(0, 15)
|
histoqc/_pipeline.py | kaczmarj/HistoQC | 140 | 11154527 | """histoqc._pipeline
helper utilities for running the HistoQC pipelines
"""
import glob
import logging
import multiprocessing
import os
import platform
import shutil
import threading
import warnings
from contextlib import ExitStack
from contextlib import contextmanager
from contextlib import nullcontext
from importlib import import_module
from logging.config import dictConfig
from logging.handlers import QueueHandler
# --- logging helpers -------------------------------------------------
DEFAULT_LOG_FN = "error.log"
def setup_logging(*, capture_warnings, filter_warnings):
"""configure histoqc's logging instance
Parameters
----------
capture_warnings: `bool`
flag if warnings should be captured by the logging system
filter_warnings: `str`
action for warnings.filterwarnings
"""
dictConfig({
'version': 1,
'formatters': {
'default': {
'class': 'logging.Formatter',
'format': '%(asctime)s - %(levelname)s - %(message)s',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'default',
},
'logfile': {
'class': 'logging.FileHandler',
'level': 'WARNING',
'filename': DEFAULT_LOG_FN,
'mode': 'w', # we initially start overwriting existing logs
'formatter': 'default',
},
},
'root': {
'level': 'INFO',
'handlers': ['console', 'logfile']
}
})
# configure warnings too...
warnings.filterwarnings(filter_warnings)
logging.captureWarnings(capture_warnings)
def move_logging_file_handler(logger, destination):
"""point the logging file handlers to the new destination
Parameters
----------
logger :
the Logger instance for which the default file handler should be moved
destination :
destination directory for the new file handler
"""
for handler in reversed(logger.handlers):
if not isinstance(handler, logging.FileHandler):
continue
if handler.baseFilename != os.path.join(os.getcwd(), DEFAULT_LOG_FN):
continue
if not destination.endswith(handler.baseFilename):
destination = os.path.join(destination, os.path.relpath(handler.baseFilename, os.getcwd()))
logger.info(f'moving fileHandler {handler.baseFilename!r} to {destination!r}')
# remove handler
logger.removeHandler(handler)
handler.close()
# copy error log to destination
new_filename = shutil.move(handler.baseFilename, destination)
new_handler = logging.FileHandler(new_filename, mode='a')
new_handler.setLevel(handler.level)
new_handler.setFormatter(handler.formatter)
logger.addHandler(new_handler)
class MultiProcessingLogManager:
def __init__(self, logger_name, *, manager):
"""create a MultiProcessingLogManager
Note: this uses a multiprocessing Queue to correctly transfer
logging information from worker processes to the main
process logging instance
Parameters
----------
logger_name : str
the name of the logger instance
manager : multiprocessing.Manager
the mp Manager instance used for creating sharable context
"""
self._logger_name = logger_name
self._log_queue = manager.Queue()
self._log_thread_active = False
@property
def is_main_process(self):
return multiprocessing.current_process().name == "MainProcess"
@property
def logger(self):
"""returns the logger instance"""
if self.is_main_process:
return logging.getLogger(self._logger_name)
else:
root = logging.getLogger()
if not root.hasHandlers():
qh = QueueHandler(self._log_queue)
root.setLevel(logging.INFO)
root.addHandler(qh)
# note: this should be revisited and set by the main process
warnings.filterwarnings('ignore')
logging.captureWarnings(True)
return root
@contextmanager
def logger_thread(self):
"""context manager for multiprocess logging
Note: this starts the thread responsible for handing the log records
emitted by child processes to the main logger instance
"""
assert self.is_main_process
assert not self._log_thread_active # not reentrant...
self._log_thread_active = True
def process_queue(q, ln):
main_logger = logging.getLogger(ln)
while True:
log_record = q.get()
if log_record is None:
break
main_logger.handle(log_record)
lt = threading.Thread(target=process_queue, args=(self._log_queue, self._logger_name))
lt.start()
try:
yield
finally:
self._log_queue.put(None)
lt.join()
self._log_thread_active = False
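# Usage sketch for MultiProcessingLogManager (main process):
#     manager = multiprocessing.Manager()
#     log_manager = MultiProcessingLogManager("histoqc", manager=manager)
#     with log_manager.logger_thread():
#         log_manager.logger.info("main process message")
#         # worker processes can call log_manager.logger.info(...) while the
#         # thread runs; their records flow through the shared queue.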
def log_pipeline(config, log_manager):
"""log the pipeline information
Parameters
----------
config : configparser.ConfigParser
log_manager : MultiProcessingLogManager
"""
assert log_manager.is_main_process
steps = config.get(section='pipeline', option='steps').splitlines()
log_manager.logger.info("the pipeline will use these steps:")
for process in steps:
mod_name, func_name = process.split('.')
log_manager.logger.info(f"\t\t{mod_name}\t{func_name}")
return steps
# --- worker process helpers ------------------------------------------
def setup_plotting_backend(logger=None):
"""loads the correct matplotlib backend
Parameters
----------
logger :
the logging.Logger instance
"""
import matplotlib
if platform.system() != "Windows" and not os.environ.get('DISPLAY'):
if logger is not None:
logger.info('no display found. Using non-interactive Agg backend')
matplotlib.use('Agg')
else:
matplotlib.use('TkAgg')
class BatchedResultFile:
"""BatchedResultFile encapsulates the results writing
Note: this is multiprocessing safe
"""
FILENAME_GLOB = "results*.tsv"
FILENAME_NO_BATCH = "results.tsv"
FILENAME_BATCH = "results_{:d}.tsv"
def __init__(self, dst, *, manager, batch_size=None, force_overwrite=False):
"""create a BatchedResultFile instance
Parameters
----------
dst : os.PathLike
the output directory for the result files
manager : multiprocessing.Manager
the mp Manager instance used for creating sharable context
batch_size : int or None
after `batch_size` calls to increment_counter() the results
file will be rotated
force_overwrite : bool
overwrite result files if they are already present. default
is to append.
"""
if not os.path.isdir(dst):
raise ValueError(f"dst {dst!r} is not a directory or does not exist")
if batch_size is not None:
batch_size = int(batch_size)
if batch_size < 1:
raise ValueError(f"batch_size must be > 0, got {batch_size}")
self.dst = os.path.abspath(dst)
self.batch_size = batch_size
self.force_overwrite = bool(force_overwrite)
# multiprocessing safety
self._headers = manager.list()
self._rlock = manager.RLock()
# internal state
self._batch = 0
self._completed = 0
self._first = True
# contextmanager
self._f = None
self._stack = None
def __enter__(self):
self._stack = ExitStack()
self._stack.callback(self.increment_counter)
self._stack.enter_context(self._rlock)
self._f = nullcontext(self._stack.enter_context(self._file()))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._stack.close()
self._stack = None
self._f = None
def _file(self):
if self._f is not None:
return self._f # we're in the context manager
if self.batch_size is None:
fn = self.FILENAME_NO_BATCH
else:
fn = self.FILENAME_BATCH.format(self._batch)
pth = os.path.join(self.dst, fn)
mode = "a"
if self._first and os.path.isfile(pth):
if self.force_overwrite:
mode = "w"
else:
mode = "a"
self._first = False
return open(pth, mode=mode)
def add_header(self, header):
"""add a new header to the results file
Parameters
----------
header :
a string that can be written to the file by calling the
write_headers method
"""
self._headers.append(header)
def is_empty_file(self):
"""return if the current file is empty
Note: this is useful to determine if you want to write_headers
... technically the name is incorrect, but in this use case
pos 0 is equivalent to an empty file
"""
with self._rlock, self._file() as f:
return f.tell() == 0
def write_headers(self, *args):
"""write the internally collected headers to the current file
Parameters
----------
state : dict
the current histoqc implementation writes the outputs to
            the file headers, so *args supports `state` for now.
overwrite in subclass to control header output behavior
"""
with self._rlock:
# write headers
for line in self._headers:
self.write_line(f"#{line}")
# histoqc specific
_state, = args
_outputs = '\t'.join(_state['output'])
line = f"#dataset:{_outputs}\twarnings"
self.write_line(line)
def write_line(self, text, end="\n"):
"""write text to the file
Parameters
----------
text : str
end : str
defaults to newline
"""
with self._rlock, self._file() as f:
f.write(text)
if end:
f.write(end)
def increment_counter(self):
"""increment the completed counter
moves to the next batch as determined by batch_size
"""
# move on to the next batch if needed
with self._rlock:
self._completed += 1
if self._completed and self.batch_size and self._completed % self.batch_size == 0:
self._batch += 1
self._first = True
@classmethod
def results_in_path(cls, dst):
"""return if a dst path contains results files
Parameters
----------
dst : os.PathLike
"""
return bool(glob.glob(os.path.join(dst, cls.FILENAME_GLOB)))
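# Usage sketch (not part of the original module): a minimal illustration of the
# batching behaviour, assuming a throwaway output directory; the sample names
# are hypothetical.
#
#   import multiprocessing, tempfile
#   with multiprocessing.Manager() as m:
#       out_dir = tempfile.mkdtemp()
#       results = BatchedResultFile(out_dir, manager=m, batch_size=2)
#       for name in ("img_a", "img_b", "img_c"):
#           with results:
#               results.write_line(f"{name}\tok")
#   # "img_a"/"img_b" land in results_0.tsv, "img_c" starts results_1.tsv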
def load_pipeline(config):
"""load functions and parameters from config
Parameters
----------
config : configparser.ConfigParser
"""
steps = config.get(section='pipeline', option='steps').splitlines()
process_queue = []
for process in steps:
mod_name, func_name = process.split('.')
try:
mod = import_module(f"histoqc.{mod_name}")
except ImportError:
raise NameError(f"Unknown module in pipeline from config file:\t {mod_name}")
func_name = func_name.split(":")[0] # take base of function name
try:
func = getattr(mod, func_name)
except AttributeError:
raise NameError(f"Unknown function from module in pipeline from config file:\t {mod_name}.{func_name}")
if config.has_section(process):
params = dict(config.items(section=process))
else:
params = {}
process_queue.append((func, params))
return process_queue
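# Config sketch (not part of the original module): the layout that
# load_pipeline() expects. The module/function names and parameters below are
# hypothetical examples; each "Module.function" step is resolved against the
# histoqc package and paired with the items of its matching config section
# (an empty dict if that section is absent).
#
#   [pipeline]
#   steps = BasicModule.getBasicStats
#       SaveModule.saveFinalStates
#
#   [BasicModule.getBasicStats]
#   image_work_size = 1.25x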
|
parakeet/frontend/pinyin.py | zh794390558/DeepSpeech | 501 | 11154565 | <gh_stars>100-1000
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Simple Chinese Phonology using pinyin symbols.
The G2P conversion converts pinyin strings to symbols. It can also handle
strings of Chinese characters, but due to the complexity of Chinese G2P, we can
leave text -> pinyin to other parts of a TTS system. Other NLP techniques may be
used (e.g. tokenization, tagging, NER...)
"""
import re
from itertools import product
from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
from pypinyin.core import DefaultConverter
from pypinyin.core import Pinyin
from pypinyin.core import Style
from parakeet.frontend.phonectic import Phonetics
from parakeet.frontend.vocab import Vocab
_punctuations = [',', '。', '?', '!']
_initials = [
'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j', 'q', 'x', 'zh',
'ch', 'sh', 'r', 'z', 'c', 's'
]
_finals = [
'ii', 'iii', 'a', 'o', 'e', 'ea', 'ai', 'ei', 'ao', 'ou', 'an', 'en', 'ang',
'eng', 'er', 'i', 'ia', 'io', 'ie', 'iai', 'iao', 'iou', 'ian', 'ien',
'iang', 'ieng', 'u', 'ua', 'uo', 'uai', 'uei', 'uan', 'uen', 'uang', 'ueng',
'v', 've', 'van', 'ven', 'veng'
]
_ernized_symbol = ['&r']
_phones = _initials + _finals + _ernized_symbol + _punctuations
_tones = ['0', '1', '2', '3', '4', '5']
_toned_finals = [final + tone for final, tone in product(_finals, _tones[1:])]
_toned_phonems = _initials + _toned_finals + _ernized_symbol + _punctuations
class ParakeetConverter(NeutralToneWith5Mixin, DefaultConverter):
pass
class ParakeetPinyin(Phonetics):
def __init__(self):
self.vocab_phonemes = Vocab(_phones)
self.vocab_tones = Vocab(_tones)
self.pinyin_backend = Pinyin(ParakeetConverter())
def convert_pypinyin_tone3(self, syllables, add_start_end=False):
phonemes, tones = _convert_to_parakeet_style_pinyin(syllables)
if add_start_end:
start = self.vocab_phonemes.start_symbol
end = self.vocab_phonemes.end_symbol
phonemes = [start] + phonemes + [end]
start = self.vocab_tones.start_symbol
end = self.vocab_tones.end_symbol
            tones = [start] + tones + [end]
phonemes = [
item for item in phonemes if item in self.vocab_phonemes.stoi
]
tones = [item for item in tones if item in self.vocab_tones.stoi]
return phonemes, tones
def phoneticize(self, sentence, add_start_end=False):
""" Normalize the input text sequence and convert it into pronunciation sequence.
Parameters
-----------
sentence: str
The input text sequence.
Returns
----------
List[str]
The list of pronunciation sequence.
"""
syllables = self.pinyin_backend.lazy_pinyin(
sentence, style=Style.TONE3, strict=True)
phonemes, tones = self.convert_pypinyin_tone3(
syllables, add_start_end=add_start_end)
return phonemes, tones
def numericalize(self, phonemes, tones):
""" Convert pronunciation sequence into pronunciation id sequence.
Parameters
-----------
phonemes: List[str]
The list of pronunciation sequence.
Returns
----------
List[int]
The list of pronunciation id sequence.
"""
phoneme_ids = [self.vocab_phonemes.lookup(item) for item in phonemes]
tone_ids = [self.vocab_tones.lookup(item) for item in tones]
return phoneme_ids, tone_ids
def __call__(self, sentence, add_start_end=False):
""" Convert the input text sequence into pronunciation id sequence.
Parameters
-----------
sentence: str
The input text sequence.
Returns
----------
List[str]
The list of pronunciation id sequence.
"""
phonemes, tones = self.phoneticize(
sentence, add_start_end=add_start_end)
phoneme_ids, tone_ids = self.numericalize(phonemes, tones)
return phoneme_ids, tone_ids
@property
def vocab_size(self):
""" Vocab size.
"""
# 70 = 62 phones + 4 punctuations + 4 special tokens
return len(self.vocab_phonemes)
@property
def tone_vocab_size(self):
# 10 = 1 non tone + 5 tone + 4 special tokens
return len(self.vocab_tones)
class ParakeetPinyinWithTone(Phonetics):
def __init__(self):
self.vocab = Vocab(_toned_phonems)
self.pinyin_backend = Pinyin(ParakeetConverter())
def convert_pypinyin_tone3(self, syllables, add_start_end=False):
phonemes = _convert_to_parakeet_style_pinyin_with_tone(syllables)
if add_start_end:
            start = self.vocab.start_symbol
            end = self.vocab.end_symbol
phonemes = [start] + phonemes + [end]
phonemes = [item for item in phonemes if item in self.vocab.stoi]
return phonemes
def phoneticize(self, sentence, add_start_end=False):
""" Normalize the input text sequence and convert it into pronunciation sequence.
Parameters
-----------
sentence: str
The input text sequence.
Returns
----------
List[str]
The list of pronunciation sequence.
"""
syllables = self.pinyin_backend.lazy_pinyin(
sentence, style=Style.TONE3, strict=True)
phonemes = self.convert_pypinyin_tone3(
syllables, add_start_end=add_start_end)
return phonemes
def numericalize(self, phonemes):
""" Convert pronunciation sequence into pronunciation id sequence.
Parameters
-----------
phonemes: List[str]
The list of pronunciation sequence.
Returns
----------
List[int]
The list of pronunciation id sequence.
"""
phoneme_ids = [self.vocab.lookup(item) for item in phonemes]
return phoneme_ids
def __call__(self, sentence, add_start_end=False):
""" Convert the input text sequence into pronunciation id sequence.
Parameters
-----------
sentence: str
The input text sequence.
Returns
----------
List[str]
The list of pronunciation id sequence.
"""
phonemes = self.phoneticize(sentence, add_start_end=add_start_end)
phoneme_ids = self.numericalize(phonemes)
return phoneme_ids
@property
def vocab_size(self):
""" Vocab size.
"""
# 230 = 222 phones + 4 punctuations + 4 special tokens
return len(self.vocab)
def _convert_to_parakeet_convension(syllable):
    # from pypinyin.Style.TONE3 to parakeet convention
tone = syllable[-1]
syllable = syllable[:-1]
# expansion of o -> uo
syllable = re.sub(r"([bpmf])o$", r"\1uo", syllable)
# expansion for iong, ong
syllable = syllable.replace("iong", "veng").replace("ong", "ueng")
# expansion for ing, in
syllable = syllable.replace("ing", "ieng").replace("in", "ien")
# expansion for un, ui, iu
syllable = syllable.replace("un", "uen") \
.replace("ui", "uei") \
.replace("iu", "iou")
# rule for variants of i
syllable = syllable.replace("zi", "zii") \
.replace("ci", "cii") \
.replace("si", "sii") \
.replace("zhi", "zhiii") \
.replace("chi", "chiii") \
.replace("shi", "shiii") \
.replace("ri", "riii")
# rule for y preceding i, u
syllable = syllable.replace("yi", "i").replace("yu", "v").replace("y", "i")
# rule for w
syllable = syllable.replace("wu", "u").replace("w", "u")
# rule for v following j, q, x
syllable = syllable.replace("ju", "jv") \
.replace("qu", "qv") \
.replace("xu", "xv")
return syllable + tone
def _split_syllable(syllable: str):
global _punctuations
if syllable in _punctuations:
# syllables, tones
return [syllable], ['0']
syllable = _convert_to_parakeet_convension(syllable)
tone = syllable[-1]
syllable = syllable[:-1]
phones = []
tones = []
global _initials
if syllable[:2] in _initials:
phones.append(syllable[:2])
tones.append('0')
phones.append(syllable[2:])
tones.append(tone)
elif syllable[0] in _initials:
phones.append(syllable[0])
tones.append('0')
phones.append(syllable[1:])
tones.append(tone)
else:
phones.append(syllable)
tones.append(tone)
return phones, tones
def _convert_to_parakeet_style_pinyin(syllables):
phones, tones = [], []
for syllable in syllables:
p, t = _split_syllable(syllable)
phones.extend(p)
tones.extend(t)
return phones, tones
def _split_syllable_with_tone(syllable: str):
global _punctuations
if syllable in _punctuations:
# syllables
return [syllable]
syllable = _convert_to_parakeet_convension(syllable)
phones = []
global _initials
if syllable[:2] in _initials:
phones.append(syllable[:2])
phones.append(syllable[2:])
elif syllable[0] in _initials:
phones.append(syllable[0])
phones.append(syllable[1:])
else:
phones.append(syllable)
return phones
def _convert_to_parakeet_style_pinyin_with_tone(syllables):
phones = []
for syllable in syllables:
p = _split_syllable_with_tone(syllable)
phones.extend(p)
return phones
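if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module; requires parakeet
    # and pypinyin to be installed. The sentence is an arbitrary example and
    # the exact symbol lists depend on pypinyin's output.
    frontend = ParakeetPinyin()
    phonemes, tones = frontend.phoneticize(u"你好,欢迎使用。")
    print(phonemes)
    print(tones)
    phoneme_ids, tone_ids = frontend.numericalize(phonemes, tones)
    print(phoneme_ids, tone_ids)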
|
diffadjust/blkdiff.py | kevaundray/research | 1,351 | 11154576 | <gh_stars>1000+
import math, random
hashpower = [float(x) for x in open('hashpower.csv').readlines()]
# Target block time
TARGET = 12
# Should be 86400, but can reduce for a quicker sim
SECONDS_IN_DAY = 86400
# Look at the 1/x day exponential moving average
EMA_FACTOR = 0.01
# Damping factor for simple difficulty adjustment
SIMPLE_ADJUST_DAMPING_FACTOR = 20
# Maximum per-block diff adjustment (as fraction of current diff)
SIMPLE_ADJUST_MAX = 0.5
# Damping factor for quadratic difficulty adjustment
QUADRATIC_ADJUST_DAMPING_FACTOR = 3
# Maximum per-block diff adjustment (as fraction of current diff)
QUADRATIC_ADJUST_MAX = 0.5
# Threshold for bounded adjustor
BOUNDED_ADJUST_THRESHOLD = 1.3
# Bounded adjustment factor
BOUNDED_ADJUST_FACTOR = 0.01
# How many blocks back to look
BLKS_BACK = 10
# Naive difficulty adjustment factor
NAIVE_ADJUST_FACTOR = 1/1024.
# Produces a value according to the exponential distribution; used
# to determine the time until the next block given an average block
# time of t
def expdiff(t):
return -math.log(random.random()) * t
# abs_sqr(3) = 9, abs_sqr(-7) = -49, etc
def abs_sqr(x):
return -(x**2) if x < 0 else x**2
# Given an array of the most recent timestamps, and the most recent
# difficulties, compute the next difficulty
def simple_adjust(timestamps, diffs):
if len(timestamps) < BLKS_BACK + 2:
return diffs[-1]
# Total interval between previous block and block a bit further back
delta = timestamps[-2] - timestamps[-2-BLKS_BACK] + 0.0
# Expected interval
expected = TARGET * BLKS_BACK
# Compute adjustment factor
fac = 1 - (delta / expected - 1) / SIMPLE_ADJUST_DAMPING_FACTOR
fac = max(min(fac, 1 + SIMPLE_ADJUST_MAX), 1 - SIMPLE_ADJUST_MAX)
return diffs[-1] * fac
# Alternative adjustment algorithm
def quadratic_adjust(timestamps, diffs):
if len(timestamps) < BLKS_BACK + 2:
return diffs[-1]
# Total interval between previous block and block a bit further back
delta = timestamps[-2] - timestamps[-2-BLKS_BACK] + 0.0
# Expected interval
expected = TARGET * BLKS_BACK
# Compute adjustment factor
fac = 1 - abs_sqr(delta / expected - 1) / QUADRATIC_ADJUST_DAMPING_FACTOR
fac = max(min(fac, 1 + QUADRATIC_ADJUST_MAX), 1 - QUADRATIC_ADJUST_MAX)
return diffs[-1] * fac
# Alternative adjustment algorithm
def bounded_adjust(timestamps, diffs):
if len(timestamps) < BLKS_BACK + 2:
return diffs[-1]
# Total interval between previous block and block a bit further back
delta = timestamps[-2] - timestamps[-2-BLKS_BACK] + 0.0
# Expected interval
expected = TARGET * BLKS_BACK
if delta / expected > BOUNDED_ADJUST_THRESHOLD:
fac = (1 - BOUNDED_ADJUST_FACTOR)
elif delta / expected < 1 / BOUNDED_ADJUST_THRESHOLD:
fac = (1 + BOUNDED_ADJUST_FACTOR) ** (delta / expected)
else:
fac = 1
return diffs[-1] * fac
# Old Ethereum algorithm
def old_adjust(timestamps, diffs):
if len(timestamps) < 2:
return diffs[-1]
delta = timestamps[-1] - timestamps[-2]
expected = TARGET * 0.693
if delta > expected:
fac = 1 - NAIVE_ADJUST_FACTOR
else:
fac = 1 + NAIVE_ADJUST_FACTOR
return diffs[-1] * fac
def test(source, adjust):
# Variables to keep track of for stats purposes
ema = maxema = minema = TARGET
lthalf, gtdouble, lttq, gtft = 0, 0, 0, 0
count = 0
# Block times
times = [0]
# Block difficulty values
diffs = [source[0]]
# Next time to print status update
nextprint = 10**6
# Main loop
while times[-1] < len(source) * SECONDS_IN_DAY:
# Print status update every 10**6 seconds
if times[-1] > nextprint:
            print('%d out of %d processed, ema %f' %
                  (times[-1], len(source) * SECONDS_IN_DAY, ema))
nextprint += 10**6
# Grab hashpower from data source
hashpower = source[int(times[-1] // SECONDS_IN_DAY)]
# Calculate new difficulty
diffs.append(adjust(times, diffs))
# Calculate next block time
times.append(times[-1] + expdiff(diffs[-1] / hashpower))
# Calculate min and max ema
ema = ema * (1 - EMA_FACTOR) + (times[-1] - times[-2]) * EMA_FACTOR
minema = min(minema, ema)
maxema = max(maxema, ema)
count += 1
# Keep track of number of blocks we are below 75/50% or above
# 133/200% of target
if ema < TARGET * 0.75:
lttq += 1
if ema < TARGET * 0.5:
lthalf += 1
elif ema > TARGET * 1.33333:
gtft += 1
if ema > TARGET * 2:
gtdouble += 1
# Pop items to save memory
if len(times) > 2000:
times.pop(0)
diffs.pop(0)
    print('min', minema, 'max', maxema, 'avg', times[-1] / count,
          'ema < half', lthalf * 1.0 / count,
          'ema > double', gtdouble * 1.0 / count,
          'ema < 3/4', lttq * 1.0 / count,
          'ema > 4/3', gtft * 1.0 / count)
# Example usage
# blkdiff.test(blkdiff.hashpower, blkdiff.simple_adjust)
|
tests/integration/conftest.py | lvijnck/tavern | 889 | 11154607 | <gh_stars>100-1000
import pytest
@pytest.fixture
def str_fixture():
return "abc-fixture-value"
@pytest.fixture(name="yield_str_fixture")
def sdkofsok(str_fixture):
yield str_fixture
@pytest.fixture(name="yielder")
def bluerhug(request):
# This doesn't really do anything at the moment. In future it might yield
# the result or something, but it's a bit difficult to do at the moment.
response = yield "hello"
@pytest.fixture(scope="session", autouse=True)
def autouse_thing():
return "abc"
@pytest.fixture(scope="session", autouse=True, name="autouse_thing_named")
def second(autouse_thing):
return autouse_thing
|
vilya/views/api/teams/__init__.py | mubashshirjamal/code | 1,582 | 11154661 | <reponame>mubashshirjamal/code
# -*- coding: utf-8 -*-
from vilya.views.api.team import TeamUI
_q_exports = []
def _q_lookup(request, name):
return TeamUI(name)
def _q_access(request):
request.response.set_content_type('application/json; charset=utf-8')
|
koku/masu/test/database/test_mini_transaction_delete.py | rubik-ai/koku | 157 | 11154672 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the mini_transaction_delete utility function."""
import os
from unittest.mock import patch
from api.provider.models import Sources
from koku.env import ENVIRONMENT
from masu.database.koku_database_access import mini_transaction_delete
from masu.test import MasuTestCase
class MiniTransactionDeleteTest(MasuTestCase):
"""Test Cases for the ReportSchema object."""
def test_mini_transaction_delete_base(self):
for id in range(100, 110):
Sources.objects.create(source_id=-id, offset=1, authentication={})
src_query = Sources.objects.filter(source_id__lt=-99)
start_count = src_query.count()
self.assertEqual(start_count, 10)
del_count, remainder = mini_transaction_delete(src_query)
end_count = src_query.count()
self.assertEqual(del_count, start_count)
self.assertEqual(remainder, 0)
self.assertEqual(end_count, 0)
@patch("masu.database.koku_database_access.mtd_check_remainder")
def test_mini_transaction_delete_remainder(self, remainder_check):
remainder_check.return_value = 1
del_rec_lim_key = "DELETE_CYCLE_RECORD_LIMIT"
del_cycle_try_key = "DELETE_CYCLE_MAX_RETRY"
dcrl = ENVIRONMENT.get_value(del_rec_lim_key, default=None)
dcmr = ENVIRONMENT.get_value(del_cycle_try_key, default=None)
os.environ[del_rec_lim_key] = "3"
os.environ[del_cycle_try_key] = "1"
for id in range(110, 120):
Sources.objects.create(source_id=-id, offset=1, authentication={})
src_query = Sources.objects.filter(source_id__lt=-109)
start_count = src_query.count()
del_count, remainder = mini_transaction_delete(src_query)
end_count = src_query.count()
if dcrl is None:
del os.environ[del_rec_lim_key]
else:
os.environ[del_rec_lim_key] = dcrl
if dcmr is None:
del os.environ[del_cycle_try_key]
else:
os.environ[del_cycle_try_key] = dcmr
self.assertEqual(start_count, 10)
self.assertEqual(remainder, 1)
self.assertEqual(end_count, 0) # Based on how this test is constructed, the records would be deleted anyway.
|
Configuration/ProcessModifiers/python/premix_stage2_cff.py | ckamtsikis/cmssw | 852 | 11154691 | import FWCore.ParameterSet.Config as cms
# This modifier is for the premixing stage2, i.e. mixing the premixed pileup library with the signal event
premix_stage2 = cms.Modifier()
|
src/escpos/magicencode.py | cohorte/python-escpos | 683 | 11154710 | #!/usr/bin/python
# -*- coding: utf-8 -*-
""" Magic Encode
This module tries to convert an UTF-8 string to an encoded string for the printer.
It uses trial and error in order to guess the right codepage.
The code is based on the encoding-code in py-xml-escpos by @fvdsn.
:author: `<NAME> <<EMAIL>>`_
:organization: `python-escpos <https://github.com/python-escpos>`_
:copyright: Copyright (c) 2016 <NAME> and <NAME>
:license: MIT
"""
from builtins import bytes
from .constants import CODEPAGE_CHANGE
from .exceptions import Error
from .codepages import CodePages
import six
class Encoder(object):
"""Takes a list of available code spaces. Picks the right one for a
given character.
Note: To determine the code page, it needs to do the conversion, and
thus already knows what the final byte in the target encoding would
be. Nevertheless, the API of this class doesn't return the byte.
    The caller has to do the character conversion itself.
$ python -m timeit -s "{u'ö':'a'}.get(u'ö')"
100000000 loops, best of 3: 0.0133 usec per loop
$ python -m timeit -s "u'ö'.encode('latin1')"
100000000 loops, best of 3: 0.0141 usec per loop
"""
def __init__(self, codepage_map):
self.codepages = codepage_map
self.available_encodings = set(codepage_map.keys())
self.available_characters = {}
self.used_encodings = set()
def get_sequence(self, encoding):
return int(self.codepages[encoding])
def get_encoding_name(self, encoding):
"""Given an encoding provided by the user, will return a
canonical encoding name; and also validate that the encoding
is supported.
TODO: Support encoding aliases: pc437 instead of cp437.
"""
encoding = CodePages.get_encoding_name(encoding)
if encoding not in self.codepages:
raise ValueError((
'Encoding "{}" cannot be used for the current profile. '
'Valid encodings are: {}'
).format(encoding, ','.join(self.codepages.keys())))
return encoding
@staticmethod
def _get_codepage_char_list(encoding):
"""Get codepage character list
Gets characters 128-255 for a given code page, as an array.
:param encoding: The name of the encoding. This must appear in the CodePage list
"""
codepage = CodePages.get_encoding(encoding)
if 'data' in codepage:
encodable_chars = list("".join(codepage['data']))
assert(len(encodable_chars) == 128)
return encodable_chars
elif 'python_encode' in codepage:
encodable_chars = [u" "] * 128
for i in range(0, 128):
codepoint = i + 128
try:
encodable_chars[i] = bytes([codepoint]).decode(codepage['python_encode'])
except UnicodeDecodeError:
# Non-encodable character, just skip it
pass
return encodable_chars
raise LookupError("Can't find a known encoding for {}".format(encoding))
def _get_codepage_char_map(self, encoding):
""" Get codepage character map
Process an encoding and return a map of UTF-characters to code points
in this encoding.
This is generated once only, and returned from a cache.
:param encoding: The name of the encoding.
"""
# Skip things that were loaded previously
if encoding in self.available_characters:
return self.available_characters[encoding]
codepage_char_list = self._get_codepage_char_list(encoding)
codepage_char_map = dict((utf8, i + 128) for (i, utf8) in enumerate(codepage_char_list))
self.available_characters[encoding] = codepage_char_map
return codepage_char_map
def can_encode(self, encoding, char):
"""Determine if a character is encodeable in the given code page.
:param encoding: The name of the encoding.
:param char: The character to attempt to encode.
"""
available_map = {}
try:
available_map = self._get_codepage_char_map(encoding)
except LookupError:
return False
# Decide whether this character is encodeable in this code page
is_ascii = ord(char) < 128
is_encodable = char in available_map
return is_ascii or is_encodable
@staticmethod
def _encode_char(char, charmap, defaultchar):
""" Encode a single character with the given encoding map
:param char: char to encode
:param charmap: dictionary for mapping characters in this code page
"""
if ord(char) < 128:
return ord(char)
if char in charmap:
return charmap[char]
return ord(defaultchar)
def encode(self, text, encoding, defaultchar='?'):
""" Encode text under the given encoding
:param text: Text to encode
:param encoding: Encoding name to use (must be defined in capabilities)
:param defaultchar: Fallback for non-encodable characters
"""
codepage_char_map = self._get_codepage_char_map(encoding)
output_bytes = bytes([self._encode_char(char, codepage_char_map, defaultchar) for char in text])
return output_bytes
def __encoding_sort_func(self, item):
key, index = item
return (
key in self.used_encodings,
index
)
def find_suitable_encoding(self, char):
"""The order of our search is a specific one:
1. code pages that we already tried before; there is a good
chance they might work again, reducing the search space,
and by re-using already used encodings we might also
           reduce the number of codepage change instructions we have
to send. Still, any performance gains will presumably be
fairly minor.
2. code pages in lower ESCPOS slots first. Presumably, they
are more likely to be supported, so if a printer profile
is missing or incomplete, we might increase our change
that the code page we pick for this character is actually
supported.
"""
sorted_encodings = sorted(
self.codepages.items(),
key=self.__encoding_sort_func)
for encoding, _ in sorted_encodings:
if self.can_encode(encoding, char):
                # This encoding worked; add it to the set of used ones.
self.used_encodings.add(encoding)
return encoding
def split_writable_text(encoder, text, encoding):
"""Splits off as many characters from the begnning of text as
are writable with "encoding". Returns a 2-tuple (writable, rest).
"""
if not encoding:
return None, text
for idx, char in enumerate(text):
if encoder.can_encode(encoding, char):
continue
return text[:idx], text[idx:]
return text, None
class MagicEncode(object):
"""A helper that helps us to automatically switch to the right
code page to encode any given Unicode character.
    This will consider the printer's supported codepages, according
to the printer profile, and if a character cannot be encoded
with the current profile, it will attempt to find a suitable one.
If the printer does not support a suitable code page, it can
insert an error character.
"""
def __init__(self, driver, encoding=None, disabled=False,
defaultsymbol='?', encoder=None):
"""
:param driver:
:param encoding: If you know the current encoding of the printer
when initializing this class, set it here. If the current
encoding is unknown, the first character emitted will be a
codepage switch.
:param disabled:
:param defaultsymbol:
:param encoder:
"""
if disabled and not encoding:
raise Error('If you disable magic encode, you need to define an encoding!')
self.driver = driver
self.encoder = encoder or Encoder(driver.profile.get_code_pages())
self.encoding = self.encoder.get_encoding_name(encoding) if encoding else None
self.defaultsymbol = defaultsymbol
self.disabled = disabled
def force_encoding(self, encoding):
"""Sets a fixed encoding. The change is emitted right away.
        From now on, this buffer will not switch the code page anymore.
However, it will still keep track of the current code page.
"""
if not encoding:
self.disabled = False
else:
self.write_with_encoding(encoding, None)
self.disabled = True
def write(self, text):
"""Write the text, automatically switching encodings.
"""
if self.disabled:
self.write_with_encoding(self.encoding, text)
return
# See how far we can go into the text with the current encoding
to_write, text = split_writable_text(self.encoder, text, self.encoding)
if to_write:
self.write_with_encoding(self.encoding, to_write)
while text:
# See if any of the code pages that the printer profile
# supports can encode this character.
encoding = self.encoder.find_suitable_encoding(text[0])
if not encoding:
self._handle_character_failed(text[0])
text = text[1:]
continue
# Write as much text as possible with the encoding found.
to_write, text = split_writable_text(self.encoder, text, encoding)
if to_write:
self.write_with_encoding(encoding, to_write)
def _handle_character_failed(self, char):
"""Called when no codepage was found to render a character.
"""
# Writing the default symbol via write() allows us to avoid
        # unnecessary codepage switches.
self.write(self.defaultsymbol)
def write_with_encoding(self, encoding, text):
if text is not None and type(text) is not six.text_type:
raise Error("The supplied text has to be unicode, but is of type {type}.".format(
type=type(text)
))
# We always know the current code page; if the new codepage
# is different, emit a change command.
if encoding != self.encoding:
self.encoding = encoding
self.driver._raw(
CODEPAGE_CHANGE +
six.int2byte(self.encoder.get_sequence(encoding)))
if text:
self.driver._raw(self.encoder.encode(text, encoding))
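# Usage sketch (not part of the library): a minimal illustration of the
# automatic code page switching. DummyDriver and the single-entry code page
# map are illustrative stand-ins for a real printer driver/profile.
#
#   class DummyDriver(object):
#       def _raw(self, data):
#           print(repr(data))
#
#   encode = MagicEncode(DummyDriver(), encoder=Encoder({'CP437': 0}))
#   encode.write(u'Häagen-Dazs')  # emits CODEPAGE_CHANGE, then the encoded bytes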
|
tests/test_crle.py | advmach/detools | 119 | 11154731 | <filename>tests/test_crle.py<gh_stars>100-1000
import unittest
import detools
from detools.create import CrleCompressor
from detools.apply import CrleDecompressor
class DetoolsCrleTest(unittest.TestCase):
def test_compress(self):
datas = [
( [b''], b'\x00\x00'),
( [b'A'], b'\x00\x01A'),
( [5 * b'A'], b'\x00\x05AAAAA'),
( [6 * b'A'], b'\x01\x06A'),
( [b'ABBCC', b'CBBA'], b'\x00\x09ABBCCCBBA'),
( [126 * b'A', b'', b'A'], b'\x01\x7fA'),
( [128 * b'A'], b'\x01\x80\x01A'),
( [1000 * b'A'], b'\x01\xe8\x07A'),
( [69999 * b'A', b'A'], b'\x01\xf0\xa2\x04A'),
([10 * b'A', b'BC', 8 * b'A'], b'\x01\x0aA\x00\x02BC\x01\x08A'),
( [10 * b'A' + 8 * b'B'], b'\x01\x0aA\x01\x08B')
]
for chunks, compressed in datas:
compressor = CrleCompressor()
data = b''
for chunk in chunks:
data += compressor.compress(chunk)
data += compressor.flush()
self.assertEqual(data, compressed)
def test_decompress_no_data(self):
compressed = b'\x00\x00'
decompressor = CrleDecompressor(len(compressed))
self.assertEqual(decompressor.needs_input, True)
self.assertEqual(decompressor.decompress(compressed, 1), b'')
self.assertEqual(decompressor.eof, True)
def test_decompress(self):
datas = [
( [b'\x00\x01A'], b'A'),
( [b'\x00\x07AAAAAAA'], 7 * b'A'),
( [b'\x01\x08A'], 8 * b'A'),
( [b'\x00\x09ABBCCCBBA'], b'ABBCCCBBA'),
( [b'\x01\x7f', b'A'], 127 * b'A'),
( [b'\x01\x80\x01A'], 128 * b'A'),
( [b'\x01\xe8\x07A'], 1000 * b'A'),
( [b'\x01\xf0', b'\xa2\x04A'], 70000 * b'A'),
([b'\x01\x0aA\x00\x02BC\x01\x08A'], 10 * b'A' + b'BC' + 8 * b'A'),
( [b'\x01\x0aA\x01\x08B'], 10 * b'A' + 8 * b'B')
]
for chunks, decompressed in datas:
decompressor = CrleDecompressor(sum([len(c) for c in chunks]))
for chunk in chunks:
self.assertEqual(decompressor.needs_input, True)
self.assertEqual(decompressor.eof, False)
decompressor.decompress(chunk, 0)
self.assertEqual(decompressor.needs_input, False)
data = b''
while not decompressor.eof:
data += decompressor.decompress(b'', 1)
self.assertEqual(data, decompressed)
def test_decompress_bad_kind(self):
decompressor = CrleDecompressor(3)
with self.assertRaises(detools.Error) as cm:
decompressor.decompress(b'\x02\x01A', 1)
self.assertEqual(
str(cm.exception),
'Expected kind scattered(0) or repeated(1), but got 2.')
def test_decompress_at_eof(self):
compressed = b'\x00\x01A'
decompressor = CrleDecompressor(len(compressed))
self.assertEqual(decompressor.decompress(compressed, 1), b'A')
self.assertEqual(decompressor.eof, True)
with self.assertRaises(detools.Error) as cm:
decompressor.decompress(b'6', 1)
self.assertEqual(str(cm.exception), 'Already at end of stream.')
with self.assertRaises(detools.Error) as cm:
decompressor.decompress(b'', 1)
self.assertEqual(str(cm.exception), 'Already at end of stream.')
def test_decompress_ignore_extra_data(self):
compressed = b'\x00\x01A'
decompressor = CrleDecompressor(len(compressed))
self.assertEqual(decompressor.decompress(compressed + b'B', 1), b'A')
self.assertEqual(decompressor.eof, True)
if __name__ == '__main__':
unittest.main()
|
ensemble_submits/ensemble.py | tecufly/TextClassify | 216 | 11154734 | <filename>ensemble_submits/ensemble.py
import pandas as pd
import numpy as np
# Directory containing the submission files to vote on
submits_path = './submits/'
# Submission files to include in the vote
submits = ['0.82414645.csv','0.8172323.csv','0.81546885000.csv']
# Per-file weights used in the vote
file_weight = [3,2,2]
# Per-label weights used in the vote
label_weight = [1,1,1]
files = []
data = []
for f in submits:
if 'csv' in f:
files.append(f)
data.append(pd.read_csv(submits_path+f).values)
print(len(files))
output = np.zeros([len(data[0]), 3])
for i in range(len(data)):
for j in range(len(data[0])):
        if data[i][j][1] == 0:
            output[j][0] += file_weight[i]*label_weight[0]
        elif data[i][j][1] == 1:
            output[j][1] += file_weight[i]*label_weight[1]
        elif data[i][j][1] == 2:
            output[j][2] += file_weight[i]*label_weight[2]
# Read the submission template (set this path as needed)
submit = pd.read_csv('sub_teample.csv')
submit['label'] = np.argmax(output, axis = 1)
submit.to_csv('submit.csv',index=None)
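# Note on the assumed input format: each submission CSV is expected to carry
# the predicted class in its second column (index 1), e.g. rows of the form
# "id,label" with label in {0, 1, 2}; the template file only supplies the id
# column for the final submission.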
|
check/validate/client.py | siddhya/gstwebrtc-demos | 451 | 11154784 | <reponame>siddhya/gstwebrtc-demos
# Copyright (c) 2020, <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301, USA.
import threading
import copy
from observer import Signal, WebRTCObserver, DataChannelObserver, StateObserver
from enums import NegotiationState, DataChannelState
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
gi.require_version("GstWebRTC", "1.0")
from gi.repository import GstWebRTC
gi.require_version("GstSdp", "1.0")
from gi.repository import GstSdp
gi.require_version("GstValidate", "1.0")
from gi.repository import GstValidate
class WebRTCBinObserver(WebRTCObserver):
"""
Observe a webrtcbin element.
"""
def __init__(self, element):
WebRTCObserver.__init__(self)
self.element = element
self.signal_handlers = []
self.signal_handlers.append(element.connect("on-negotiation-needed", self._on_negotiation_needed))
self.signal_handlers.append(element.connect("on-ice-candidate", self._on_ice_candidate))
self.signal_handlers.append(element.connect("pad-added", self._on_pad_added))
self.signal_handlers.append(element.connect("on-new-transceiver", self._on_new_transceiver))
self.signal_handlers.append(element.connect("on-data-channel", self._on_data_channel))
self.negotiation_needed = 0
self._negotiation_needed_observer = StateObserver(self, "negotiation_needed", threading.Condition())
self.on_negotiation_needed = Signal()
self.on_ice_candidate = Signal()
self.on_pad_added = Signal()
self.on_new_transceiver = Signal()
def _on_negotiation_needed(self, element):
self.negotiation_needed += 1
self._negotiation_needed_observer.update(self.negotiation_needed)
self.on_negotiation_needed.fire()
def _on_ice_candidate(self, element, mline, candidate):
self.on_ice_candidate.fire(mline, candidate)
def _on_pad_added(self, element, pad):
self.on_pad_added.fire(pad)
def _on_description_set(self, promise, desc):
new_state = self._update_negotiation_from_description_state(desc)
if new_state == NegotiationState.OFFER_SET:
self.on_offer_set.fire (desc)
elif new_state == NegotiationState.ANSWER_SET:
self.on_answer_set.fire (desc)
def _on_new_transceiver(self, element, transceiver):
self.on_new_transceiver.fire(transceiver)
def _on_data_channel(self, element, channel):
observer = WebRTCBinDataChannelObserver(channel, channel.props.label, 'remote')
self.add_channel(observer)
def _update_negotiation_from_description_state(self, desc):
new_state = None
if desc.type == GstWebRTC.WebRTCSDPType.OFFER:
new_state = NegotiationState.OFFER_SET
elif desc.type == GstWebRTC.WebRTCSDPType.ANSWER:
new_state = NegotiationState.ANSWER_SET
assert new_state is not None
self._update_negotiation_state(new_state)
return new_state
def _deepcopy_session_description(self, desc):
# XXX: passing 'offer' to both a promise and an action signal without
# a deepcopy will segfault...
new_sdp = GstSdp.SDPMessage.new()[1]
GstSdp.sdp_message_parse_buffer(bytes(desc.sdp.as_text().encode()), new_sdp)
return GstWebRTC.WebRTCSessionDescription.new(desc.type, new_sdp)
def _on_offer_created(self, promise, element):
self._update_negotiation_state(NegotiationState.OFFER_CREATED)
reply = promise.get_reply()
offer = reply['offer']
new_offer = self._deepcopy_session_description(offer)
promise = Gst.Promise.new_with_change_func(self._on_description_set, new_offer)
new_offer = self._deepcopy_session_description(offer)
self.element.emit('set-local-description', new_offer, promise)
self.on_offer_created.fire(offer)
def _on_answer_created(self, promise, element):
self._update_negotiation_state(NegotiationState.ANSWER_CREATED)
reply = promise.get_reply()
offer = reply['answer']
new_offer = self._deepcopy_session_description(offer)
promise = Gst.Promise.new_with_change_func(self._on_description_set, new_offer)
new_offer = self._deepcopy_session_description(offer)
self.element.emit('set-local-description', new_offer, promise)
self.on_answer_created.fire(offer)
def create_offer(self, options=None):
promise = Gst.Promise.new_with_change_func(self._on_offer_created, self.element)
self.element.emit('create-offer', options, promise)
def create_answer(self, options=None):
promise = Gst.Promise.new_with_change_func(self._on_answer_created, self.element)
self.element.emit('create-answer', options, promise)
def set_remote_description(self, desc):
promise = Gst.Promise.new_with_change_func(self._on_description_set, desc)
self.element.emit('set-remote-description', desc, promise)
def add_ice_candidate(self, mline, candidate):
self.element.emit('add-ice-candidate', mline, candidate)
def add_data_channel(self, ident):
channel = self.element.emit('create-data-channel', ident, None)
observer = WebRTCBinDataChannelObserver(channel, ident, 'local')
self.add_channel(observer)
def wait_for_negotiation_needed(self, generation):
self._negotiation_needed_observer.wait_for ((generation,))
class WebRTCStream(object):
"""
An stream attached to a webrtcbin element
"""
def __init__(self):
self.bin = None
def set_description(self, desc):
assert self.bin is None
self.bin = Gst.parse_bin_from_description(desc, True)
def add_and_link(self, parent, link):
assert self.bin is not None
self.bin.set_locked_state(True)
parent.add(self.bin)
src = self.bin.get_static_pad("src")
sink = self.bin.get_static_pad("sink")
assert src is None or sink is None
if src:
self.bin.link(link)
if sink:
link.link(self.bin)
self.bin.set_locked_state(False)
self.bin.sync_state_with_parent()
def add_and_link_to(self, parent, link, pad):
assert self.bin is not None
self.bin.set_locked_state(True)
parent.add(self.bin)
src = self.bin.get_static_pad("src")
sink = self.bin.get_static_pad("sink")
assert src is None or sink is None
if pad.get_direction() == Gst.PadDirection.SRC:
assert sink is not None
pad.link(sink)
if pad.get_direction() == Gst.PadDirection.SINK:
assert src is not None
src.link(pad)
self.bin.set_locked_state(False)
self.bin.sync_state_with_parent()
class WebRTCClient(WebRTCBinObserver):
"""
Client for performing webrtc operations. Controls the pipeline that
contains a webrtcbin element.
"""
def __init__(self):
self.pipeline = Gst.Pipeline(None)
self.webrtcbin = Gst.ElementFactory.make("webrtcbin")
super().__init__(self.webrtcbin)
self.pipeline.add(self.webrtcbin)
self._streams = []
def stop(self):
self.pipeline.set_state (Gst.State.NULL)
def add_stream(self, desc):
stream = WebRTCStream()
stream.set_description(desc)
stream.add_and_link (self.pipeline, self.webrtcbin)
self._streams.append(stream)
def add_stream_with_pad(self, desc, pad):
stream = WebRTCStream()
stream.set_description(desc)
stream.add_and_link_to (self.pipeline, self.webrtcbin, pad)
self._streams.append(stream)
def set_options (self, opts):
if opts.has_field("local-bundle-policy"):
self.webrtcbin.props.bundle_policy = opts["local-bundle-policy"]
class WebRTCBinDataChannelObserver(DataChannelObserver):
"""
Data channel observer for a webrtcbin data channel.
"""
def __init__(self, target, ident, location):
super().__init__(ident, location)
self.target = target
self.signal_handlers = []
self.signal_handlers.append(target.connect("on-open", self._on_open))
self.signal_handlers.append(target.connect("on-close", self._on_close))
self.signal_handlers.append(target.connect("on-error", self._on_error))
self.signal_handlers.append(target.connect("on-message-data", self._on_message_data))
self.signal_handlers.append(target.connect("on-message-string", self._on_message_string))
self.signal_handlers.append(target.connect("on-buffered-amount-low", self._on_buffered_amount_low))
def _on_open(self, channel):
self._update_state (DataChannelState.OPEN)
def _on_close(self, channel):
self._update_state (DataChannelState.CLOSED)
def _on_error(self, channel):
self._update_state (DataChannelState.ERROR)
def _on_message_data(self, channel, data):
        self.data.append(data)
def _on_message_string(self, channel, msg):
self.got_message (msg)
def _on_buffered_amount_low(self, channel):
pass
def close(self):
self.target.emit('close')
def send_string (self, msg):
self.target.emit('send-string', msg)
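# Usage sketch (not part of the original harness): a minimal illustration of
# driving a WebRTCClient locally. The launch description and the negotiation
# generation below are illustrative assumptions.
#
#   Gst.init(None)
#   client = WebRTCClient()
#   client.add_stream('videotestsrc is-live=true ! vp8enc ! rtpvp8pay ! queue')
#   client.pipeline.set_state(Gst.State.PLAYING)
#   client.wait_for_negotiation_needed(1)
#   client.create_offer()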
|
physics_planning_games/board_games/_internal/pieces.py | mitchchristow/deepmind-research | 10,110 | 11154834 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entities representing board game pieces."""
import itertools
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
import numpy as np
_VISIBLE_SITE_GROUP = 0
_INVISIBLE_SITE_GROUP = 3
_RED = (1., 0., 0., 0.5)
_BLUE = (0., 0, 1., 0.5)
_INVALID_PLAYER_ID = '`player_id` must be between 0 and {}, got {}.'
_NO_MORE_MARKERS_AVAILABLE = (
'All {} markers for player {} have already been placed.')
class Markers(composer.Entity):
"""A collection of non-physical entities for marking board positions."""
def _build(self,
num_per_player,
player_colors=(_RED, _BLUE),
halfwidth=0.025,
height=0.01,
board_size=7):
"""Builds a `Markers` entity.
Args:
num_per_player: Integer, the total number of markers to create per player.
player_colors: Sequence of (R, G, B, A) values specifying the marker
colors for each player.
halfwidth: Scalar, the halfwidth of each marker.
height: Scalar, height of each marker.
board_size: Integer, optional if using the integer indexing.
"""
root = mjcf.RootElement(model='markers')
root.default.site.set_attributes(type='cylinder', size=(halfwidth, height))
all_markers = []
for i, color in enumerate(player_colors):
player_name = 'player_{}'.format(i)
# TODO(alimuldal): Would look cool if these were textured.
material = root.asset.add('material', name=player_name, rgba=color)
player_markers = []
for j in range(num_per_player):
player_markers.append(
root.worldbody.add(
'site',
name='player_{}_move_{}'.format(i, j),
material=material))
all_markers.append(player_markers)
self._num_players = len(player_colors)
self._mjcf_model = root
self._all_markers = all_markers
self._move_counts = [0] * self._num_players
# To go from integer position to marker index in the all_markers array
self._marker_ids = np.zeros((2, board_size, board_size))
self._board_size = board_size
def _build_observables(self):
return MarkersObservables(self)
@property
def mjcf_model(self):
"""`mjcf.RootElement` for this entity."""
return self._mjcf_model
@property
def markers(self):
"""Marker sites belonging to all players.
Returns:
A nested list, where `markers[i][j]` contains the `mjcf.Element`
corresponding to player i's jth marker.
"""
return self._all_markers
def initialize_episode(self, physics, random_state):
"""Resets the markers at the start of an episode."""
del random_state # Unused.
self._reset(physics)
def _reset(self, physics):
for player_markers in self._all_markers:
for marker in player_markers:
bound_marker = physics.bind(marker)
bound_marker.pos = 0. # Markers are initially placed at the origin.
bound_marker.group = _INVISIBLE_SITE_GROUP
self._move_counts = [0] * self._num_players
self._marker_ids = np.zeros((2, self._board_size, self._board_size),
dtype=np.int32)
def make_all_invisible(self, physics):
for player_markers in self._all_markers:
for marker in player_markers:
bound_marker = physics.bind(marker)
bound_marker.group = _INVISIBLE_SITE_GROUP
def make_visible_by_bpos(self, physics, player_id, all_bpos):
for bpos in all_bpos:
marker_id = self._marker_ids[player_id][bpos[0]][bpos[1]]
marker = self._all_markers[player_id][marker_id]
bound_marker = physics.bind(marker)
bound_marker.group = _VISIBLE_SITE_GROUP
def mark(self, physics, player_id, pos, bpos=None):
"""Enables the visibility of a marker, moves it to the specified position.
Args:
physics: `mjcf.Physics` instance.
player_id: Integer specifying the ID of the player whose marker to use.
pos: Array-like object specifying the cartesian position of the marker.
bpos: Board position, optional integer coordinates to index the markers.
Raises:
ValueError: If `player_id` is invalid.
RuntimeError: If `player_id` has no more available markers.
"""
if not 0 <= player_id < self._num_players:
raise ValueError(
_INVALID_PLAYER_ID.format(self._num_players - 1, player_id))
markers = self._all_markers[player_id]
move_count = self._move_counts[player_id]
if move_count >= len(markers):
raise RuntimeError(
_NO_MORE_MARKERS_AVAILABLE.format(move_count, player_id))
bound_marker = physics.bind(markers[move_count])
bound_marker.pos = pos
# TODO(alimuldal): Set orientation as well (random? same as contact frame?)
bound_marker.group = _VISIBLE_SITE_GROUP
self._move_counts[player_id] += 1
if bpos:
self._marker_ids[player_id][bpos[0]][bpos[1]] = move_count
class MarkersObservables(composer.Observables):
"""Observables for a `Markers` entity."""
@composer.observable
def position(self):
"""Cartesian positions of all marker sites.
Returns:
An `observable.MJCFFeature` instance. When called with an instance of
`physics` as the argument, this will return a numpy float64 array of shape
(num_players * num_markers, 3) where each row contains the cartesian
position of a marker. Unplaced markers will have position (0, 0, 0).
"""
return observable.MJCFFeature(
'xpos', list(itertools.chain.from_iterable(self._entity.markers)))
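# Usage sketch (not part of the original module): a minimal illustration of
# placing a marker, assuming a physics instance built from the entity's own
# MJCF model. The positions below are arbitrary examples.
#
#   markers = Markers(num_per_player=3)
#   physics = mjcf.Physics.from_mjcf_model(markers.mjcf_model)
#   markers.mark(physics, player_id=0, pos=(0.1, 0.2, 0.0), bpos=(1, 2))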
|
zat/log_to_dataframe.py | SuperCowPowers/zat | 146 | 11154869 | """LogToDataFrame: Converts a Zeek log to a Pandas DataFrame"""
# Third Party
import pandas as pd
# Local
from zat import zeek_log_reader
class LogToDataFrame(object):
"""LogToDataFrame: Converts a Zeek log to a Pandas DataFrame
Notes:
This class has recently been overhauled from a simple loader to a more
complex class that should in theory:
- Select better types for each column
- Should be faster
- Produce smaller memory footprint dataframes
If you have any issues/problems with this class please submit a GitHub issue.
More Info: https://supercowpowers.github.io/zat/large_dataframes.html
"""
def __init__(self):
"""Initialize the LogToDataFrame class"""
# First Level Type Mapping
# This map defines the types used when first reading in the Zeek log into a 'chunk' dataframes.
# Types (like time and interval) will be defined as one type at first but then
# will undergo further processing to produce correct types with correct values.
# See: https://stackoverflow.com/questions/29245848/what-are-all-the-dtypes-that-pandas-recognizes
# for more info on supported types.
self.type_map = {'bool': 'category', # Can't hold NaN values in 'bool', so we're going to use category
'count': 'UInt64',
'int': 'Int32',
'double': 'float',
'time': 'float', # Secondary Processing into datetime
'interval': 'float', # Secondary processing into timedelta
'port': 'UInt16'
}
def _get_field_info(self, log_filename):
"""Internal Method: Use ZAT log reader to read header for names and types"""
_zeek_reader = zeek_log_reader.ZeekLogReader(log_filename)
_, field_names, field_types, _ = _zeek_reader._parse_zeek_header(log_filename)
return field_names, field_types
def _create_initial_df(self, log_filename, all_fields, usecols, dtypes):
"""Internal Method: Create the initial dataframes by using Pandas read CSV (primary types correct)"""
return pd.read_csv(log_filename, sep='\t', names=all_fields, usecols=usecols, dtype=dtypes, comment="#", na_values='-')
def create_dataframe(self, log_filename, ts_index=True, aggressive_category=True, usecols=None):
""" Create a Pandas dataframe from a Bro/Zeek log file
Args:
            log_filename (string): The full path to the Zeek log
ts_index (bool): Set the index to the 'ts' field (default = True)
aggressive_category (bool): convert unknown columns to category (default = True)
            usecols (list): A subset of columns to read in (minimizes memory usage) (default = None)
"""
# Grab the field information
field_names, field_types = self._get_field_info(log_filename)
all_fields = field_names # We need ALL the fields for later
# If usecols is set then we'll subset the fields and types
if usecols:
# Usecols needs to include ts
if 'ts' not in usecols:
usecols.append('ts')
field_types = [t for t, field in zip(field_types, field_names) if field in usecols]
field_names = [field for field in field_names if field in usecols]
# Get the appropriate types for the Pandas Dataframe
pandas_types = self.pd_column_types(field_names, field_types, aggressive_category)
# Now actually read in the initial dataframe
self._df = self._create_initial_df(log_filename, all_fields, usecols, pandas_types)
# Now we convert 'time' and 'interval' fields to datetime and timedelta respectively
for name, zeek_type in zip(field_names, field_types):
if zeek_type == 'time':
self._df[name] = pd.to_datetime(self._df[name], unit='s')
if zeek_type == 'interval':
self._df[name] = pd.to_timedelta(self._df[name], unit='s')
# Set the index
if ts_index and not self._df.empty:
try:
self._df.set_index('ts', inplace=True)
except KeyError:
print('Could not find ts/timestamp for index...')
return self._df
def pd_column_types(self, column_names, column_types, aggressive_category=True, verbose=False):
"""Given a set of names and types, construct a dictionary to be used
as the Pandas read_csv dtypes argument"""
        # Aggressive category means that types not in the current type_map are
        # mapped to 'category'; if aggressive_category is False then they
        # are mapped to an 'object' type
unknown_type = 'category' if aggressive_category else 'object'
pandas_types = {}
for name, zeek_type in zip(column_names, column_types):
# Grab the type
item_type = self.type_map.get(zeek_type)
# Sanity Check
if not item_type:
# UID/FUID/GUID always gets mapped to object
if 'uid' in name:
item_type = 'object'
else:
if verbose:
print('Could not find type for {:s} using {:s}...'.format(zeek_type, unknown_type))
item_type = unknown_type
# Set the pandas type
pandas_types[name] = item_type
# Return the dictionary of name: type
return pandas_types
# Simple test of the functionality
def test():
"""Test for LogToDataFrame Class"""
import os
pd.set_option('display.width', 1000)
from zat.utils import file_utils
# Grab a test file
data_path = file_utils.relative_dir(__file__, '../data')
log_path = os.path.join(data_path, 'conn.log')
# Convert it to a Pandas DataFrame
log_to_df = LogToDataFrame()
my_df = log_to_df.create_dataframe(log_path)
# Print out the head
print(my_df.head())
# Print out the datatypes
print(my_df.dtypes)
# Test a bunch
tests = ['app_stats.log', 'dns.log', 'http.log', 'notice.log', 'tor_ssl.log',
'conn.log', 'dhcp_002.log', 'files.log', 'smtp.log', 'weird.log',
'ftp.log', 'ssl.log', 'x509.log']
for log_path in [os.path.join(data_path, log) for log in tests]:
print('Testing: {:s}...'.format(log_path))
my_df = log_to_df.create_dataframe(log_path)
print(my_df.head())
print(my_df.dtypes)
# Test out usecols arg
conn_path = os.path.join(data_path, 'conn.log')
my_df = log_to_df.create_dataframe(conn_path, usecols=['id.orig_h', 'id.orig_p', 'id.resp_h', 'id.resp_p',
'proto', 'orig_bytes', 'resp_bytes'])
# Test an empty log (a log with header/close but no data rows)
log_path = os.path.join(data_path, 'http_empty.log')
my_df = log_to_df.create_dataframe(log_path)
# Print out the head
print(my_df.head())
# Print out the datatypes
print(my_df.dtypes)
print('LogToDataFrame Test successful!')
if __name__ == '__main__':
# Run the test for easy testing/debugging
test()
|
openbook_auth/migrations/0051_auto_20191209_1338.py | TamaraAbells/okuna-api | 164 | 11154911 | # Generated by Django 2.2.5 on 2019-12-09 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('openbook_auth', '0050_auto_20191209_1335'),
]
operations = [
migrations.AlterField(
model_name='usernotificationssubscription',
name='new_post_notifications',
field=models.BooleanField(default=False),
),
]
|
custom_components/samsungtv_custom/samsungctl_080b/remote_encrypted/rijndael/__init__.py | AdamOttvar/ha-samsungtv-custom | 117 | 11154925 | from .rijndael import Rijndael
|
fpn/demo.py | LZP4GitHub/RoITransformer_DOTA | 200 | 11154964 | <reponame>LZP4GitHub/RoITransformer_DOTA<gh_stars>100-1000
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>
# --------------------------------------------------------
import _init_paths
import argparse
import os
import sys
import logging
import pprint
import cv2
from config.config import config, update_config
from utils.image import resize, transform
import numpy as np
# get config
os.environ['PYTHONUNBUFFERED'] = '1'
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
os.environ['MXNET_ENABLE_GPU_P2P'] = '0'
cur_path = os.path.abspath(os.path.dirname(__file__))
update_config(cur_path + '/../experiments/fpn/cfgs/resnet_v1_101_dota_rotbox_light_head_RoITransformer_trainval_fpn_end2end.yaml')
sys.path.insert(0, os.path.join(cur_path, '../external/mxnet', config.MXNET_VERSION))
sys.path.insert(0, '../')
import mxnet as mx
from core.tester import im_detect, Predictor, im_detect_rotbox_Rroi
from symbols import *
from utils.load_model import load_param
from utils.show_boxes import show_boxes
from utils.tictoc import tic, toc
from nms.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
from dota_kit.ResultMerge import py_cpu_nms_poly
from utils import image
import pdb
def parse_args():
parser = argparse.ArgumentParser(description='Show Deformable ConvNets demo')
# general
parser.add_argument('--rfcn_only', help='whether use fpn only (w/o Deformable ConvNets)', default=False, action='store_true')
args = parser.parse_args()
return args
args = parse_args()
def draw_all_poly_detection(im_array, detections, class_names, scale, cfg, threshold=0.2):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
# pdb.set_trace()
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
try:
dets = detections[j]
except:
pdb.set_trace()
for det in dets:
bbox = det[:8] * scale
score = det[-1]
if score < threshold:
continue
bbox = map(int, bbox)
# draw first point
cv2.circle(im, (bbox[0], bbox[1]), 3, (0, 0, 255), -1)
for i in range(3):
cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]), (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]), color=color, thickness=2)
cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
def main():
# get symbol
pprint.pprint(config)
# config.symbol = 'resnet_v1_101_rfcn_dcn' if not args.rfcn_only else 'resnet_v1_101_rfcn'
config.symbol = 'resnet_v1_101_fpn_rcnn_rotbox_light_head_RoITransformer'
sym_instance = eval(config.symbol + '.' + config.symbol)()
sym = sym_instance.get_symbol(config, is_train=False)
# set up class names
num_classes = 15
classes = ['__background__', # always index 0
'plane', 'baseball-diamond',
'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle',
'ship', 'tennis-court',
'basketball-court', 'storage-tank',
'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool',
'helicopter']
# load demo data
image_names = ['P0004__1__0___0.png', 'P0053__1__0___0.png', 'P0060__1__1648___824.png']
data = []
for im_name in image_names:
# pdb.set_trace()
        assert os.path.exists(cur_path + '/../demo/' + im_name), ('{} does not exist'.format('../demo/' + im_name))
im = cv2.imread(cur_path + '/../demo/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
target_size = config.SCALES[0][0]
max_size = config.SCALES[0][1]
im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
im_tensor = transform(im, config.network.PIXEL_MEANS)
im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)
data.append({'data': im_tensor, 'im_info': im_info})
# get predictor
data_names = ['data', 'im_info']
label_names = []
data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
provide_label = [None for i in xrange(len(data))]
# arg_params, aux_params = load_param(cur_path + '/../model/' + ('rfcn_dcn_coco' if not args.rfcn_only else 'rfcn_coco'), 0, process=True)
# TODO: change this path
arg_params, aux_params = load_param(r'/home/dj/code/Deformable_FPN_DOTA/output/fpn/DOTA/resnet_v1_101_dota_rotbox_light_head_Rroi_v6_trainval_fpn_end2end/train/fpn_DOTA_oriented',
config.TEST.test_epoch, process=True)
predictor = Predictor(sym, data_names, label_names,
context=[mx.gpu(0)], max_data_shapes=max_data_shape,
provide_data=provide_data, provide_label=provide_label,
arg_params=arg_params, aux_params=aux_params)
nms = gpu_nms_wrapper(config.TEST.NMS, 0)
# warm up
for j in xrange(2):
data_batch = mx.io.DataBatch(data=[data[0]], label=[], pad=0, index=0,
provide_data=[[(k, v.shape) for k, v in zip(data_names, data[0])]],
provide_label=[None])
scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]
scores, boxes, data_dict = im_detect_rotbox_Rroi(predictor, data_batch, data_names, scales, config)
# test
for idx, im_name in enumerate(image_names):
data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
provide_label=[None])
scales = [data_batch.data[i][1].asnumpy()[0, 2] for i in xrange(len(data_batch.data))]
tic()
scores, boxes, data_dict = im_detect_rotbox_Rroi(predictor, data_batch, data_names, scales, config)
# boxes = boxes[0].astype('f')
# scores = scores[0].astype('f')
boxes = boxes[0].astype('float64')
scores = scores[0].astype('float64')
dets_nms = []
for j in range(1, scores.shape[1]):
cls_scores = scores[:, j, np.newaxis]
# cls_boxes = boxes[:, 4:8] if config.CLASS_AGNOSTIC else boxes[:, j * 4:(j + 1) * 4]
cls_boxes = boxes[:, 8:16] if config.CLASS_AGNOSTIC else boxes[:, j * 8:(j + 1) * 8]
cls_quadrangle_dets = np.hstack((cls_boxes, cls_scores))
# keep = nms(cls_dets)
keep = py_cpu_nms_poly(cls_quadrangle_dets, 0.3)
cls_quadrangle_dets = cls_quadrangle_dets[keep, :]
cls_quadrangle_dets = cls_quadrangle_dets[cls_quadrangle_dets[:, -1] > 0.7, :]
dets_nms.append(cls_quadrangle_dets)
print 'testing {} {:.4f}s'.format(im_name, toc())
# visualize
# im = cv2.imread(cur_path + '/../demo/' + im_name)
# im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
# pdb.set_trace()
im = draw_all_poly_detection(data_dict[0]['data'].asnumpy(), dets_nms, classes[1:], data[idx][1].asnumpy()[0][2], config,
threshold=0.2)
cv2.imwrite(cur_path + '/../demo/' + 'results' + im_name, im)
# show_boxes(im, dets_nms, classes, 1)
print 'done'
if __name__ == '__main__':
main()
|
tools/ivwpy/regression/database.py | alexanderbock/inviwo | 349 | 11154994 | #*********************************************************************************
#
# Inviwo - Interactive Visualization Workshop
#
# Copyright (c) 2013-2021 Inviwo Foundation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#*********************************************************************************
import os
import sys
import datetime
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, select
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy import func
from sqlalchemy import desc
# Table declarations
# ┌──────────────┐
# │ Quantity │ ┌──────────────┐
# │ │ │ Series │
# │ id │ │ │
# │ created │ │ id │ ┌──────────────┐
# │ name │ │ created │ │ Measurement │
# │ unit │ │ name │ │ │
# │ serieses │n────────────────────────1│ quantity_id │ │ id │
# │ │ ┌──1│ test_id │ │ created │
# └──────────────┘ │ │ measurements │n───1│ series_id │
# ┌──────────────┐ ┌──────────────┐ │ │ │ ┌─1│ testrun_id │
# │ Module │ │ Test │ │ └──────────────┘ │ │ value │
# │ │ │ │ │ │ │ │
# │ id │ │ id │ │ ┌──────────────┐ │ └──────────────┘
# │ created │ │ created │ │ │ SkipRun │ │
# │ name │ │ name │ │ │ │ │
# │ tests │n─1│ module_id │ │ │ id │ │
# │ │ │ serieses │n──┘ │ created │ │
# └──────────────┘ │ skipruns │n─────1│ test_id │ │
# ┌──────────────┐ │ testruns │n─┐ ┌─1│ run_id │ │
# │ Run │ │ │ │ │ │ reason │ │
# │ │ └──────────────┘ │ │ │ │ │
# │ id │ │ │ └──────────────┘ │
# │ created │ │ │ ┌──────────────┐ │
# │ skipruns │n────────────────────┼─┘ │ TestRun │ │
# │ testruns │n───────────┐ │ │ │ │
# │ │ │ │ │ id │ │
# └──────────────┘ │ │ │ created │ │ ┌──────────────┐
# ┌──────────────┐ │ └───1│ test_id │ │ │ Failure │
# │ Commit │ └────────────1│ run_id │ │ │ │
# │ │ ┌────────────1│ commit_id │ │ │ id │
# │ id │ │ │ measurements │n─┘ │ created │
# │ created │ │ │ failures │n───1│ testrun_id │
# │ name │ │ │ config │ │ key │
# │ hash │ │ │ │ │ message │
# │ testruns │n───────────┘ └──────────────┘ │ │
# │ │ └──────────────┘
# └──────────────┘
SqlBase = declarative_base()
class Module(SqlBase):
__tablename__ = 'module'
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
name = Column(String, nullable=False, unique=True)
class Quantity(SqlBase):
__tablename__ = "quantity"
id = Column(Integer, primary_key=True)
created = Column(DateTime(), nullable=False, default=datetime.datetime.now)
name = Column(String(), nullable=False, unique=True)
unit = Column(String(), nullable=False)
class Run(SqlBase):
__tablename__ = "run"
id = Column(Integer, primary_key=True)
created = Column(DateTime(), nullable=False, default=datetime.datetime.now)
class Test(SqlBase):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
module_id = Column(Integer, ForeignKey('module.id'))
module = relationship(Module, backref=backref('tests', uselist=True))
name = Column(String, nullable=False)
class Series(SqlBase):
__tablename__ = "series"
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
test_id = Column(Integer, ForeignKey('test.id'))
test = relationship(Test, backref=backref('serieses', uselist=True))
quantity_id = Column(Integer, ForeignKey('quantity.id'))
quantity = relationship(Quantity, backref=backref('serieses', uselist=True))
name = Column(String(), nullable=False)
class Commit(SqlBase):
__tablename__ = "commit"
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
hash = Column(String, nullable=False, unique=True)
date = Column(DateTime, nullable=False)
author = Column(String)
message = Column(String)
server = Column(String)
class SkipRun(SqlBase):
__tablename__ = "skiprun"
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
test_id = Column(Integer, ForeignKey('test.id'))
test = relationship(Test, backref=backref('skipruns', uselist=True))
run_id = Column(Integer, ForeignKey('run.id'))
run = relationship(Run, backref=backref('skipruns', uselist=True))
reason = Column(String())
class TestRun(SqlBase):
__tablename__ = "testrun"
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
test_id = Column(Integer, ForeignKey('test.id'))
test = relationship(Test, backref=backref('testruns', uselist=True))
commit_id = Column(Integer, ForeignKey('commit.id'))
commit = relationship(Commit, backref=backref('testruns', uselist=True))
run_id = Column(Integer, ForeignKey('run.id'))
run = relationship(Run, backref=backref('testruns', uselist=True))
config = Column(String())
class Failure(SqlBase):
__tablename__ = "failure"
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
testrun_id = Column(Integer, ForeignKey('testrun.id'))
testrun = relationship(TestRun, backref=backref('failures', uselist=True))
key = Column(String, nullable=False)
message = Column(String)
class Measurement(SqlBase):
__tablename__ = "measurement"
id = Column(Integer, primary_key=True)
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
series_id = Column(Integer, ForeignKey('series.id'))
series = relationship(Series, backref=backref('measurements', uselist=True))
testrun_id = Column(Integer, ForeignKey('testrun.id'))
testrun = relationship(TestRun, backref=backref('measurements', uselist=True))
value = Column(Float, nullable=False)
class Database():
def __init__(self, dbfile):
self.dbfile = dbfile
        if not os.path.exists(self.dbfile): # create a new database file
self.engine = create_engine('sqlite:///' + dbfile)
SqlBase.metadata.create_all(self.engine)
        else: # open the existing database
self.engine = create_engine('sqlite:///' + dbfile)
SqlBase.metadata.bind = self.engine
self.session = sessionmaker(bind=self.engine)()
def addEntry(self, Type, **kvargs):
entry = Type(**kvargs)
self.session.add(entry)
self.session.commit()
        return entry
def getOrAddModule(self, name):
module = self.session.query(Module).filter(Module.name == name).one_or_none()
if module == None: module = self.addEntry(Module, name = name)
return module
def getOrAddTest(self, module, name):
if isinstance(module, str): module = self.getOrAddModule(module)
test = self.session.query(Test).filter(Test.name == name,
Test.module == module).one_or_none()
if test == None: test = self.addEntry(Test, name = name, module = module)
return test
def getOrAddQuantity(self, name, unit):
quantity = self.session.query(Quantity).filter(Quantity.name == name).one_or_none()
if quantity == None: quantity = self.addEntry(Quantity, name = name, unit = unit)
return quantity
def getOrAddSeries(self, test, quantity, name):
series = self.session.query(Series).filter(Series.name == name,
Series.test == test,
Series.quantity == quantity).one_or_none()
if series == None: series = self.addEntry(Series, name = name, test = test, quantity = quantity)
return series
def getCommit(self, hash):
commit = self.session.query(Commit).filter(Commit.hash == hash).one_or_none()
return commit
def addCommit(self, hash, date, author, message, server):
return self.addEntry(Commit,
hash = hash,
date = date,
author = author,
message = message,
server = server)
def getOrAddCommit(self, hash, date, author, message, server):
commit = self.getCommit(hash)
if commit is None: commit = self.addCommit(hash, date, author, message, server)
return commit
def addRun(self):
return self.addEntry(Run)
def addSkipRun(self, run, test, reason = ""):
return self.addEntry(SkipRun, run = run, test = test, reason = reason)
def addTestRun(self, run, test, commit, config = ""):
return self.addEntry(TestRun, run = run, test = test, commit = commit, config = config)
def addFailure(self, testrun, key, message):
return self.addEntry(Failure, testrun = testrun, key = key, message = message)
def addMeasurement(self, series, testrun, value):
return self.addEntry(Measurement, series = series, testrun = testrun, value = value)
def getModules(self):
return self.session.query(Module).all()
def getRuns(self):
return self.session.query(Run).all()
def getSeries(self, modulename, testname, seriesname):
return self.session.query(Series).join(Test).join(Module).filter(Test.name == testname,
Module.name == modulename,
Series.name == seriesname).one_or_none()
def getSerieses(self, modulename, testname):
return self.session.query(Series).join(Test).join(Module).filter(Test.name == testname,
Module.name == modulename).all()
def getLastTestRun(self, modulename, testname):
return self.session.query(TestRun).\
join(Test).\
join(Module).\
filter(Test.name == testname,
Module.name == modulename).\
order_by(desc(TestRun.created)).\
first()
def getLastSuccessFirstFailure(self, modulename, testname):
testruns = self.session.query(TestRun).\
join(Test).\
join(Module).\
filter(Test.name == testname,
Module.name == modulename).\
order_by(desc(TestRun.created)).\
all()
lastSuccess = None
firstFailure = None
for testrun in testruns:
if len(testrun.failures) == 0:
lastSuccess = testrun
break
else:
firstFailure = testrun
return lastSuccess, firstFailure
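# A minimal usage sketch (illustrative only, not part of the original module);
# the test, quantity, commit, and measurement values below are made up.
def _example_record_measurement(dbfile='regression.sqlite'):
    db = Database(dbfile)
    test = db.getOrAddTest('basegl', 'simple-raycaster')
    quantity = db.getOrAddQuantity('time', 's')
    series = db.getOrAddSeries(test, quantity, 'total runtime')
    run = db.addRun()
    commit = db.getOrAddCommit('abc123', datetime.datetime.now(),
                               'author', 'commit message', 'origin')
    testrun = db.addTestRun(run, test, commit, config='Release')
    return db.addMeasurement(series, testrun, value=1.23)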
if __name__ == '__main__':
db = Database(sys.argv[1])
|
ch05/5.1.1.py | AaronZhengkk/SpiderBook | 990 | 11155010 | <gh_stars>100-1000
#coding:utf-8
import json
from bs4 import BeautifulSoup
import requests
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers={'User-Agent':user_agent}
r = requests.get('http://seputu.com/',headers=headers)
soup = BeautifulSoup(r.text,'html.parser',from_encoding='utf-8')#html.parser
content=[]
for mulu in soup.find_all(class_="mulu"):
h2 = mulu.find('h2')
if h2!=None:
        h2_title = h2.string  # get the section title
list=[]
        for a in mulu.find(class_='box').find_all('a'):  # get the url and chapter title from every <a> tag
href = a.get('href')
box_title = a.get('title')
list.append({'href':href,'box_title':box_title})
content.append({'title':h2_title,'content':list})
with open('qiye.json','w') as fp:
json.dump(content,fp=fp,indent=4)
|
tests/star_wars_data.py | artofhuman/graphql-relay-py | 124 | 11155015 | """This defines a basic set of data for our Star Wars Schema.
This data is hard coded for the sake of the demo, but you could imagine
fetching this data from a backend service rather than from hardcoded
JSON objects in a more complex demo.
"""
from typing import List, NamedTuple, Optional
class Ship(NamedTuple):
id: str
name: str
all_ships = [
Ship("1", "X-Wing"),
Ship("2", "Y-Wing"),
Ship("3", "A-Wing"),
# Yeah, technically it's Corellian. But it flew in the service of the rebels,
# so for the purposes of this demo it's a rebel ship.
Ship("4", "Millenium Falcon"),
Ship("5", "Home One"),
Ship("6", "TIE Fighter"),
Ship("7", "TIE Interceptor"),
Ship("8", "Executor"),
]
class Faction(NamedTuple):
id: str
name: str
ships: List[str]
rebels = Faction("1", "Alliance to Restore the Republic", ["1", "2", "3", "4", "5"])
empire = Faction("2", "Galactic Empire", ["6", "7", "8"])
all_factions = [rebels, empire]
def create_ship(ship_name: str, faction_id: str) -> Ship:
new_ship = Ship(str(len(all_ships) + 1), ship_name)
all_ships.append(new_ship)
faction = get_faction(faction_id)
if faction:
faction.ships.append(new_ship.id)
return new_ship
def get_ship(id_: str) -> Optional[Ship]:
return next(filter(lambda ship: ship.id == id_, all_ships), None) # type: ignore
def get_faction(id_: str) -> Optional[Faction]:
return next(
filter(lambda faction: faction.id == id_, all_factions), None # type: ignore
)
def get_rebels() -> Faction:
return rebels
def get_empire() -> Faction:
return empire
|
bin/colorchart.py | stefanholek/blessed | 826 | 11155076 | # -*- coding: utf-8 -*-
"""
Utility to show X11 colors in 24-bit and downconverted to 256, 16, and 8 colors.
The time to generate the table is displayed to give an indication of how long each algorithm takes
compared to the others.
"""
# std imports
import sys
import timeit
import colorsys
# local
import blessed
from blessed.color import COLOR_DISTANCE_ALGORITHMS
from blessed.colorspace import X11_COLORNAMES_TO_RGB
def sort_colors():
"""Sort colors by HSV value and remove duplicates."""
colors = {}
for color_name, rgb_color in X11_COLORNAMES_TO_RGB.items():
if rgb_color not in colors:
colors[rgb_color] = color_name
return sorted(colors.items(),
key=lambda rgb: colorsys.rgb_to_hsv(*rgb[0]),
reverse=True)
ALGORITHMS = tuple(sorted(COLOR_DISTANCE_ALGORITHMS))
SORTED_COLORS = sort_colors()
def draw_chart(term):
"""Draw a chart of each color downconverted with selected distance algorithm."""
sys.stdout.write(term.home)
width = term.width
line = ''
line_len = 0
start = timeit.default_timer()
for color in SORTED_COLORS:
chart = ''
for noc in (1 << 24, 256, 16, 8):
term.number_of_colors = noc
chart += getattr(term, color[1])(u'█')
if line_len + 5 > width:
line += '\n'
line_len = 0
line += ' %s' % chart
line_len += 5
elapsed = round((timeit.default_timer() - start) * 1000)
print(line)
left_text = '[] to select, q to quit'
center_text = f'{term.color_distance_algorithm}'
right_text = f'{elapsed:d} ms\n'
sys.stdout.write(term.clear_eos + left_text +
term.center(center_text, term.width -
term.length(left_text) - term.length(right_text)) +
right_text)
def color_chart(term):
"""Main color chart application."""
term = blessed.Terminal()
algo_idx = 0
dirty = True
with term.cbreak(), term.hidden_cursor(), term.fullscreen():
while True:
if dirty:
draw_chart(term)
inp = term.inkey()
dirty = True
if inp in '[]':
algo_idx += 1 if inp == ']' else -1
algo_idx %= len(ALGORITHMS)
term.color_distance_algorithm = ALGORITHMS[algo_idx]
elif inp == '\x0c':
pass
elif inp in 'qQ':
break
else:
dirty = False
if __name__ == '__main__':
color_chart(blessed.Terminal())
|
Demo/scripts/script.py | cemeyer/tauthon | 473 | 11155080 | <reponame>cemeyer/tauthon
#! /usr/bin/env python
# script.py -- Make typescript of terminal session.
# Usage:
# -a Append to typescript.
# -p Use Python as shell.
# Author: <NAME>.
import os, time, sys, getopt
import pty
def read(fd):
data = os.read(fd, 1024)
script.write(data)
return data
shell = 'sh'
filename = 'typescript'
mode = 'w'
if os.environ.has_key('SHELL'):
shell = os.environ['SHELL']
try:
opts, args = getopt.getopt(sys.argv[1:], 'ap')
except getopt.error, msg:
print '%s: %s' % (sys.argv[0], msg)
sys.exit(2)
for o, a in opts:
if o == '-a':
mode = 'a'
elif o == '-p':
shell = 'python'
script = open(filename, mode)
sys.stdout.write('Script started, file is %s\n' % filename)
script.write('Script started on %s\n' % time.ctime(time.time()))
pty.spawn(shell, read)
script.write('Script done on %s\n' % time.ctime(time.time()))
sys.stdout.write('Script done, file is %s\n' % filename)
|
tests/openbb_terminal/cryptocurrency/overview/test_blockchaincenter_model.py | tehcoderer/GamestonkTerminal | 255 | 11155103 | <filename>tests/openbb_terminal/cryptocurrency/overview/test_blockchaincenter_model.py
import pytest
from openbb_terminal.cryptocurrency.overview import blockchaincenter_model
@pytest.mark.skip
@pytest.mark.vcr
@pytest.mark.parametrize(
"days,since,until",
[
(365, 1_601_596_800, 1_641_573_787),
(90, 1_601_596_800, 1_641_573_787),
(30, 1_601_596_800, 1_641_573_787),
],
)
def test_get_altcoin_index(days, since, until, recorder):
df = blockchaincenter_model.get_altcoin_index(days, since, until)
recorder.capture(df)
|
CV/SemSegPaddle/src/models/backbone/hrnet.py | pkulzb/Research | 1,319 | 11155115 | # coding: utf8
# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
from src.utils.config import cfg
class HRNet():
"""
Reference:
<NAME>, et al. "Deep High-Resolution Representation Learning for Human Pose Estimation.", In CVPR 2019
"""
def __init__(self, stride=4, seg_flag=False):
self.stride= stride
self.seg_flag=seg_flag
def conv_bn_layer(self, input, filter_size, num_filters, stride=1, padding=1, num_groups=1, if_act=True, name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=num_groups,
act=None,
param_attr=ParamAttr(initializer=MSRA(), name=name + '_weights'),
bias_attr=False)
bn_name = name + '_bn'
bn = fluid.layers.batch_norm(input=conv,
param_attr=ParamAttr(name=bn_name + "_scale",
initializer=fluid.initializer.Constant(1.0)),
bias_attr=ParamAttr(name=bn_name + "_offset",
initializer=fluid.initializer.Constant(0.0)),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
if if_act:
bn = fluid.layers.relu(bn)
return bn
def basic_block(self, input, num_filters, stride=1, downsample=False, name=None):
residual = input
conv = self.conv_bn_layer(input=input, filter_size=3, num_filters=num_filters, stride=stride, name=name + '_conv1')
conv = self.conv_bn_layer(input=conv, filter_size=3, num_filters=num_filters, if_act=False, name=name + '_conv2')
if downsample:
residual = self.conv_bn_layer(input=input, filter_size=1, num_filters=num_filters, if_act=False,
name=name + '_downsample')
return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')
def bottleneck_block(self, input, num_filters, stride=1, downsample=False, name=None):
residual = input
conv = self.conv_bn_layer(input=input, filter_size=1, num_filters=num_filters, name=name + '_conv1')
conv = self.conv_bn_layer(input=conv, filter_size=3, num_filters=num_filters, stride=stride, name=name + '_conv2')
conv = self.conv_bn_layer(input=conv, filter_size=1, num_filters=num_filters * 4, if_act=False,
name=name + '_conv3')
if downsample:
residual = self.conv_bn_layer(input=input, filter_size=1, num_filters=num_filters * 4, if_act=False,
name=name + '_downsample')
return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')
def fuse_layers(self, x, channels, multi_scale_output=True, name=None):
out = []
for i in range(len(channels) if multi_scale_output else 1):
residual = x[i]
shape = residual.shape
width = shape[-1]
height = shape[-2]
for j in range(len(channels)):
if j > i:
y = self.conv_bn_layer(x[j], filter_size=1, num_filters=channels[i], if_act=False,
name=name + '_layer_' + str(i + 1) + '_' + str(j + 1))
y = fluid.layers.resize_bilinear(input=y, out_shape=[height, width])
residual = fluid.layers.elementwise_add(x=residual, y=y, act=None)
elif j < i:
y = x[j]
for k in range(i - j):
if k == i - j - 1:
y = self.conv_bn_layer(y, filter_size=3, num_filters=channels[i], stride=2, if_act=False,
name=name + '_layer_' + str(i + 1) + '_' + str(j + 1) + '_' + str(k + 1))
else:
y = self.conv_bn_layer(y, filter_size=3, num_filters=channels[j], stride=2,
name=name + '_layer_' + str(i + 1) + '_' + str(j + 1) + '_' + str(k + 1))
residual = fluid.layers.elementwise_add(x=residual, y=y, act=None)
residual = fluid.layers.relu(residual)
out.append(residual)
return out
def branches(self, x, block_num, channels, name=None):
out = []
for i in range(len(channels)):
residual = x[i]
for j in range(block_num):
residual = self.basic_block(residual, channels[i],
name=name + '_branch_layer_' + str(i + 1) + '_' + str(j + 1))
out.append(residual)
return out
def high_resolution_module(self, x, channels, multi_scale_output=True, name=None):
residual = self.branches(x, 4, channels, name=name)
out = self.fuse_layers(residual, channels, multi_scale_output=multi_scale_output, name=name)
return out
def transition_layer(self, x, in_channels, out_channels, name=None):
num_in = len(in_channels)
num_out = len(out_channels)
out = []
for i in range(num_out):
if i < num_in:
if in_channels[i] != out_channels[i]:
residual = self.conv_bn_layer(x[i], filter_size=3, num_filters=out_channels[i],
name=name + '_layer_' + str(i + 1))
out.append(residual)
else:
out.append(x[i])
else:
residual = self.conv_bn_layer(x[-1], filter_size=3, num_filters=out_channels[i], stride=2,
name=name + '_layer_' + str(i + 1))
out.append(residual)
return out
def stage(self, x, num_modules, channels, multi_scale_output=True, name=None):
out = x
for i in range(num_modules):
if i == num_modules - 1 and multi_scale_output == False:
out = self.high_resolution_module(out, channels, multi_scale_output=False, name=name + '_' + str(i + 1))
else:
out = self.high_resolution_module(out, channels, name=name + '_' + str(i + 1))
return out
def layer1(self, input, name=None):
conv = input
for i in range(4):
conv = self.bottleneck_block(conv, num_filters=64, downsample=True if i == 0 else False,
name=name + '_' + str(i + 1))
return conv
#def highResolutionNet(input, num_classes):
def net(self, input, num_classes=1000):
channels_2 = cfg.MODEL.HRNET.STAGE2.NUM_CHANNELS
channels_3 = cfg.MODEL.HRNET.STAGE3.NUM_CHANNELS
channels_4 = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS
num_modules_2 = cfg.MODEL.HRNET.STAGE2.NUM_MODULES
num_modules_3 = cfg.MODEL.HRNET.STAGE3.NUM_MODULES
num_modules_4 = cfg.MODEL.HRNET.STAGE4.NUM_MODULES
x = self.conv_bn_layer(input=input, filter_size=3, num_filters=64, stride=2, if_act=True, name='layer1_1')
x = self.conv_bn_layer(input=x, filter_size=3, num_filters=64, stride=2, if_act=True, name='layer1_2')
la1 = self.layer1(x, name='layer2')
tr1 = self.transition_layer([la1], [256], channels_2, name='tr1')
st2 = self.stage(tr1, num_modules_2, channels_2, name='st2')
tr2 = self.transition_layer(st2, channels_2, channels_3, name='tr2')
st3 = self.stage(tr2, num_modules_3, channels_3, name='st3')
tr3 = self.transition_layer(st3, channels_3, channels_4, name='tr3')
st4 = self.stage(tr3, num_modules_4, channels_4, name='st4')
# upsample
shape = st4[0].shape
height, width = shape[-2], shape[-1]
st4[1] = fluid.layers.resize_bilinear(st4[1], out_shape=[height, width])
st4[2] = fluid.layers.resize_bilinear(st4[2], out_shape=[height, width])
st4[3] = fluid.layers.resize_bilinear(st4[3], out_shape=[height, width])
out = fluid.layers.concat(st4, axis=1)
if self.seg_flag and self.stride==4:
return out
last_channels = sum(channels_4)
        out = self.conv_bn_layer(input=out, filter_size=1, num_filters=last_channels, stride=1, if_act=True, name='conv-2')
out= fluid.layers.conv2d(
input=out,
num_filters=num_classes,
filter_size=1,
stride=1,
padding=0,
act=None,
param_attr=ParamAttr(initializer=MSRA(), name='conv-1_weights'),
bias_attr=False)
out = fluid.layers.resize_bilinear(out, input.shape[2:])
return out
def hrnet():
model = HRNet(stride=4, seg_flag=True)
return model
if __name__ == '__main__':
image_shape = [3, 769, 769]
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    model = hrnet()
    logit = model.net(image)
print("logit:", logit.shape)
|
modules/google-earth-engine/docker/sepal-ee/sepal/ee/temporal_segmentation/harmonic_fit.py | BuddyVolly/sepal | 153 | 11155116 | import ee
import math
J_DAYS = 0
FRACTIONAL_YEARS = 1
UNIX_TIME_MILLIS = 2
def fitImage(coefs, t, dateFormat, harmonics):
return ee.ImageCollection(
fit(t, dateFormat, harmonics, lambda index: coefs.arrayGet([index]))
) \
.reduce(ee.Reducer.sum()) \
.regexpRename('(.*)_coefs_sum', '$1')
def fitNumber(coefs, t, dateFormat, harmonics):
return fit(t, dateFormat, harmonics, lambda index: ee.Number(coefs.get(index))) \
.reduce(ee.Reducer.sum())
def meanImage(coefs, tStart, tEnd, dateFormat, harmonics):
return mean(tStart, tEnd, dateFormat, harmonics, lambda index: coefs.arrayGet([index])) \
.regexpRename('(.*)_coefs', '$1')
def meanNumber(coefs, tStart, tEnd, dateFormat, harmonics):
return mean(tStart, tEnd, dateFormat, harmonics, lambda index: ee.Number(coefs.get(index)))
def fit(t, dateFormat=0, harmonics=3, coefExtractor=None):
def c(index):
return coefExtractor(index)
omega = getOmega(dateFormat)
return ee.List([
c(0) \
.add(c(1).multiply(t)),
c(2).multiply(t.multiply(omega).cos()) \
.add(c(3).multiply(t.multiply(omega).sin())),
c(4).multiply(t.multiply(omega * 2).cos()) \
.add(c(5).multiply(t.multiply(omega * 2).sin())),
c(6).multiply(t.multiply(omega * 3).cos()) \
.add(c(7).multiply(t.multiply(omega * 3).sin()))
]) \
.slice(0, ee.Number(harmonics).add(1))
def mean(tStart, tEnd, dateFormat, harmonics=3, coefExtractor=None):
expressions = [
'c0 + (c1 * (s + e) / 2)',
'1/(e - s) * ((c3 * (cos(w * s) - cos(e * w)) - c2 * (sin(w * s) - sin(e * w)))/w - ((s - e) * (c1 * (s + e) + 2 * c0)) / 2)',
'1/(e - s) * -(c4 * (sin(2 * w * s) - sin(2 * e * w)) - c5 * (cos(2 * w * s) - cos(2 * e * w)) + 2 * c2 * (sin(w * s) - sin(e * w)) - 2 * c3 * (cos(w * s) - cos(e * w)) + w * (s - e) * (c1 * (s + e) + 2 * c0)) / (2 * w)',
'1/(e - s) * -(2 * c6 * (sin(3 * w * s) - sin(3 * e * w)) - 2 * c7 * (cos(3 * w * s) - cos(3 * e * w)) + 3 * (c4 * (sin(2 * w * s) - sin(2 * e * w)) + w * (s - e) * (c1 * (s + e) + 2 * c0)) - 3 * c5 * (cos(2 * w * s) - cos(2 * e * w)) + 6 * c2 * (sin(w * s) - sin(e * w)) - 6 * c3 * (cos(w * s) - cos(e * w)))/(6 * w)'
]
return ee.Image().expression(expressions[harmonics], {
's': tStart,
'e': tEnd,
'w': getOmega(dateFormat),
'c0': coefExtractor(0),
'c1': coefExtractor(1),
'c2': coefExtractor(2),
'c3': coefExtractor(3),
'c4': coefExtractor(4),
'c5': coefExtractor(5),
'c6': coefExtractor(6),
'c7': coefExtractor(7),
})
def getOmega(dateFormat):
if dateFormat == J_DAYS:
return 2.0 * math.pi / 365.25
elif dateFormat == FRACTIONAL_YEARS:
return 2.0 * math.pi
elif dateFormat == UNIX_TIME_MILLIS:
return 2.0 * math.pi * 60 * 60 * 24 * 365.25
else:
        raise ValueError('Only dateFormat 0 (jdate), 1 (fractional years), and 2 (unix seconds) is supported')
|
swampdragon/management/commands/runsd.py | aswinkp/swampdragon | 366 | 11155120 | <filename>swampdragon/management/commands/runsd.py
from django.core.management.base import BaseCommand
from swampdragon.swampdragon_server import run_server
class Command(BaseCommand):
args = '<host_port>'
def handle(self, *args, **options):
host_port = None
if args:
host_port = args[0]
run_server(host_port=host_port)
"""
# This is the preferred way to implement positional arguments in Django 1.8, but breaks pre 1.8
def add_arguments(self, parser):
parser.add_argument('host_port', nargs='?', default=None, type=str)
def handle(self, *args, **options):
run_server(host_port=options['host_port'])
"""
|
examples/websockets_lib/app.py | mrmilu/graphql-ws | 272 | 11155152 | from graphql_ws.websockets_lib import WsLibSubscriptionServer
from graphql.execution.executors.asyncio import AsyncioExecutor
from sanic import Sanic, response
from sanic_graphql import GraphQLView
from schema import schema
from template import render_graphiql
app = Sanic(__name__)
@app.listener("before_server_start")
def init_graphql(app, loop):
app.add_route(
GraphQLView.as_view(schema=schema, executor=AsyncioExecutor(loop=loop)),
"/graphql",
)
@app.route("/graphiql")
async def graphiql_view(request):
return response.html(render_graphiql())
subscription_server = WsLibSubscriptionServer(schema)
@app.websocket("/subscriptions", subprotocols=["graphql-ws"])
async def subscriptions(request, ws):
await subscription_server.handle(ws)
return ws
app.run(host="0.0.0.0", port=8000)
|
tests/algorithms/test_job_normalizers_elasticsearch.py | bhagyaramgpo/skills-ml | 147 | 11155154 | import httpretty
import json
import re
from skills_utils.es import basic_client
from skills_ml.algorithms.job_normalizers.elasticsearch import NormalizeTopNIndexer
def mock_job_posting_generator(postings):
return lambda s3_conn, quarter: (json.dumps(post) for post in postings)
@httpretty.activate
def test_normalize_topn():
generator = mock_job_posting_generator((
{
'occupationalCategory': 'Line Cooks, Chefs',
'description': 'A food job description',
'title': 'Food Truck Sous Chef'
},
{
'occupationalCategory': 'Actors',
'description': 'An actor job description',
'title': 'Broadway Star'
},
))
indexer = NormalizeTopNIndexer(
s3_conn=None,
es_client=basic_client(),
job_titles_index='stuff',
alias_name='otherstuff',
quarter='2014Q1',
job_postings_generator=generator
)
# TODO: abstract the ES mocking to a module
# This means that the titles endpoint will say that all input job titles
# match best with 'Janitor'
mock_result = {
'hits': {
'hits': [
{'_source': {'jobtitle': ['Janitor']}}
]
}
}
httpretty.register_uri(
httpretty.GET,
re.compile('http://localhost:9200/stuff/_search'),
body=json.dumps(mock_result),
content_type='application/json'
)
index = 'stuff'
documents = [document for document in indexer._iter_documents(target_index=index)]
assert len(documents) == 2
for document in documents:
assert document['_source']['canonicaltitle'] == 'Janitor'
assert document['_source']['quarters'] == ['2014Q1']
assert document['_source']['occupation'] in ['Line Cooks, Chefs', 'Actors']
assert document['_source']['jobtitle'] in ['Food Truck Sous Chef', 'Broadway Star']
assert document['_index'] == index
assert document['_id']
|
model_search/controller.py | dywsjtu/model_search | 3,315 | 11155175 | <filename>model_search/controller.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A Phoenix controller - Responsible for choosing generators.
A controller is responsible for choosing what to do in a trial.
It does so by choosing which generator to run:
* It could be a SearchGenerator - responsible for exploration
* It could be a PriorGenerator - responsible for exploitation - or adding good
trained models to the ensemble.
* It could be a ReplayGenerator - responsible for importing submodels in an
ensemble when performing replay.
It is also a modular way to introduce phases (e.g., compression in some of the
trials).
"""
import abc
import collections
from absl import logging
from model_search.architecture import architecture_utils
from model_search.generators import base_tower_generator
from model_search.generators import trial_utils
from model_search.generators.prior_generator import PriorGenerator
from model_search.generators.replay_generator import ReplayGenerator
from model_search.generators.search_candidate_generator import SearchCandidateGenerator
class ReplayState(object):
"""Helper class to understand Replay state."""
def __init__(self, phoenix_spec):
self._phoenix_spec = phoenix_spec
def is_replay(self):
"""Returns True if this is a replay run."""
return self._phoenix_spec.HasField("replay")
def is_search(self):
"""Returns True if this is a search run."""
return not self._phoenix_spec.HasField("replay")
def replay_is_training_a_tower(self, my_id):
"""Returns True if we are training a new tower in a replay run.
Example:
1. In adaptive ensembling, every trial is training one new tower, so the
return value is always True.
2. In a non-adaptive ensembling, every trial except the last one is
training a new tower, whereas the last trial just ensembles the trained
towers.
Args:
      my_id: trial id.
    Returns:
      True if we are training a new tower, and False otherwise.
"""
towers_number = len(self._phoenix_spec.replay.towers)
output = (
trial_utils.adaptive_or_residual_ensemble(self._phoenix_spec) or
my_id < towers_number or towers_number == 1)
return output
def replay_is_importing_towers(self, my_id):
"""Returns true if we are importing a tower in this replay trial.
Examples:
1. For adaptive ensembling, we import towers for every trial with id
greater than 1.
2. For non-adaptive ensembling, we import towers only in the last trial.
Args:
my_id: trial id.
Returns:
True if we are importing a tower and False otherwise.
"""
towers_number = len(self._phoenix_spec.replay.towers)
output = ((my_id == towers_number and towers_number > 1) or
(my_id > 1 and
trial_utils.adaptive_or_residual_ensemble(self._phoenix_spec)))
return output
class GeneratorWithTrials(
collections.namedtuple("GeneratorWithTrials",
["instance", "relevant_trials"])):
"""A generator instance with all relevant trials."""
def __new__(cls, instance, relevant_trials):
return super(GeneratorWithTrials, cls).__new__(cls, instance,
relevant_trials)
def _return_generators(generators):
"""Sets the number of towers to zero when generator isn't used."""
for generator_name in base_tower_generator.ALL_GENERATORS:
if generator_name not in generators.keys():
architecture_utils.set_number_of_towers(generator_name, 0)
return generators
class Controller(object, metaclass=abc.ABCMeta):
"""An api for a controller."""
@abc.abstractmethod
def get_generators(self, my_id, all_trials):
"""Returns the `Dict` of generators that need to be triggered.
Args:
my_id: an int with the current trial id.
all_trials: a list of metadata.trial.Trial protos with all information in
the current study.
Returns:
A dict of generator names as keys and GeneratorWithTrials as values.
"""
class InProcessController(Controller):
"""An In process Phoenix controller.
This controller assumes search, ensembling, distillation and replay all run
in the same binary.
It will allocate trials for the various functionalities based on trial id.
"""
def __init__(self, phoenix_spec, metadata):
self._phoenix_spec = phoenix_spec
self._search_candidate_generator = SearchCandidateGenerator(
phoenix_spec=phoenix_spec, metadata=metadata)
self._prior_candidate_generator = PriorGenerator(
phoenix_spec=phoenix_spec, metadata=metadata)
self._replay_generator = ReplayGenerator(
phoenix_spec=phoenix_spec, metadata=metadata)
self._replay_state = ReplayState(phoenix_spec)
def get_generators(self, my_id, all_trials):
"""Determines which generators to run."""
output = {}
ensemble_spec = self._phoenix_spec.ensemble_spec
distillation_spec = self._phoenix_spec.distillation_spec
logging.info("trial id: %d", my_id)
# Handling replay
if self._replay_state.is_replay():
if self._replay_state.replay_is_training_a_tower(my_id):
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator, [])
})
if self._replay_state.replay_is_importing_towers(my_id):
output.update({
base_tower_generator.REPLAY_GENERATOR:
GeneratorWithTrials(self._replay_generator, [])
})
return _return_generators(output)
# Real Search from here on.
# First: User suggestions first! No ensembling in suggestions.
if my_id <= len(self._phoenix_spec.user_suggestions):
logging.info("user suggestions mode")
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator, [])
})
return _return_generators(output)
# Second: Handle non-adaptive search
if trial_utils.is_nonadaptive_ensemble_search(ensemble_spec):
logging.info("non adaptive ensembling mode")
pool_size = ensemble_spec.nonadaptive_search.minimal_pool_size
search_trials = [t for t in all_trials if t.id <= pool_size]
# Pool too small, continue searching
if my_id <= pool_size:
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator,
search_trials)
})
return _return_generators(output)
# Pool hit critical mass, start ensembling.
else:
output.update({
base_tower_generator.PRIOR_GENERATOR:
GeneratorWithTrials(self._prior_candidate_generator,
search_trials)
})
return _return_generators(output)
# Third: Adaptive / Residual ensemble search
if (trial_utils.is_adaptive_ensemble_search(ensemble_spec) or
trial_utils.is_residual_ensemble_search(ensemble_spec)):
logging.info("adaptive/residual ensembling mode")
increase_every = ensemble_spec.adaptive_search.increase_width_every
pool_size = my_id // increase_every * increase_every
ensembling_trials = [
trial for trial in all_trials if trial.id <= pool_size
]
search_trials = [trial for trial in all_trials if trial.id > pool_size]
if ensembling_trials:
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator,
search_trials),
base_tower_generator.PRIOR_GENERATOR:
GeneratorWithTrials(self._prior_candidate_generator,
ensembling_trials)
})
return _return_generators(output)
else:
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator,
search_trials)
})
return _return_generators(output)
# Fourth: Intermixed Search.
if trial_utils.is_intermixed_ensemble_search(ensemble_spec):
logging.info("intermix ensemble search mode")
n = ensemble_spec.intermixed_search.try_ensembling_every
search_trials = [t for t in all_trials if t.id % n != 0]
if my_id % n != 0:
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator,
search_trials)
})
if (trial_utils.get_trial_mode(
ensemble_spec, distillation_spec,
my_id) == trial_utils.TrialMode.DISTILLATION):
output.update({
base_tower_generator.PRIOR_GENERATOR:
GeneratorWithTrials(self._prior_candidate_generator,
all_trials)
})
return _return_generators(output)
else:
output.update({
base_tower_generator.PRIOR_GENERATOR:
GeneratorWithTrials(self._prior_candidate_generator,
search_trials)
})
return _return_generators(output)
# No ensembling
output.update({
base_tower_generator.SEARCH_GENERATOR:
GeneratorWithTrials(self._search_candidate_generator, all_trials)
})
return _return_generators(output)
|
tests/test_all_tables_e2e/test_porcelain_table.py | mgedmin/terminaltables | 742 | 11155209 | """PorcelainTable end to end testing."""
from terminaltables import PorcelainTable
def test_single_line():
"""Test single-lined cells."""
table_data = [
['Name', 'Color', 'Type'],
['Avocado', 'green', 'nut'],
['Tomato', 'red', 'fruit'],
['Lettuce', 'green', 'vegetable'],
['Watermelon', 'green']
]
table = PorcelainTable(table_data)
table.justify_columns[0] = 'left'
table.justify_columns[1] = 'center'
table.justify_columns[2] = 'right'
actual = table.table
expected = (
' Name | Color | Type \n'
' Avocado | green | nut \n'
' Tomato | red | fruit \n'
' Lettuce | green | vegetable \n'
' Watermelon | green | '
)
assert actual == expected
def test_multi_line():
"""Test multi-lined cells."""
table_data = [
['Show', 'Characters'],
['Rugrats', '<NAME>, <NAME>, <NAME>, Lillian DeVille, Angelica Pickles,\nDil Pickles'],
['South Park', '<NAME>, <NAME>, <NAME>, <NAME>']
]
table = PorcelainTable(table_data)
# Test defaults.
actual = table.table
expected = (
' Show | Characters \n'
' Rugrats | <NAME>, <NAME>, <NAME>, Lillian DeVille, Angelica Pickles, \n'
' | Dil Pickles \n'
' South Park | <NAME>, <NAME>, <NAME>, <NAME> '
)
assert actual == expected
# Justify right.
table.justify_columns = {1: 'right'}
actual = table.table
expected = (
' Show | Characters \n'
' Rugrats | <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, \n'
' | <NAME> \n'
' South Park | <NAME>, <NAME>, <NAME>, <NAME> '
)
assert actual == expected
|
tests/utils.py | pbulteel/python-hpilo | 245 | 11155233 | import unittest
import ConfigParser
import json
import os
import sys
import time
import re
testroot = os.path.dirname(__file__)
sys.path.insert(0, os.path.dirname(testroot))
import hpilo
import warnings
warnings.filterwarnings("ignore", category=hpilo.IloWarning)
class FirmwareCache(object):
def __init__(self):
self.cachefile = os.path.join(testroot, '.firmware_version_cache')
self.cache = {}
if os.path.exists(self.cachefile):
with open(self.cachefile) as fd:
self.cache = json.load(fd)
def __getitem__(self, ilo):
if ilo.hostname not in self.cache:
self.cache[ilo.hostname] = ilo.get_fw_version()
with open(self.cachefile, 'w') as fd:
json.dump(self.cache, fd)
return self.cache[ilo.hostname]
firmware_cache = FirmwareCache()
class IloTestCaseMeta(type):
def __new__(cls, name, bases, attrs):
attrs['ilos'] = {}
config = ConfigParser.ConfigParser()
config.read(os.path.expanduser(os.path.join('~', '.ilo.conf')))
login = config.get('ilo', 'login')
password = config.get('ilo', 'password')
methods = []
for attr in list(attrs.keys()):
if attr.startswith('test_') and callable(attrs[attr]):
attrs['_' + attr] = attrs.pop(attr)
methods.append(attr[5:])
for section in config.sections():
if not section.startswith('test '):
continue
key = section.split()[1]
hostname = config.get(section, 'ilo')
ilo = hpilo.Ilo(hostname, login, password)
ilo.firmware_version = firmware_cache[ilo]
if not ilo.protocol:
ilo.protocol = hpilo.ILO_RAW if ilo.firmware_version['management_processor'].lower() in ('ilo', 'ilo2') else hpilo.ILO_HTTP
ilo.save_response = os.path.join(testroot, 'hpilo_test_debug_output')
attrs['ilos'][key] = ilo
for method in methods:
fname = re.sub('[^a-zA-Z0-9_]', '_', 'test_%s_%s' % (key, method))
attrs[fname] = eval("lambda self: self._test_%s(self.ilos['%s'])" % (method, key))
return super(IloTestCaseMeta, cls).__new__(cls, name, bases, attrs)
class IloTestCase(unittest.TestCase):
__metaclass__ = IloTestCaseMeta
maxDiff = None
def require_ilo(self, ilo, *ilos):
for ilov in ilos:
version = None
if ':' in ilov:
ilov, version = ilov.split(':')
if ilo.firmware_version['management_processor'].lower() == ilov:
if not version or ilo.firmware_version['firmware_version'] >= version:
return True
raise unittest.SkipTest("This test requires %s, not %s:%s" % ('|'.join(ilos),
ilo.firmware_version['management_processor'].lower(), ilo.firmware_version['firmware_version']))
def reset_delay(self, ilo):
time.sleep(30)
while True:
try:
ilo.get_fw_version()
return
except hpilo.IloCommunicationError:
time.sleep(10)
|