max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
crawl_data.py | Sharad24/NeurIPS2021Datasets-OpenReviewData | 493 | 12757412 | <gh_stars>100-1000
import numpy as np
import h5py
import string
from util import crawl_meta
import time
CRAWL_DATA = True
AFTER_DECISION = False
CRAWL_REVIEW = True
# Get the meta data
meta_list = crawl_meta(
meta_hdf5=None,
write_meta_name='data_{}.hdf5'.format(time.strftime("%Y%m%d%H%M%S")),
crawl_review=CRAWL_REVIEW)
num_withdrawn = len([m for m in meta_list if m.withdrawn or m.desk_reject])
print('Number of submissions: {} (withdrawn/desk reject submissions: {})'.format(
len(meta_list), num_withdrawn))
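# Hedged example (added here, not part of the crawled script): a finer breakdown of the
# crawled submissions, relying only on the 'withdrawn' and 'desk_reject' attributes
# already used above.
num_only_withdrawn = len([m for m in meta_list if m.withdrawn])
num_desk_reject = len([m for m in meta_list if m.desk_reject])
print('Withdrawn: {}, desk rejected: {}'.format(num_only_withdrawn, num_desk_reject))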
|
pyNastran/dev/bdf_vectorized/cards/elements/bar/cbar.py | ACea15/pyNastran | 293 | 12757443 | from numpy import array, arange, zeros, unique, searchsorted, full, nan
from numpy.linalg import norm # type: ignore
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer_8 import print_card_8, set_blank_if_default
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank,
double_or_blank, integer_double_or_blank, string_or_blank)
from pyNastran.bdf.cards.elements.bars import BAROR
from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default
from pyNastran.dev.bdf_vectorized.cards.elements.element import Element
class CBAR(Element):
"""
+-------+-----+-----+-----+-----+-----+-----+-----+------+
| CBAR | EID | PID | GA | GB | X1 | X2 | X3 | OFFT |
+-------+-----+-----+-----+-----+-----+-----+-----+------+
| | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |
+-------+-----+-----+-----+-----+-----+-----+-----+------+
or
+-------+-----+-----+-----+-----+-----+-----+-----+------+
| CBAR | EID | PID | GA | GB | G0 | | | OFFT |
+-------+-----+-----+-----+-----+-----+-----+-----+------+
| | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B |
+-------+-----+-----+-----+-----+-----+-----+-----+------+
+-------+-------+-----+-------+-------+--------+-------+-------+-------+
| CBAR | 2 | 39 | 7 | 6 | 105 | | | GGG |
+-------+-------+-----+-------+-------+--------+-------+-------+-------+
| | | 513 | 0.0+0 | 0.0+0 | -9. | 0.0+0 | 0.0+0 | -9. |
+-------+-------+-----+-------+-------+--------+-------+-------+-------+
"""
type = 'CBAR'
def __init__(self, model):
"""
Defines the CBAR object.
Parameters
----------
model : BDF
the BDF object
"""
Element.__init__(self, model)
def allocate(self, card_count):
ncards = card_count[self.type]
self.n = ncards
if self.n:
assert isinstance(ncards, int), ncards
float_fmt = self.model.float_fmt
#: Element ID
self.element_id = zeros(ncards, 'int32')
#: Property ID
self.property_id = zeros(ncards, 'int32')
self.node_ids = zeros((ncards, 2), 'int32')
self.is_g0 = zeros(ncards, 'bool')
self.g0 = full(ncards, nan, 'int32')
self.x = full((ncards, 3), nan, float_fmt)
self.offt = full(ncards, nan, '|U3')
self.pin_flags = zeros((ncards, 2), 'int32')
self.wa = zeros((ncards, 3), float_fmt)
self.wb = zeros((ncards, 3), float_fmt)
def add_card(self, card, comment=''):
i = self.i
if 0 and self.model.cbaror.n > 0:
cbaror = self.model.cbaror
pid_default = cbaror.property_id
is_g0_default = cbaror.is_g0
x1_default = cbaror.x[0]
x2_default = cbaror.x[1]
x3_default = cbaror.x[2]
g0_default = cbaror.g0
offt_default = cbaror.offt
else:
pid_default = None
is_g0_default = None
x1_default = 0.0
x2_default = 0.0
x3_default = 0.0
g0_default = None
offt_default = 'GGG'
eid = integer(card, 1, 'element_id')
self.element_id[i] = eid
if pid_default is not None:
self.property_id[i] = integer_or_blank(card, 2, 'property_id', pid_default)
else:
self.property_id[i] = integer_or_blank(card, 2, 'property_id', eid)
self.node_ids[i] = [integer(card, 3, 'GA'),
integer(card, 4, 'GB')]
#---------------------------------------------------------
# x / g0
if g0_default is not None:
field5 = integer_double_or_blank(card, 5, 'g0_x1', g0_default)
else:
field5 = integer_double_or_blank(card, 5, 'g0_x1', x1_default)
if isinstance(field5, integer_types):
self.is_g0[i] = True
self.g0[i] = field5
elif isinstance(field5, float):
self.is_g0[i] = False
x = array([field5,
double_or_blank(card, 6, 'x2', x2_default),
double_or_blank(card, 7, 'x3', x3_default)], dtype='float64')
self.x[i, :] = x
if norm(x) == 0.0:
msg = 'G0 vector defining plane 1 is not defined on CBAR %s.\n' % eid
msg += 'G0 = %s\n' % field5
msg += 'X = %s\n' % x
msg += '%s' % card
raise RuntimeError(msg)
else:
            msg = ('field5 on CBAR (G0/X1) is the wrong type...eid=%s field5=%s '
                   'type=%s' % (eid, field5, type(field5)))
raise RuntimeError(msg)
#---------------------------------------------------------
# offt
# bit doesn't exist on the CBAR
offt = string_or_blank(card, 8, 'offt', offt_default)
        msg = 'invalid offt parameter of CBAR...offt=%s' % offt
assert offt[0] in ['G', 'B', 'O', 'E'], msg
assert offt[1] in ['G', 'B', 'O', 'E'], msg
assert offt[2] in ['G', 'B', 'O', 'E'], msg
self.offt[i] = offt
self.pin_flags[i, :] = [integer_or_blank(card, 9, 'pa', 0),
integer_or_blank(card, 10, 'pb', 0)]
self.wa[i, :] = [double_or_blank(card, 11, 'w1a', 0.0),
double_or_blank(card, 12, 'w2a', 0.0),
double_or_blank(card, 13, 'w3a', 0.0),]
self.wb[i, :] = [double_or_blank(card, 14, 'w1b', 0.0),
double_or_blank(card, 15, 'w2b', 0.0),
double_or_blank(card, 16, 'w3b', 0.0),]
assert len(card) <= 17, 'len(CBAR card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def build(self):
if self.n:
i = self.element_id.argsort()
self.element_id = self.element_id[i]
self.property_id = self.property_id[i]
self.node_ids = self.node_ids[i, :]
self.is_g0 = self.is_g0[i]
self.g0 = self.g0[i]
self.x = self.x[i, :]
self.offt = self.offt[i]
self.pin_flags = self.pin_flags[i, :]
self.wa = self.wa[i, :]
self.wb = self.wb[i, :]
unique_eids = unique(self.element_id)
if len(unique_eids) != len(self.element_id):
raise RuntimeError('There are duplicate CBAR IDs...')
self._cards = []
else:
self.element_id = array([], dtype='int32')
self.property_id = array([], dtype='int32')
def update(self, maps):
"""
maps = {
'node_id' : nid_map,
'property' : pid_map,
}
"""
if self.n:
eid_map = maps['element']
nid_map = maps['node']
pid_map = maps['property']
for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id,
self.node_ids)):
self.element_id[i] = eid_map[eid]
self.property_id[i] = pid_map[pid]
self.node_ids[i, 0] = nid_map[nids[0]]
self.node_ids[i, 1] = nid_map[nids[1]]
#=========================================================================
def get_mass_by_element_id(self, grid_cid0=None, total=False):
"""
mass = rho * A * L + nsm
"""
if self.n == 0:
return 0.0
        # TODO: the full mass computation below is currently disabled by this early return
        return [0.0]
if grid_cid0 is None:
grid_cid0 = self.model.grid.get_position_by_node_index()
p1 = grid_cid0[self.node_ids[:, 0]]
p2 = grid_cid0[self.node_ids[:, 1]]
L = p2 - p1
i = self.model.properties_bar.get_index(self.property_id)
A = self.model.properties_bar.get_Area[i]
material_id = self.model.properties_bar.material_id[i]
rho, E, J = self.model.Materials.get_rho_E_J(material_id)
rho = self.model.Materials.get_rho(self.mid)
E = self.model.Materials.get_E(self.mid)
J = self.model.Materials.get_J(self.mid)
mass = norm(L, axis=1) * A * rho + self.nsm
if total:
return mass.sum()
else:
return mass
#=========================================================================
def write_card(self, bdf_file, size=8, element_ids=None):
if self.n:
if element_ids is None:
i = arange(self.n)
else:
                i = searchsorted(self.element_id, element_ids)
for (eid, pid, n, is_g0, g0, x, offt, pin, wa, wb) in zip(
self.element_id[i], self.property_id[i], self.node_ids[i],
self.is_g0[i], self.g0[i], self.x[i],
self.offt[i],
self.pin_flags[i], self.wa[i], self.wb[i]):
pa = set_blank_if_default(pin[0], 0)
pb = set_blank_if_default(pin[1], 0)
w1a = set_blank_if_default(wa[0], 0.0)
w2a = set_blank_if_default(wa[1], 0.0)
w3a = set_blank_if_default(wa[2], 0.0)
w1b = set_blank_if_default(wb[0], 0.0)
w2b = set_blank_if_default(wb[1], 0.0)
w3b = set_blank_if_default(wb[2], 0.0)
x1 = g0 if is_g0 else x[0]
x2 = 0 if is_g0 else x[1]
x3 = 0 if is_g0 else x[2]
offt = set_string8_blank_if_default(offt, 'GGG')
card = ['CBAR', eid, pid, n[0], n[1], x1, x2, x3, offt,
pa, pb, w1a, w2a, w3a, w1b, w2b, w3b]
if size == 8:
bdf_file.write(print_card_8(card))
else:
bdf_file.write(print_card_16(card))
def slice_by_index(self, i):
i = self._validate_slice(i)
obj = CBAR(self.model)
obj.n = len(i)
#obj._cards = self._cards[i]
#obj._comments = obj._comments[i]
#obj.comments = obj.comments[i]
obj.element_id = self.element_id[i]
obj.property_id = self.property_id[i]
obj.node_ids = self.node_ids[i, :]
obj.is_g0 = self.is_g0[i]
obj.g0 = self.g0[i]
obj.x = self.x[i, :]
obj.offt = self.offt[i]
obj.pin_flags = self.pin_flags[i]
obj.wa = self.wa[i]
obj.wb = self.wb[i]
return obj
#def get_stiffness_matrix(self, model, node_ids, index0s, fnorm=1.0):
#return K, dofs, n_ijv
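# Hedged usage sketch (not part of the original module): writing the example card from
# the class docstring with the 8-field writer imported above; the field values are
# copied from that docstring table.
if __name__ == '__main__':  # pragma: no cover
    fields = ['CBAR', 2, 39, 7, 6, 105, None, None, 'GGG',
              None, 513, 0.0, 0.0, -9., 0.0, 0.0, -9.]
    print(print_card_8(fields))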
|
cea/utilities/doc_graphviz.py | architecture-building-systems/cea-toolbox | 121 | 12757452 | <gh_stars>100-1000
"""
doc_graphviz.py
Creates the graphviz output used to visualize script dependencies.
This file relies on the schemas.yml to create the graphviz plots.
"""
import os
import cea.config
import cea.schemas
from jinja2 import Template
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "2.14"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def create_graphviz_files(graphviz_data, documentation_dir):
"""
:param dict graphviz_data: maps script names to a set of
(input/output, script, locator_method, folder_name, file_name)
:param documentation_dir: folder with the documentation in it ($repo/docs)
:return: None
"""
if os.path.exists(os.path.join(documentation_dir, "graphviz")):
for fname in os.listdir(os.path.join(documentation_dir, "graphviz")):
print("deleting {fname}".format(fname=fname))
os.remove(os.path.join(documentation_dir, "graphviz", fname))
for script_name in graphviz_data:
print("Creating graph for: {script_name}".format(**locals()))
# creating new variable to preserve original trace_data used by other methods
trace_data = shorten_trace_data_paths(sorted(graphviz_data[script_name]))
trace_data = unique_users_creators(trace_data)
# set of unique scripts
scripts = sorted(set([td[1] for td in trace_data]))
# set of common dirs for each file accessed by the script(s)
db_group = sorted(set(td[3] for td in trace_data))
# float containing the node width for the largest file name
width = 5
# jinja2 template setup and execution
template_path = os.path.join(documentation_dir, "templates", "graphviz_template.gv")
template = Template(open(template_path, 'r').read())
digraph = template.render(tracedata=trace_data, script_name=script_name, scripts=scripts, db_group=db_group,
width=width)
digraph = remove_extra_lines(digraph)
with open(os.path.join(documentation_dir, "graphviz", "{script}.gv".format(script=script_name)), 'w') as f:
f.write(digraph)
def unique_users_creators(trace_data):
"""
Make sure that the data does not define the same script as producer _and_ consumer at the same time. Prefer
producer.
:param trace_data: list of tuples of form (0:input/output, 1:script, 2:locator_method, 3:folder_name, 4:file_name)
:return: trace_data, filtered
"""
input_lms = set(t[2] for t in trace_data if t[0] == "input")
trace_data = [t for t in trace_data if t[0] == "input" or t[2] not in input_lms]
return trace_data
def remove_extra_lines(digraph):
digraph = "\n".join([line for line in digraph.split('\n') if len(line.strip())])
return digraph
def shorten_trace_data_paths(trace_data):
"""
Shorten the paths in trace_data to max 3 components
:param trace_data:
:return:
"""
for i, (direction, _script, method, path, db) in enumerate(trace_data):
path = "/".join(path.rsplit('/')[-3:]) # only keep max last 3 components
trace_data[i] = (direction, _script, method, path, db)
return trace_data
def get_list_of_digraphs(documentation_dir, schema_scripts):
list_of_digraphs = []
for script in schema_scripts:
graphviz_file = os.path.join(documentation_dir, 'graphviz/%s.gv' % script)
if os.path.isfile(graphviz_file):
underline = '-' * len(script)
with open(graphviz_file) as viz:
digraph = viz.read()
contents = [[script, underline, digraph]]
list_of_digraphs.extend(contents)
return list_of_digraphs
def main(_):
schemas = cea.schemas.schemas(plugins=[])
schema_scripts = cea.schemas.get_schema_scripts(plugins=[])
documentation_dir = os.path.join(os.path.dirname(cea.config.__file__), '..', 'docs')
graphviz_data = {}
for script in schema_scripts:
trace_data = set()
for locator_method in schemas:
file_path = schemas[locator_method]['file_path']
file_name = os.path.basename(file_path)
folder_name = os.path.dirname(file_path)
if script in schemas[locator_method]['created_by']:
trace_data.add(('output', script, locator_method, folder_name, file_name))
if script in schemas[locator_method]['used_by']:
trace_data.add(('input', script, locator_method, folder_name, file_name))
graphviz_data[script] = trace_data
create_graphviz_files(graphviz_data, documentation_dir)
list_of_digraphs = get_list_of_digraphs(documentation_dir=documentation_dir, schema_scripts=schema_scripts)
template_path = os.path.join(documentation_dir, "templates", "graphviz_template.rst")
template = Template(open(template_path, 'r').read())
with open(os.path.join(documentation_dir, 'script-data-flow.rst'), 'w') as fp:
fp.write(template.render(list_of_digraphs=list_of_digraphs))
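# Hedged example (not part of the original module): the two small pure helpers above can
# be exercised in isolation. The tuple layout follows the docstrings
# (direction, script, locator_method, folder_name, file_name); the values are made up.
def _demo_helpers():
    sample = [('input', 'demand', 'get_weather', 'a/b/c/d/e', 'weather.epw')]
    print(shorten_trace_data_paths(sample))          # path shortened to 'c/d/e'
    print(remove_extra_lines("digraph {\n\n  a -> b;\n\n}"))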
if __name__ == '__main__':
main(cea.config.Configuration())
|
cpgf/samples/irrlicht/05.userinterface.py | mousepawmedia/libdeps | 187 | 12757457 | cpgf._import(None, "builtin.debug");
cpgf._import(None, "builtin.core");
class SAppContext:
    device = None
    counter = 0
    listbox = None
Context = SAppContext();
GUI_ID_QUIT_BUTTON = 101;
GUI_ID_NEW_WINDOW_BUTTON = 102;
GUI_ID_FILE_OPEN_BUTTON = 103;
GUI_ID_TRANSPARENCY_SCROLL_BAR = 104;
def makeMyEventReceiver(receiver) :
def OnEvent(me, event) :
if event.EventType == irr.EET_GUI_EVENT :
id = event.GUIEvent.Caller.getID();
env = Context.device.getGUIEnvironment();
if event.GUIEvent.EventType == irr.EGET_SCROLL_BAR_CHANGED :
if id == GUI_ID_TRANSPARENCY_SCROLL_BAR :
pos = cpgf.cast(event.GUIEvent.Caller, irr.IGUIScrollBar).getPos();
skin = env.getSkin();
for i in range(irr.EGDC_COUNT) :
col = skin.getColor(i);
col.setAlpha(pos);
skin.setColor(i, col);
elif event.GUIEvent.EventType == irr.EGET_BUTTON_CLICKED :
if id == GUI_ID_QUIT_BUTTON :
Context.device.closeDevice();
return True;
elif id == GUI_ID_NEW_WINDOW_BUTTON :
Context.listbox.addItem("Window created");
Context.counter = Context.counter + 30;
if Context.counter > 200 :
Context.counter = 0;
window = env.addWindow(irr.rect_s32(100 + Context.counter, 100 + Context.counter, 300 + Context.counter, 200 + Context.counter), False, "Test window");
env.addStaticText("Please close me", irr.rect_s32(35,35,140,50), True, False, window);
return True;
elif id == GUI_ID_FILE_OPEN_BUTTON :
Context.listbox.addItem("File open");
env.addFileOpenDialog("Please choose a file.");
return True;
return False;
receiver.OnEvent = OnEvent;
def start() :
driverType = irr.driverChoiceConsole();
if driverType == irr.EDT_COUNT :
return 1;
device = irr.createDevice(driverType, irr.dimension2d_u32(640, 480));
if device == None :
return 1;
device.setWindowCaption("cpgf Irrlicht Python Binding - User Interface Demo");
device.setResizable(True);
driver = device.getVideoDriver();
env = device.getGUIEnvironment();
skin = env.getSkin();
font = env.getFont("../../media/fonthaettenschweiler.bmp");
if font :
skin.setFont(font);
skin.setFont(env.getBuiltInFont(), irr.EGDF_TOOLTIP);
env.addButton(irr.rect_s32(10,240,110,240 + 32), None, GUI_ID_QUIT_BUTTON, "Quit", "Exits Program");
env.addButton(irr.rect_s32(10,280,110,280 + 32), None, GUI_ID_NEW_WINDOW_BUTTON, "New Window", "Launches a Window");
env.addButton(irr.rect_s32(10,320,110,320 + 32), None, GUI_ID_FILE_OPEN_BUTTON, "File Open", "Opens a file");
env.addStaticText("Transparent Control:", irr.rect_s32(150,20,350,40), True);
scrollbar = env.addScrollBar(True, irr.rect_s32(150, 45, 350, 60), None, GUI_ID_TRANSPARENCY_SCROLL_BAR);
scrollbar.setMax(255);
scrollbar.setPos(env.getSkin().getColor(irr.EGDC_WINDOW).getAlpha());
env.addStaticText("Logging ListBox:", irr.rect_s32(50,110,250,130), True);
listbox = env.addListBox(irr.rect_s32(50, 140, 250, 210));
env.addEditBox("Editable Text", irr.rect_s32(350, 80, 550, 100));
Context.device = device;
Context.counter = 0;
Context.listbox = listbox;
MyEventReceiver = cpgf.cloneClass(irr.IEventReceiverWrapper);
makeMyEventReceiver(MyEventReceiver);
receiver = MyEventReceiver();
device.setEventReceiver(receiver);
env.addImage(driver.getTexture("../../media/irrlichtlogo2.png"), irr.position2d_s32(10,10));
while device.run() and driver :
if device.isWindowActive() :
driver.beginScene(True, True, irr.SColor(0,200,200,200));
env.drawAll();
driver.endScene();
device.drop();
return 0;
start();
|
tests/test_import.py | Attsun1031/schematics | 1,430 | 12757487 | # -*- coding: utf-8 -*-
from copy import deepcopy
import pytest
from schematics.models import Model
from schematics.types import *
from schematics.types.compound import *
from schematics.exceptions import *
from schematics.undefined import Undefined
@pytest.mark.parametrize('init', (True, False))
def test_import_data(init):
class M(Model):
a, b, c, d = IntType(), IntType(), IntType(), IntType()
m = M({
'a': 1,
'b': None,
'c': 3
}, init=init)
m.import_data({
'a': None,
'b': 2
})
if init:
assert m._data == {'a': None, 'b': 2, 'c': 3, 'd': None}
else:
assert m._data == {'a': None, 'b': 2, 'c': 3}
@pytest.mark.parametrize('init', (True, False))
def test_import_data_with_error(init):
class M(Model):
a, b, c, d = IntType(), IntType(), IntType(required=True), IntType()
m = M({
'a': 1,
'b': None,
'c': 3
}, init=init)
with pytest.raises(DataError):
m.import_data({
'a': None,
'b': 2,
'c': None,
})
if init:
assert m._data == {'a': 1, 'b': None, 'c': 3, 'd': None}
else:
assert m._data == {'a': 1, 'b': None, 'c': 3}
@pytest.mark.parametrize('preconvert_source, populate_source',
[( False, None),
( True, True),
( True, False)])
@pytest.mark.parametrize('recursive, populate_target, init_to_none, populated_result',
[( False, True, True, True),
( False, False, False, False),
( True, True, True, True),
( True, False, True, True),
( True, False, False, False)])
def test_complex_import_data(recursive, preconvert_source, populate_source, populate_target,
init_to_none, populated_result):
class M(Model):
intfield = IntType(max_value=2)
matrixfield = ListType(ListType(IntType))
dictfield = DictType(IntType)
modelfield = ModelType('M')
origdict = {
'intfield': '1',
'dictfield': dict(a=1, b=2),
'modelfield': {
'intfield': '2',
'matrixfield': [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
'dictfield': dict(a=11, b=22),
'modelfield': {
'intfield': '3',
'dictfield': dict(a=111, b=222)}}}
m = M(origdict, init=populate_target)
sourcedict = {
'intfield': '101',
'dictfield': dict(c=3),
'modelfield': {
'matrixfield': [[9]],
'modelfield': {
'intfield': '103',
'dictfield': dict(c=33)}}}
sourcedata = deepcopy(sourcedict)
if preconvert_source:
sourcedata = M(sourcedata, init=populate_source)
m.import_data(sourcedata, recursive=recursive, init_values=init_to_none)
assert id(m) != id(sourcedata)
if preconvert_source and populate_source:
assert m == M(sourcedict, init=True)
elif recursive:
assert m == M({
'intfield': '101',
'dictfield': dict(c=3),
'modelfield': {
'intfield': '2',
'matrixfield': [[9]],
'dictfield': dict(a=11, b=22),
'modelfield': {
'intfield': '103',
'dictfield': dict(c=33)}}}, init=populated_result)
else:
assert m == M(sourcedict, init=populated_result)
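# Hedged illustration (not part of the original test module): the plain, non-recursive
# import_data behaviour asserted in test_import_data, shown outside the pytest harness.
def _demo_import_data():
    class M(Model):
        a, b = IntType(), IntType()
    m = M({'a': 1}, init=False)
    m.import_data({'b': 2})
    assert m._data == {'a': 1, 'b': 2}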
|
tests/algorithms/memory/test_bam.py | FrostByte266/neupy | 801 | 12757494 | import pickle
import numpy as np
from neupy import algorithms
from neupy.exceptions import NotTrained
from algorithms.memory.data import zero, one, half_one, half_zero
from base import BaseTestCase
from helpers import vectors_for_testing
zero_hint = np.array([[0, 1, 0, 0]])
one_hint = np.array([[1, 0, 0, 0]])
class BAMTestCase(BaseTestCase):
def setUp(self):
super(BAMTestCase, self).setUp()
self.data = np.concatenate([zero, one], axis=0)
self.hints = np.concatenate([zero_hint, one_hint], axis=0)
def test_bam_exceptions(self):
with self.assertRaises(NotTrained):
dbnet = algorithms.DiscreteBAM()
dbnet.predict(np.array([0, 1]))
with self.assertRaises(NotTrained):
dbnet = algorithms.DiscreteBAM()
dbnet.predict_input(np.array([0, 1]))
with self.assertRaises(ValueError):
dbnet = algorithms.DiscreteBAM()
dbnet.weight = np.array([[0, 1], [1, 0]])
dbnet.train(np.array([0, 1, 1]), np.array([0, 1]))
def test_bam_X_validation(self):
dbnet = algorithms.DiscreteBAM()
dbnet.weight = np.array([[0, 1], [1, 0]])
with self.assertRaises(ValueError):
# Invalid discrete input values
dbnet.train(np.array([-1, 1]), np.array([0, 1]))
with self.assertRaises(ValueError):
dbnet.train(np.array([0, 1]), np.array([-1, 1]))
with self.assertRaises(ValueError):
dbnet.energy(np.array([-1, 1]), np.array([0, 1]))
with self.assertRaises(ValueError):
dbnet.energy(np.array([0, 1]), np.array([-1, 1]))
with self.assertRaises(ValueError):
dbnet.predict(np.array([-1, 1]))
def test_discrete_bam_storage(self):
network = algorithms.DiscreteBAM(mode='sync')
network.train(self.data, self.hints)
stored_network = pickle.dumps(network)
loaded_network = pickle.loads(stored_network)
network_prediction = network.predict(self.data)
loaded_network_prediction = loaded_network.predict(self.data)
np.testing.assert_array_almost_equal(
loaded_network_prediction[0], network_prediction[0])
np.testing.assert_array_almost_equal(
loaded_network_prediction[1], network_prediction[1])
def test_discrete_bam_sync(self):
bamnet = algorithms.DiscreteBAM(mode='sync')
bamnet.train(self.data, self.hints)
data_before = self.data.copy()
hints_before = self.hints.copy()
np.testing.assert_array_almost_equal(
bamnet.predict(half_zero)[1],
zero_hint
)
np.testing.assert_array_almost_equal(
bamnet.predict_output(half_one)[1],
one_hint
)
np.testing.assert_array_almost_equal(
bamnet.predict_input(zero_hint)[0],
zero
)
np.testing.assert_array_almost_equal(
bamnet.predict_input(one_hint)[0],
one
)
# Test 1d input array prediction
np.testing.assert_array_almost_equal(
bamnet.predict_input(one_hint.ravel())[0],
one
)
# Test 1d output array input prediction
np.testing.assert_array_almost_equal(
bamnet.predict_output(half_one.ravel())[1],
one_hint
)
# Test multiple input values prediction
input_matrix = np.vstack([one, zero])
output_matrix = np.vstack([one_hint, zero_hint])
output_matrix_before = output_matrix.copy()
input_matrix_before = input_matrix.copy()
np.testing.assert_array_almost_equal(
bamnet.predict_input(output_matrix)[0],
input_matrix
)
np.testing.assert_array_almost_equal(
bamnet.predict(input_matrix)[1],
output_matrix
)
np.testing.assert_array_equal(self.data, data_before)
np.testing.assert_array_equal(self.hints, hints_before)
np.testing.assert_array_equal(output_matrix, output_matrix_before)
np.testing.assert_array_equal(input_matrix, input_matrix_before)
def test_discrete_bam_async(self):
bamnet = algorithms.DiscreteBAM(mode='async', n_times=400)
data_before = self.data.copy()
hints_before = self.hints.copy()
bamnet.train(self.data, self.hints)
input_matrix = np.vstack([one, zero])
output_matrix = np.vstack([one_hint, zero_hint])
output_matrix_before = output_matrix.copy()
input_matrix_before = input_matrix.copy()
np.testing.assert_array_almost_equal(
bamnet.predict_input(output_matrix)[0],
input_matrix
)
np.testing.assert_array_almost_equal(
bamnet.predict_output(input_matrix)[1],
output_matrix
)
np.testing.assert_array_equal(self.data, data_before)
np.testing.assert_array_equal(self.hints, hints_before)
np.testing.assert_array_equal(output_matrix, output_matrix_before)
np.testing.assert_array_equal(input_matrix, input_matrix_before)
def test_bam_argument_in_predict_method(self):
dbnet = algorithms.DiscreteBAM(mode='async', n_times=1)
dbnet.train(self.data, self.hints)
self.assertTrue(np.any(one != dbnet.predict_output(half_one)[0]))
np.testing.assert_array_almost_equal(
one, dbnet.predict_output(half_one, n_times=100)[0])
def test_bam_energy_function(self):
input_vector = np.array([[1, 0, 0, 1, 1, 0, 0]])
output_vector = np.array([[1, 0]])
dbnet = algorithms.DiscreteBAM()
dbnet.train(input_vector, output_vector)
self.assertEqual(-7, dbnet.energy(input_vector, output_vector))
self.assertEqual(0, dbnet.energy(
np.array([[0, 0, 0, 0, 0, 0, 0]]),
np.array([[0, 0]])
))
self.assertEqual(-7, dbnet.energy(
np.array([[0, 1, 1, 0, 0, 1, 1]]),
np.array([[0, 1]])
))
# Test 1d array
self.assertEqual(-7, dbnet.energy(
np.array([0, 1, 1, 0, 0, 1, 1]),
np.array([0, 1])
))
# Test multiple input values energy calculation
np.testing.assert_array_almost_equal(
np.array([-7, 0]),
dbnet.energy(
np.array([
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
]),
np.array([
[0, 1],
[0, 0],
])
)
)
def test_bam_train_different_inputs(self):
self.assertInvalidVectorTrain(
algorithms.DiscreteBAM(),
np.array([1, 0, 0, 1]),
np.array([1, 0]),
is_feature1d=False)
def test_bam_predict_different_inputs(self):
bamnet = algorithms.DiscreteBAM()
data = np.array([[1, 0, 0, 1]])
target = np.array([[1, 0]])
bamnet.train(data, target)
test_vectors = vectors_for_testing(
data.reshape(data.size), is_feature1d=False)
for test_vector in test_vectors:
np.testing.assert_array_almost_equal(
bamnet.predict(test_vector)[1], target)
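# Hedged usage sketch (not part of the original test module), mirroring the
# train/predict pattern exercised by the tests above.
def _demo_discrete_bam():
    data = np.array([[1, 0, 0, 1]])
    target = np.array([[1, 0]])
    bamnet = algorithms.DiscreteBAM(mode='sync')
    bamnet.train(data, target)
    recalled_input, recalled_output = bamnet.predict(data)
    np.testing.assert_array_almost_equal(recalled_output, target)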
|
venv/Lib/site-packages/IPython/core/magics/auto.py | ajayiagbebaku/NFL-Model | 6,989 | 12757506 | """Implementation of magic functions that control various automatic behaviors.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.core.magic import Bunch, Magics, magics_class, line_magic
from IPython.testing.skipdoctest import skip_doctest
from logging import error
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class AutoMagics(Magics):
"""Magics that control various autoX behaviors."""
def __init__(self, shell):
super(AutoMagics, self).__init__(shell)
# namespace for holding state we may need
self._magic_state = Bunch()
@line_magic
def automagic(self, parameter_s=''):
"""Make magic functions callable without having to type the initial %.
Without arguments toggles on/off (when off, you must call it as
%automagic, of course). With arguments it sets the value, and you can
use any of (case insensitive):
- on, 1, True: to activate
- off, 0, False: to deactivate.
Note that magic functions have lowest priority, so if there's a
variable whose name collides with that of a magic fn, automagic won't
work for that function (you get the variable instead). However, if you
delete the variable (del var), the previously shadowed magic function
becomes visible to automagic again."""
arg = parameter_s.lower()
mman = self.shell.magics_manager
if arg in ('on', '1', 'true'):
val = True
elif arg in ('off', '0', 'false'):
val = False
else:
val = not mman.auto_magic
mman.auto_magic = val
print('\n' + self.shell.magics_manager.auto_status())
@skip_doctest
@line_magic
def autocall(self, parameter_s=''):
"""Make functions callable without having to type parentheses.
Usage:
%autocall [mode]
The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
value is toggled on and off (remembering the previous state).
In more detail, these values mean:
0 -> fully disabled
1 -> active, but do not apply if there are no arguments on the line.
In this mode, you get::
In [1]: callable
Out[1]: <built-in function callable>
In [2]: callable 'hello'
------> callable('hello')
Out[2]: False
2 -> Active always. Even if no arguments are present, the callable
object is called::
In [2]: float
------> float()
Out[2]: 0.0
Note that even with autocall off, you can still use '/' at the start of
a line to treat the first argument on the command line as a function
and add parentheses to it::
In [8]: /str 43
------> str(43)
Out[8]: '43'
# all-random (note for auto-testing)
"""
if parameter_s:
arg = int(parameter_s)
else:
arg = 'toggle'
if not arg in (0, 1, 2, 'toggle'):
            error('Valid modes: (0->Off, 1->Smart, 2->Full)')
return
if arg in (0, 1, 2):
self.shell.autocall = arg
else: # toggle
if self.shell.autocall:
self._magic_state.autocall_save = self.shell.autocall
self.shell.autocall = 0
else:
try:
self.shell.autocall = self._magic_state.autocall_save
except AttributeError:
self.shell.autocall = self._magic_state.autocall_save = 1
print("Automatic calling is:",['OFF','Smart','Full'][self.shell.autocall])
|
lib/dynamic_screening_solutions/utils.py | goztrk/django-htk | 206 | 12757510 | # Python Standard Library Imports
import base64
import hashlib
import hmac
import json
# HTK Imports
from htk.utils import htk_setting
from htk.utils.general import resolve_method_dynamically
def validate_webhook_request(request):
"""Validates a 321Forms webhook request
Returns a JSON request body if it is valid
Otherwise, returns None
"""
webhook_data = json.loads(request.body)
company_id = webhook_data.get('company', {}).get('id')
headers = request.META
expected_signature = headers.get('HTTP_X_ONBOARDING_SIGNATURE', '')
hash_key_retriever = resolve_method_dynamically(htk_setting('HTK_321FORMS_WEBHOOK_HASH_KEY_RETRIEVER'))
hash_key = hash_key_retriever(company_id)
    if isinstance(hash_key, str):
        hash_key = hash_key.encode('utf-8')
    signature = base64.b64encode(
        hmac.new(
            hash_key,
            request.body,
            digestmod=hashlib.sha1
        ).digest()
    ).decode('utf-8')
    is_valid = signature == expected_signature
if is_valid:
webhook_data = webhook_data
else:
webhook_data = None
return webhook_data
def handle_webhook_request(webhook_data):
topic = webhook_data.get('topic', None)
event_handlers = htk_setting('HTK_321FORMS_WEBHOOK_EVENT_HANDLERS')
event_handler_method = event_handlers.get(topic)
event_handler = resolve_method_dynamically(event_handler_method) if event_handler_method else None
if event_handler:
event_handler(webhook_data)
else:
pass
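# Hedged sketch (not part of the original module): how a caller could compute the
# signature that validate_webhook_request() checks, given the same hash key. The key
# and payload below are made-up test values.
def _demo_signature(hash_key=b'test-key'):
    body = json.dumps({'company': {'id': 1}, 'topic': 'ping'}).encode('utf-8')
    return base64.b64encode(
        hmac.new(hash_key, body, digestmod=hashlib.sha1).digest()
    ).decode('utf-8')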
|
server.py | Totsui/Voice_cloner | 113 | 12757511 | import socket
import os
from playsound import playsound
from pydub import AudioSegment
def sendToClient(msg):
msg = msg.decode('utf-8')
lang = msg[:3] # ITA or ENG
msg = msg[3:] # actual message
words = msg.split(" ")
if len(words) > 18:
sentences = []
sentence = ""
for i in range(len(words)):
sentence += words[i] + " "
if i%12 == 0 and i != 0:
sentences.append(sentence)
sentence = ""
elif i == len(words)-1:
sentences.append(sentence)
with open('harvard_sentences.txt','w') as f:
first = True
for i, sentence in enumerate(sentences, start=1):
if first:
f.write("first line\n1. "+str(sentence)+"\n")
first = False
else:
f.write(f"{i}. {str(sentence)}\n")
num_sentences = len(sentences)
else:
with open('harvard_sentences.txt','w') as f:
f.write("first line\n1. "+str(msg)+"\n")
num_sentences = 1
os.system('python synthesize.py '+lang)
sounds = 0
for i in range(0, num_sentences):
sounds += AudioSegment.from_wav(f"samples/{i+1}.wav")
# increase volume by 10dB
sounds += 10
sounds.export("backup/final.wav", format="wav")
f.close()
with open('backup/final.wav', 'rb') as f:
audiob = f.read()
clientsocket.send(audiob)
clientsocket.close()
f.close()
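# Hedged client-side sketch (not part of the original server): the handler above expects
# a 3-letter language prefix ("ITA"/"ENG") followed by the text, and replies with raw WAV
# bytes on port 1234. Host, output path and buffer size are assumptions; a real client
# would loop on recv() until the connection closes.
def _demo_client(host="127.0.0.1"):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, 1234))
    client.send("ENGHello world".encode('utf-8'))
    audio = client.recv(1024 * 1024)
    with open("received.wav", "wb") as out:
        out.write(audio)
    client.close()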
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("0.0.0.0", 1234))
s.listen(5)
while True:
print("Waiting for connection...")
clientsocket, address = s.accept()
print(f"Connection from {address} has been established")
msg = clientsocket.recv(2048)
print(msg)
sendToClient(msg)
|
jchart/tests.py | monasysinfo/django-jchart | 125 | 12757514 | <reponame>monasysinfo/django-jchart<filename>jchart/tests.py
import json
from django.test import TestCase, RequestFactory
from django.utils import six
from django.core.exceptions import ImproperlyConfigured
from .views import ChartView
from . import Chart
from .config import (Title, Legend, Tooltips, Hover,
InteractionModes, Animation, Element,
ElementArc, Axes, ScaleLabel, Tick, rgba)
class LineChart(Chart):
chart_type = 'line'
title = Title(text='Test Title Line')
legend = Legend(display=False)
tooltips = Tooltips(enabled=False)
hover = Hover(mode='default')
animation = Animation(duration=1.0)
scales = {
'xAxes': [Axes(display=False, type='time', position='bottom')],
'yAxes': [Axes(type='linear',
position='left',
scaleLabel=ScaleLabel(fontColor='#fff'),
ticks=Tick(fontColor='#fff')
)],
}
def get_datasets(self, *args, **kwargs):
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
return [dict(label='Test Line Chart', data=data)]
class LineChartParameterized(LineChart):
def get_datasets(self, currency_type):
eur_data = list(range(10))
do_data = list(range(10, 20))
if currency_type == 'euro':
return [dict(label='Euro Chart', data=eur_data)]
elif currency_type == 'dollar':
return [dict(label='Dollar Chart', data=do_data)]
raise ValueError('Unkown currency type: {}'.format(currency_type))
class LineChartUnresponsive(LineChart):
responsive = False
class BarChart(Chart):
chart_type = 'radar'
title = Title(text='Test Title')
def get_datasets(self, *args, **kwargs):
data = []
return [dict(label='Test Radar Chart', data=data)]
class PolarChart(Chart):
chart_type = 'polarArea'
title = Title(text='Test Title')
def get_datasets(self, *args, **kwargs):
data = []
return [dict(label='Test Polar Chart', data=data)]
class RadarChart(Chart):
chart_type = 'bar'
title = Title(text='Test Title')
def get_datasets(self, *args, **kwargs):
data = []
return [dict(label='Test Line Chart', data=data)]
class PieChart(Chart):
chart_type = 'pie'
title = Title(text='Test Title')
def get_datasets(self, *args, **kwargs):
data = []
return [dict(label='Test Pie Chart', data=data)]
class BubbleChart(Chart):
chart_type = 'bubble'
title = Title(text='Test Title')
def get_datasets(self, *args, **kwargs):
data = []
return [dict(label='Test Bubble Chart', data=data)]
class OptionsChart(Chart):
chart_type = 'line'
title = Title(text='Precendence')
options = {
'title': Title(text='Overriden'),
'responsive': True,
'maintainAspectRatio': True,
}
def get_datasets(self, *args, **kwargs):
data = [1, 2, 3, 4, 5, 6, 7, 8, 9]
return [dict(label='Test Line Chart', data=data)]
class ChartViewTestToolkit(TestCase):
classes = None
url_kwargs = {}
@property
def request(self):
request_factory = RequestFactory()
return request_factory.get('/test-url')
@property
def responses(self):
for klass in self.classes:
yield ChartView.from_chart(klass())(self.request, **self.url_kwargs)
class ChartViewTestToolkitSolo(ChartViewTestToolkit):
klass = None
url_kwargs = {}
@property
def response(self):
return ChartView.from_chart(self.klass())(self.request, **self.url_kwargs)
@property
def data(self):
charset = getattr(self.response, 'charset', 'utf-8')
data = self.response.content.decode(charset)
return json.loads(data)
class ChartResponseTestCase(ChartViewTestToolkit):
classes = (
LineChart,
BarChart,
PolarChart,
RadarChart,
PieChart,
BubbleChart,
)
def test_status_code(self):
for response in self.responses:
self.assertEquals(response.status_code, 200)
def test_content_type(self):
for response in self.responses:
self.assertEquals(response.get('content-type'), 'application/json')
def test_chart_config(self):
for response in self.responses:
charset = getattr(response, 'charset', 'utf-8')
content = response.content.decode(charset)
data = json.loads(content)
self.assertIn('data', data)
self.assertIn('options', data)
self.assertIn('type', data)
self.assertTrue(isinstance(data['data'], dict))
self.assertTrue(isinstance(data['options'], dict))
self.assertTrue(isinstance(data['type'], (six.string_types, six.text_type)))
self.assertIn(data['type'], ['bar', 'line', 'radar', 'polarArea', 'pie', 'bubble'])
self.assertIn('title', data['options'])
class LineChartTestCase(ChartViewTestToolkitSolo):
klass = LineChart
def test_title(self):
self.assertEquals(self.data['options']['title']['text'], 'Test Title Line')
def test_legend(self):
self.assertEquals(self.data['options']['legend']['display'], False)
def test_tooltips(self):
self.assertEquals(self.data['options']['tooltips']['enabled'], False)
def test_hover(self):
self.assertEquals(self.data['options']['hover']['mode'], 'default')
def test_animation(self):
self.assertEquals(self.data['options']['animation']['duration'], 1.0)
def test_dataset(self):
self.assertEquals(len(self.data['data']['datasets']), 1)
self.assertEquals(len(self.data['data']['labels']), 0)
self.assertEquals(self.data['data']['datasets'][0]['data'], list(range(1, 10)))
class TestConfigADTS(TestCase):
def test_rgba(self):
self.assertEquals(rgba(255, 255, 255), 'rgba(255,255,255,1.0)')
self.assertEquals(rgba(255, 255, 255, 0.0), 'rgba(255,255,255,0.0)')
def test_title(self):
title = Title(text='Hello World')
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: Title(nonsense='something'))
def test_legend(self):
title = Legend(display=False)
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: Legend(nonsense='something'))
def test_tooltips(self):
title = Tooltips(enabled=True)
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: Tooltips(nonsense='something'))
def test_hover(self):
title = Hover(mode='default')
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: Hover(nonsense='something'))
def test_interaction_modes(self):
title = InteractionModes(label='Hello World')
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: InteractionModes(nonsense='something'))
def test_animation(self):
title = Animation(duration=1.0)
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: Animation(nonsense='something'))
def test_element(self):
arc = ElementArc(borderColor=rgba(255, 255, 255, 1))
title = Element(arc=arc)
self.assertTrue(isinstance(title, dict))
self.assertRaises(ValueError, lambda: Element(nonsense='something'))
def test_scales(self):
axes = Axes(type='linear',
position='left',
scaleLabel=ScaleLabel(fontColor='#fff'),
ticks=Tick(fontColor='#fff')
)
self.assertTrue(isinstance(axes, dict))
self.assertRaises(ValueError, lambda: Axes(nonsense='something'))
class ChartViewTestCase(TestCase):
def test_chart_view(self):
self.assertTrue(getattr(ChartView, 'from_chart', False))
self.assertRaises(ImproperlyConfigured,
lambda: ChartView())
def test_chart_view_from_chart_classonly(self):
ChartViewSubClass = type('ChartViewSubClass', (ChartView, ), {
'chart_instance': LineChart()
})
chart_view = ChartViewSubClass()
self.assertRaises(AttributeError,
lambda: chart_view.from_chart(LineChart()))
def test_chart_view_from_chart(self):
self.assertRaises(ImproperlyConfigured,
lambda: ChartView.from_chart(dict()))
self.assertRaises(ImproperlyConfigured,
lambda: ChartView.from_chart(LineChart))
ChartView.from_chart(LineChart())
def test_chart_view_get(self):
ChartViewSubClass = type('ChartViewSubClass', (ChartView, ), {
'chart_instance': LineChart()
})
chart_view = ChartViewSubClass()
request_factory = RequestFactory()
request = request_factory.get('/test-url')
response = chart_view.get(request)
self.assertEquals(response.status_code, 200)
charset = getattr(response, 'charset', 'utf-8')
content = response.content.decode(charset)
data = json.loads(content)
self.assertIn('data', data)
self.assertIn('options', data)
self.assertIn('type', data)
self.assertTrue(isinstance(data['data'], dict))
self.assertTrue(isinstance(data['options'], dict))
self.assertTrue(isinstance(data['type'], (six.string_types, six.text_type)))
self.assertIn(data['type'], ['bar', 'line', 'radar', 'polarArea', 'pie', 'bubble'])
self.assertIn('title', data['options'])
class ChartTestCase(TestCase):
def test_chart_dimension(self):
line_chart = LineChartUnresponsive(width=1000, height=500)
self.assertEquals(line_chart.width, 1000)
self.assertEquals(line_chart.height, 500)
self.assertIn('height: 500px', line_chart.as_html())
self.assertIn('width: 1000px', line_chart.as_html())
def test_chart_no_dimension(self):
line_chart = LineChart()
self.assertEquals(line_chart.width, None)
self.assertEquals(line_chart.height, None)
self.assertNotIn('height:', line_chart.as_html())
self.assertNotIn('width:', line_chart.as_html())
def test_chart_html_id(self):
line_chart = LineChart(html_id='test-id')
self.assertIn('id="test-id"', line_chart.as_html())
def test_chart_render_html(self):
line_chart = LineChart()
context = {
'html_id': 'test-id',
'chart': line_chart,
'chart_configuration': line_chart.get_configuration(),
}
html = line_chart.render_html(context)
self.assertNotIn('<script', html)
def test_chart_render_js(self):
line_chart = LineChart()
context = {
'html_id': 'test-id',
'chart': line_chart,
'chart_configuration': line_chart.get_configuration(),
}
js = line_chart.render_js(context)
self.assertNotIn('<canvas', js)
def test_responsive_height_width(self):
LineChartUnresponsive(height=500)
self.assertRaises(ImproperlyConfigured,
lambda: LineChart(height=500))
def test_chart_parameterization(self):
chart = LineChartParameterized()
self.assertNotIn('Dollar Chart', chart.as_html('euro'))
self.assertIn('Euro Chart', chart.as_html('euro'))
self.assertNotIn('Euro Chart', chart.as_html('dollar'))
self.assertIn('Dollar Chart', chart.as_html('dollar'))
class AsyncChartParameterization(ChartViewTestToolkitSolo):
klass = LineChartParameterized
def test_euro(self):
self.url_kwargs = dict(currency_type='euro')
self.assertEquals('Euro Chart',
self.data['data']['datasets'][0]['label'])
def test_dollar(self):
self.url_kwargs = dict(currency_type='dollar')
self.assertEquals('Dollar Chart',
self.data['data']['datasets'][0]['label'])
class OptionsChartTestCase(ChartViewTestToolkitSolo):
klass = OptionsChart
def test_precedence(self):
title = self.data['options']['title']['text']
responsive = self.data['options']['responsive']
maintainAspectRatio = self.data['options']['maintainAspectRatio']
self.assertEquals('Precendence', title)
self.assertTrue(responsive)
self.assertTrue(maintainAspectRatio)
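# Hedged usage sketch (not part of the original test module): rendering one of the chart
# classes defined above outside the test harness.
def _demo_render():
    chart = LineChart()
    html = chart.as_html()                 # canvas markup plus inline configuration
    config = chart.get_configuration()     # configuration used when serving the chart asynchronously
    return html, config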
|
AutotestWebD/apps/common/func/send_mail.py | yangjourney/sosotest | 422 | 12757524 | from AutotestWebD.settings import EMAIL_SENDER,EMAIL_PASSWORD,EMAIL_SERVER,EMAIL_USERNAME
import smtplib
import traceback
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.header import Header
import os
import email.encoders
import time
import re
import logging
# NOTE: user_logger is referenced below but was never defined in this module; a plain
# module-level logger is assumed here so the retry branch does not raise NameError.
user_logger = logging.getLogger(__name__)
def send_mail(email_list, subject, email_text, filepath="", sub_type="text"):
try:
        # send the email
receiver = list(set(email_list.split(';')))
sender = EMAIL_SENDER
smtpserver = EMAIL_SERVER
username = EMAIL_USERNAME
password = <PASSWORD>
        # three parameters: the text content, 'plain' for the text format, and 'utf-8' for the encoding
msg = MIMEMultipart() #
if sub_type == "text":
text_msg = MIMEText(email_text, 'plain', 'utf-8') # 文本格式
elif sub_type == "html":
text_msg = MIMEText(email_text, _subtype='html', _charset='utf-8') # html格式
else:
text_msg = MIMEText(email_text, 'plain', 'utf-8') # 文本格式
msg.attach(text_msg)
msg['From'] = sender
msg['To'] = ";".join(receiver)
msg['Subject'] = Header(subject, 'utf-8')
        # build a MIMEBase object as the file attachment and attach it to the root container
filepath = filepath.strip()
if os.path.isfile(filepath):
contype = 'application/octet-stream'
maintype, subtype = contype.split('/', 1)
data = open(filepath, 'rb')
file_msg = MIMEBase(maintype, subtype)
file_msg.set_payload(data.read())
data.close()
email.encoders.encode_base64(file_msg)
filename_list = filepath.split('/')
filename = filename_list[len(filename_list) - 1]
basename = os.path.basename(filename)
file_msg.add_header('Content-Disposition', 'attachment', filename=basename)
msg.attach(file_msg)
is_send_success = False
resend_times = 0
for i in range(0, 3):
smtp = ""
try:
smtp = smtplib.SMTP(smtpserver)
smtp.login(username, password)
# 用smtp发送邮件
smtp.sendmail(sender, receiver, msg.as_string())
is_send_success = True
break
except Exception as e:
resend_times += 1
user_logger.debug("发送第%s次失败!10秒后重试!" % resend_times)
user_logger.error(traceback.format_exc())
time.sleep(10) # 休眠10秒,10秒后重发
if len(receiver) == 0:
return False
finally:
if smtp != "":
smtp.quit()
if is_send_success:
return True
else:
return False
except Exception as e:
print(traceback.format_exc())
return False
def whether_display_name(namestr):
if re.match("^[\u4e00-\u9fa5]{2,4}\([a-z]{1,}[0-9]{0,}\)$", namestr):
return True
else:
return False
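# Hedged usage sketch (not part of the original module): sending an HTML mail with an
# attachment through send_mail(); the addresses and file path are made-up values.
def _demo_send(recipients='alice@example.com;bob@example.com'):
    return send_mail(recipients, 'Nightly report',
                     '<h1>All tests passed</h1>',
                     filepath='/tmp/report.html', sub_type='html')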
if __name__ == "__main__":
    # NOTE: get_email_list() is not defined in this module, so this manual test raises NameError as written.
    retstr = get_email_list("<EMAIL>;<EMAIL>;王蕾(wanglei05);,<EMAIL>,")
print(retstr)
print(type(retstr))
|
sdk/customproviders/azure-mgmt-customproviders/azure/mgmt/customproviders/models/__init__.py | rsdoherty/azure-sdk-for-python | 2,728 | 12757531 | <reponame>rsdoherty/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Association
from ._models_py3 import AssociationsList
from ._models_py3 import CustomRPActionRouteDefinition
from ._models_py3 import CustomRPManifest
from ._models_py3 import CustomRPResourceTypeRouteDefinition
from ._models_py3 import CustomRPRouteDefinition
from ._models_py3 import CustomRPValidations
from ._models_py3 import ErrorDefinition
from ._models_py3 import ErrorResponse
from ._models_py3 import ListByCustomRPManifest
from ._models_py3 import Resource
from ._models_py3 import ResourceProviderOperation
from ._models_py3 import ResourceProviderOperationDisplay
from ._models_py3 import ResourceProviderOperationList
from ._models_py3 import ResourceProvidersUpdate
except (SyntaxError, ImportError):
from ._models import Association # type: ignore
from ._models import AssociationsList # type: ignore
from ._models import CustomRPActionRouteDefinition # type: ignore
from ._models import CustomRPManifest # type: ignore
from ._models import CustomRPResourceTypeRouteDefinition # type: ignore
from ._models import CustomRPRouteDefinition # type: ignore
from ._models import CustomRPValidations # type: ignore
from ._models import ErrorDefinition # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import ListByCustomRPManifest # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceProviderOperation # type: ignore
from ._models import ResourceProviderOperationDisplay # type: ignore
from ._models import ResourceProviderOperationList # type: ignore
from ._models import ResourceProvidersUpdate # type: ignore
from ._customproviders_enums import (
ActionRouting,
ProvisioningState,
ResourceTypeRouting,
ValidationType,
)
__all__ = [
'Association',
'AssociationsList',
'CustomRPActionRouteDefinition',
'CustomRPManifest',
'CustomRPResourceTypeRouteDefinition',
'CustomRPRouteDefinition',
'CustomRPValidations',
'ErrorDefinition',
'ErrorResponse',
'ListByCustomRPManifest',
'Resource',
'ResourceProviderOperation',
'ResourceProviderOperationDisplay',
'ResourceProviderOperationList',
'ResourceProvidersUpdate',
'ActionRouting',
'ProvisioningState',
'ResourceTypeRouting',
'ValidationType',
]
|
wakatime/main.py | sklirg/wakatime | 220 | 12757542 | # -*- coding: utf-8 -*-
"""
wakatime.main
~~~~~~~~~~~~~
Module entry point.
:copyright: (c) 2013 <NAME>.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import logging
import os
import sys
import time
import traceback
pwd = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(pwd))
sys.path.insert(0, os.path.join(pwd, 'packages'))
from .__about__ import __version__
from .api import send_heartbeats
from .arguments import parse_arguments
from .compat import u, json
from .constants import SUCCESS, UNKNOWN_ERROR, HEARTBEATS_PER_REQUEST
from .logger import setup_logging
log = logging.getLogger('WakaTime')
from .heartbeat import Heartbeat
from .offlinequeue import Queue
def execute(argv=None):
if argv:
sys.argv = ['wakatime'] + argv
args, configs = parse_arguments()
setup_logging(args, __version__)
try:
heartbeats = []
hb = Heartbeat(vars(args), args, configs)
if hb:
heartbeats.append(hb)
else:
log.debug(hb.skip)
if args.extra_heartbeats:
try:
for extra_data in json.loads(sys.stdin.readline()):
hb = Heartbeat(extra_data, args, configs)
if hb:
heartbeats.append(hb)
else:
log.debug(hb.skip)
except json.JSONDecodeError as ex:
log.warning(u('Malformed extra heartbeats json: {msg}').format(
msg=u(ex),
))
retval = SUCCESS
while heartbeats:
retval = send_heartbeats(heartbeats[:HEARTBEATS_PER_REQUEST], args, configs)
heartbeats = heartbeats[HEARTBEATS_PER_REQUEST:]
if retval != SUCCESS:
break
if heartbeats:
Queue(args, configs).push_many(heartbeats)
if retval == SUCCESS:
queue = Queue(args, configs)
for offline_heartbeats in queue.pop_many(args.sync_offline_activity):
time.sleep(1)
retval = send_heartbeats(offline_heartbeats, args, configs)
if retval != SUCCESS:
break
return retval
except:
log.traceback(logging.ERROR)
print(traceback.format_exc())
return UNKNOWN_ERROR
|
single_inference/test_kitti.py | yohannes-taye/mobilePydnet | 182 | 12757578 | """
Evaluate the model on the Eigen split of the KITTI dataset.
- prepare the ground-truth depth by running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py
"""
import argparse
import os
import cv2
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from eval_utils import compute_errors, compute_scale_and_shift
from network import Pydnet
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class KITTILoader(object):
def __init__(self, params):
self.params = params
self.height = params["height"]
self.width = params["width"]
self.data_list_file = params["data_list_file"]
self.data_path = params["data_path"]
self.num_workers = 4
self.data_list = np.loadtxt(self.data_list_file, dtype=bytes).astype(np.str)
self.default_img_shape = None
def read_and_decode(self, filename_queue):
"""Read jpeg file from file system"""
img0_name = tf.strings.join([self.data_path, "/", filename_queue, ".jpg"])
img0 = tf.image.decode_jpeg(tf.io.read_file(img0_name), channels=3)
img0 = tf.cast(img0, tf.float32)
return img0
def preprocess(self, filename_queue):
"""Prepare single image at testing time"""
img0 = self.read_and_decode(filename_queue)
img0 = tf.image.resize_images(img0, [self.height, self.width], tf.image.ResizeMethod.AREA)
img0.set_shape([self.height, self.width, 3])
img0 = img0 / 255.0
return img0
def create_iterator(self, num_parallel_calls=4):
"""Create iterator"""
data_list = tf.convert_to_tensor(self.data_list, dtype=tf.string)
dataset = tf.data.Dataset.from_tensor_slices(data_list)
dataset = dataset.map(self.preprocess, num_parallel_calls=num_parallel_calls)
dataset = dataset.batch(1)
dataset = dataset.repeat()
iterator = dataset.make_initializable_iterator()
return iterator
def read_test_files(test_file) -> list:
"""Read test files from txt file"""
assert os.path.exists(test_file)
with open(test_file, "r") as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
def run_inference(opts):
"""Run the model on KITTI"""
network_params = {"height": 320, "width": 640, "is_training": False}
dataset_params = {
"height": 320,
"width": 640,
"data_path": opts.data_path,
"data_list_file": opts.data_list_file,
}
dataset = KITTILoader(dataset_params)
iterator = dataset.create_iterator()
batch_img = iterator.get_next()
network = Pydnet(network_params)
predicted_idepth = network.forward(batch_img)
predicted_idepth = tf.nn.relu(predicted_idepth)
# restore graph
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.compat.v1.global_variables_initializer())
sess.run(iterator.initializer)
saver.restore(sess, opts.ckpt)
os.makedirs(opts.dest, exist_ok=True)
test_images = read_test_files(opts.data_list_file)
num_images = len(test_images)
with tqdm(total=num_images) as pbar:
for i in range(num_images):
idepth = sess.run(predicted_idepth)
idepth = np.squeeze(idepth)
min_idepth = idepth.min()
max_idepth = idepth.max()
norm_idepth = (idepth - min_idepth) / (max_idepth - min_idepth)
norm_idepth *= 255.0
target_path = os.path.join(opts.data_path, f"{test_images[i]}.jpg")
target = cv2.imread(target_path)
h, w = target.shape[:2]
norm_idepth = cv2.resize(norm_idepth, (w, h))
img_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
cv2.imwrite(img_path, (norm_idepth * 256.0).astype(np.uint16))
pbar.update(1)
print("Inference done!")
def eval(opts):
"""Compute error metrics."""
errors = []
test_images = read_test_files(opts.data_list_file)
print("=> loading gt data")
gt_depths = np.load(opts.gt_path, fix_imports=True, encoding="latin1", allow_pickle=True)[
"data"
]
print("=> starting evaluation")
with tqdm(total=len(test_images)) as pbar:
for i in range(len(test_images)):
target = gt_depths[i]
pred_path = os.path.join(opts.dest, f"{str(i).zfill(4)}.png")
prediction_idepth = cv2.imread(pred_path, -1) / 256.0
mask = (target > 1e-3) & (target < opts.max_depth)
target_idepth = np.zeros_like(target)
target_idepth[mask == 1] = 1.0 / target[mask == 1]
scale, shift = compute_scale_and_shift(prediction_idepth, target_idepth, mask)
prediction_idepth_aligned = scale * prediction_idepth + shift
disparity_cap = 1.0 / opts.max_depth
prediction_idepth_aligned[prediction_idepth_aligned < disparity_cap] = disparity_cap
prediciton_depth_aligned = 1.0 / prediction_idepth_aligned
prediciton_depth_aligned = prediciton_depth_aligned[mask == 1]
target = target[mask == 1]
errors.append(compute_errors(target, prediciton_depth_aligned))
pbar.update(1)
mean_errors = np.array(errors).mean(0)
labels = ["abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
for i in range(len(labels)):
print(f"{labels[i]}:{mean_errors[i]}")
print("Evaluation done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Evaluate depth network on KITTI")
parser.add_argument("--ckpt", type=str, help="path to checkpoint", required=True)
parser.add_argument("--data_path", type=str, help="path to kitti", required=True)
parser.add_argument("--gt_path", type=str, help="path to gt_depths.npz", required=True)
parser.add_argument(
"--data_list_file", type=str, help="path to data list", default="test_kitti.txt"
)
parser.add_argument("--dest", type=str, help="prediction folder", default="kitti")
parser.add_argument("--max_depth", type=float, help="maximum depth value", default=80.0)
opts = parser.parse_args()
run_inference(opts)
eval(opts)
|
Pyto/Samples/SciKit-Image/plot_edge_filter.py | snazari/Pyto | 701 | 12757585 | """
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, sobel_h, sobel_v, scharr, \
scharr_h, scharr_v, prewitt, prewitt_v, prewitt_h, farid_v, farid_h
image = camera()
edge_roberts = roberts(image)
edge_sobel = sobel(image)
fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True,
figsize=(8, 4))
ax[0].imshow(edge_roberts, cmap=plt.cm.gray)
ax[0].set_title('Roberts Edge Detection')
ax[1].imshow(edge_sobel, cmap=plt.cm.gray)
ax[1].set_title('Sobel Edge Detection')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Different operators compute different finite-difference approximations of
# the gradient. For example, the Scharr filter results in a less rotational
# variance than the Sobel filter that is in turn better than the Prewitt
# filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters
# and the Scharr filter is illustrated below with an image that is the
# discretization of a rotation- invariant continuous function. The
# discrepancy between the Prewitt and Sobel filters, and the Scharr filter is
# stronger for regions of the image where the direction of the gradient is
# close to diagonal, and for regions with high spatial frequencies. For the
# example image the differences between the filter results are very small and
# the filter results are visually almost indistinguishable.
#
# .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
#
# .. [2] <NAME>, <NAME>, and <NAME>. Principles of filter design.
# In Handbook of Computer Vision and Applications. Academic Press,
# 1999.
#
# .. [3] https://en.wikipedia.org/wiki/Prewitt_operator
x, y = np.ogrid[:100, :100]
# Rotation-invariant image with different spatial frequencies
img = np.exp(1j * np.hypot(x, y) ** 1.3 / 20.).real
edge_sobel = sobel(img)
edge_scharr = scharr(img)
edge_prewitt = prewitt(img)
diff_scharr_prewitt = edge_scharr - edge_prewitt
diff_scharr_sobel = edge_scharr - edge_sobel
max_diff = np.max(np.maximum(diff_scharr_prewitt, diff_scharr_sobel))
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True,
figsize=(8, 8))
ax = axes.ravel()
ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Original image')
ax[1].imshow(edge_scharr, cmap=plt.cm.gray)
ax[1].set_title('Scharr Edge Detection')
ax[2].imshow(diff_scharr_prewitt, cmap=plt.cm.gray, vmax=max_diff)
ax[2].set_title('Scharr - Prewitt')
ax[3].imshow(diff_scharr_sobel, cmap=plt.cm.gray, vmax=max_diff)
ax[3].set_title('Scharr - Sobel')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# As in the previous example, here we illustrate the rotational invariance of
# the filters. The top row shows a rotationally invariant image along with the
# angle of its analytical gradient. The other two rows contain the difference
# between the different gradient approximations (Sobel, Prewitt, Scharr &
# Farid) and analytical gradient.
#
# The Farid & Simoncelli derivative filters [4]_, [5]_ are the most
# rotationally invariant, but require a 5x5 kernel, which is computationally
# more intensive than a 3x3 kernel.
#
# .. [4] <NAME>. and <NAME>., "Differentiation of discrete
# multidimensional signals", IEEE Transactions on Image Processing 13(4):
# 496-508, 2004. :DOI:`10.1109/TIP.2004.823819`
#
# .. [5] Wikipedia, "Farid and Simoncelli Derivatives." Available at:
# <https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives>
x, y = np.mgrid[-10:10:255j, -10:10:255j]
img = np.sin(x ** 2 + y ** 2)
imgx = 2 * x * np.cos(x ** 2 + y ** 2)
imgy = 2 * y * np.cos(x ** 2 + y ** 2)
def angle(dx, dy):
return np.mod(np.arctan2(dy, dx), np.pi)
true_angle = angle(imgx, imgy)
angle_farid = angle(farid_h(img), farid_v(img))
angle_sobel = angle(sobel_h(img), sobel_v(img))
angle_scharr = angle(scharr_h(img), scharr_v(img))
angle_prewitt = angle(prewitt_h(img), prewitt_v(img))
def diff_angle(angle_1, angle_2):
return np.minimum(np.pi - np.abs(angle_1 - angle_2),
np.abs(angle_1 - angle_2))
diff_farid = diff_angle(true_angle, angle_farid)
diff_sobel = diff_angle(true_angle, angle_sobel)
diff_scharr = diff_angle(true_angle, angle_scharr)
diff_prewitt = diff_angle(true_angle, angle_prewitt)
fig, axes = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True,
figsize=(8, 8))
ax = axes.ravel()
ax[0].imshow(img, cmap=plt.cm.gray)
ax[0].set_title('Original image')
ax[1].imshow(true_angle, cmap=plt.cm.hsv)
ax[1].set_title('Analytical gradient angle')
ax[2].imshow(diff_sobel, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[2].set_title('Sobel error')
ax[3].imshow(diff_prewitt, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[3].set_title('Prewitt error')
ax[4].imshow(diff_scharr, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[4].set_title('Scharr error')
cax = ax[5].imshow(diff_farid, cmap=plt.cm.inferno, vmin=0, vmax=0.02)
ax[5].set_title('Farid error')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.90, 0.10, 0.02, 0.50])
fig.colorbar(cax, cax=cbar_ax, ticks=[0, 0.01, 0.02])
for a in ax:
a.axis('off')
plt.show()
|
VA/main/models/blocks.py | YuJaceKim/Activity-Recognition-with-Combination-of-Deeply-Learned-Visual-Attention-and-Pose-Estimation | 343 | 12757589 | import tensorflow as tf
from keras.models import Model
from deephar.layers import *
from deephar.utils import *
def conv_block(inp, kernel_size, filters, last_act=True):
filters1, filters2, filters3 = filters
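    # ResNet-style bottleneck: 1x1 -> kxk -> 1x1 on the main branch with a 1x1
    # projection on the shortcut, so the two branches can be added even when the
    # input and output channel counts differ.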
x = conv_bn_act(inp, filters1, (1, 1))
x = conv_bn_act(x, filters2, kernel_size)
x = conv_bn(x, filters3, (1, 1))
shortcut = conv_bn(inp, filters3, (1, 1))
x = add([x, shortcut])
if last_act:
x = Activation('relu')(x)
return x
def identity_block(inp, kernel_size, filters, last_act=True):
filters1, filters2, filters3 = filters
x = conv_bn_act(inp, filters1, (1, 1))
x = conv_bn_act(x, filters2, kernel_size)
x = conv_bn(x, filters3, (1, 1))
x = add([x, inp])
if last_act:
x = Activation('relu')(x)
return x
def stem_inception_v4(x, image_div=8):
"""Entry-flow network (stem) *based* on Inception_v4."""
assert image_div in [4, 8, 16, 32], \
'Invalid image_div ({}).'.format(image_div)
x = conv_bn_act(x, 32, (3, 3), strides=(2, 2))
x = conv_bn_act(x, 32, (3, 3))
    if image_div == 32:
x = MaxPooling2D((2, 2))(x)
x = conv_bn_act(x, 64, (3, 3))
a = conv_bn_act(x, 96, (3, 3), strides=(2, 2))
b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = concatenate([a, b])
a = conv_bn_act(x, 64, (1, 1))
a = conv(a, 96, (3, 3))
b = conv_bn_act(x, 64, (1, 1))
b = conv_bn_act(b, 64, (5, 1))
b = conv_bn_act(b, 64, (1, 5))
b = conv(b, 96, (3, 3))
x = concatenate([a, b])
x = BatchNormalization(axis=-1, scale=False)(x)
    if image_div != 4:
a = act_conv_bn(x, 192, (3, 3), strides=(2, 2))
b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = concatenate([a, b])
if image_div in [16, 32]:
a = act_conv_bn(x, 192, (3, 3), strides=(2, 2))
b = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = concatenate([a, b])
    if image_div == 4:
x = residual(x, int_size=112, out_size=2*192+64, convtype='normal',
name='residual0')
else:
x = residual(x, int_size=144, out_size=3*192, convtype='normal',
name='residual0')
return x
def stem_residual_eccv(x, image_div=8):
"""Entry-flow network (stem) *based* on ResNet ('residual' option)."""
assert image_div in [4, 8, 16, 32], \
'Invalid image_div ({}).'.format(image_div)
x = conv_bn_act(x, 64, (7, 7), strides=(2, 2), padding='same')
a = conv_bn_act(x, 128, (3, 3), padding='same')
b = conv_bn_act(x, 128, (1, 1), padding='same')
x = add([a, b])
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = residual(x, int_size=128, out_size=256, convtype='normal', name='rn0')
x = residual(x, int_size=128, out_size=256, convtype='normal', name='rn1')
    if image_div == 4:
x = residual(x, out_size=256, convtype='normal', name='rn3')
else:
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = residual(x, int_size=192, out_size=384, convtype='normal',
name='rn3')
x = residual(x, int_size=192, out_size=384, convtype='normal',
name='rn4')
if image_div in [16, 32]:
x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
x = residual(x, int_size=256, out_size=512, convtype='normal',
name='rn5')
x = residual(x, int_size=256, out_size=512, convtype='normal',
name='rn6')
        if image_div == 32:
x = MaxPooling2D((2, 2), strides=(2, 2), padding='same')(x)
return x
def reception_block(x, num_levels, kernel_size, int_size=None,
convtype='depthwise', name=None):
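    # Recursive hourglass: each level keeps a full-resolution residual branch
    # (up1) and a pooled branch (low) that is processed recursively, upsampled
    # and added back onto the skip connection.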
def hourglass(x, n):
up1 = residual(x, kernel_size=kernel_size, int_size=int_size,
convtype=convtype)
low = MaxPooling2D((2, 2))(x)
if n == num_levels:
low = act_conv_bn(low, int(K.int_shape(x)[-1] / 2), (1, 1))
low = residual(low, kernel_size=kernel_size, int_size=int_size,
convtype=convtype)
if n > 2:
low = hourglass(low, n-1)
else:
low = residual(low, kernel_size=kernel_size,
int_size=int_size,
convtype=convtype)
if n == num_levels:
low = residual(low, kernel_size=kernel_size,
out_size=K.int_shape(x)[-1], int_size=int_size,
convtype=convtype)
else:
low = residual(low, kernel_size=kernel_size,
int_size=int_size, convtype=convtype)
up2 = UpSampling2D((2, 2))(low)
x = add([up1, up2])
return x
x = hourglass(x, num_levels)
return x
def build_keypoints_regressor(input_shape, dim, num_maps, sam_model, prob_model,
name=None, verbose=0):
assert num_maps >= 1, \
'The number of maps should be at least 1 (%d given)' % num_maps
inputs = []
inputs3d = []
p_concat = []
v_concat = []
# Auxiliary functions
v_tile = Lambda(lambda x: K.tile(x, (1, 1, dim)))
# This depends on TensorFlow because keras does not implement divide.
tf_div = Lambda(lambda x: tf.divide(x[0], x[1]))
for i in range(num_maps):
h = Input(shape=input_shape)
inputs.append(h)
h_s = act_channel_softmax(h)
p = sam_model(h_s)
v = prob_model(h_s)
if dim == 3:
d = Input(shape=input_shape)
inputs3d.append(d)
d_s = Activation('sigmoid')(d)
dm = multiply([d_s, h_s])
z = Lambda(lambda x: K.sum(x, axis=(1, 2)))(dm)
z = Lambda(lambda x: K.expand_dims(x, axis=-1))(z)
p = concatenate([p, z])
if num_maps > 1:
t = v_tile(v)
p = multiply([p, v_tile(v)])
p_concat.append(p)
v_concat.append(v)
if num_maps > 1:
p = add(p_concat)
v_sum = add(v_concat)
p = tf_div([p, v_tile(v_sum)])
v = maximum(v_concat)
else:
p = p_concat[0]
v = v_concat[0]
model = Model(inputs+inputs3d, [p, v], name=name)
if verbose:
model.summary()
return model
def build_context_aggregation(num_joints, num_context, alpha,
num_frames=1, name=None):
inp = Input(shape=(num_joints * num_context, 1))
d = Dense(num_joints, use_bias=False)
x = Lambda(lambda x: K.squeeze(x, axis=-1))(inp)
x = d(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
w = d.get_weights()
w[0].fill(0)
for j in range(num_joints):
start = j*num_context
w[0][j * num_context : (j + 1) * num_context, j] = 1.
d.set_weights(w)
d.trainable = False
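    # The frozen Dense layer above acts as a fixed block-wise sum: with the 0/1
    # weight pattern set in the loop, each output joint simply aggregates its
    # own num_context contextual predictions.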
ctx_sum = Model(inputs=inp, outputs=x)
ctx_sum.trainable = False
if num_frames > 1:
ctx_sum = TimeDistributed(ctx_sum,
input_shape=(num_frames,) + K.int_shape(inp)[1:])
# Define auxiliary layers.
mul_alpha = Lambda(lambda x: alpha * x)
mul_1alpha = Lambda(lambda x: (1 - alpha) * x)
# This depends on TensorFlow because keras does not implement divide.
tf_div = Lambda(lambda x: tf.divide(x[0], x[1]))
if num_frames == 1:
# Define inputs
ys = Input(shape=(num_joints, 2))
yc = Input(shape=(num_joints * num_context, 2))
pc = Input(shape=(num_joints * num_context, 1))
# Split contextual predictions in x and y and do computations separately
xi = Lambda(lambda x: x[:,:, 0:1])(yc)
yi = Lambda(lambda x: x[:,:, 1:2])(yc)
else:
ys = Input(shape=(num_frames, num_joints, 2))
yc = Input(shape=(num_frames, num_joints * num_context, 2))
pc = Input(shape=(num_frames, num_joints * num_context, 1))
# Split contextual predictions in x and y and do computations separately
xi = Lambda(lambda x: x[:,:,:, 0:1])(yc)
yi = Lambda(lambda x: x[:,:,:, 1:2])(yc)
pxi = multiply([xi, pc])
pyi = multiply([yi, pc])
pc_sum = ctx_sum(pc)
pxi_sum = ctx_sum(pxi)
pyi_sum = ctx_sum(pyi)
pc_div = Lambda(lambda x: x / num_context)(pc_sum)
pxi_div = tf_div([pxi_sum, pc_sum])
pyi_div = tf_div([pyi_sum, pc_sum])
yc_div = concatenate([pxi_div, pyi_div])
ys_alpha = mul_alpha(ys)
yc_div_1alpha = mul_1alpha(yc_div)
y = add([ys_alpha, yc_div_1alpha])
model = Model(inputs=[ys, yc, pc], outputs=y, name=name)
model.trainable = False
return model
def build_softargmax_1d(input_shape, name=None):
if name is None:
name_sm = None
else:
name_sm = name + '_softmax'
inp = Input(shape=input_shape)
x = act_depth_softmax(inp, name=name_sm)
x = lin_interpolation_1d(x)
model = Model(inputs=inp, outputs=x, name=name)
model.trainable = False
return model
def build_softargmax_2d(input_shape, rho=0., name=None):
if name is None:
name_sm = None
else:
name_sm = name + '_softmax'
inp = Input(shape=input_shape)
x = act_channel_softmax(inp, name=name_sm)
if rho > 0:
x = kl_divergence_regularizer(x, rho=rho)
x_x = lin_interpolation_2d(x, axis=0)
x_y = lin_interpolation_2d(x, axis=1)
x = concatenate([x_x, x_y])
model = Model(inputs=inp, outputs=x, name=name)
model.trainable = False
return model
def build_joints_probability(input_shape, name=None, verbose=0):
inp = Input(shape=input_shape)
x = inp
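    # 2x2 average pooling with stride 1, rescaled by 4, yields the sum of each
    # 2x2 neighbourhood; the global max of those sums then serves as a joint
    # visibility/probability score taken from the strongest local heatmap mass.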
x = AveragePooling2D((2, 2), strides=(1, 1))(x)
x = Lambda(lambda x: 4*x)(x)
x = GlobalMaxPooling2D()(x)
x = Lambda(lambda x: K.expand_dims(x, axis=-1))(x)
model = Model(inputs=inp, outputs=x, name=name)
if verbose:
model.summary()
return model
|
schedule/migrations/0002_event_color_event.py | kimarakov/schedule | 1,065 | 12757618 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("schedule", "0001_initial")]
operations = [
migrations.AddField(
model_name="event",
name="color_event",
field=models.CharField(
verbose_name="Color event", blank=True, max_length=10, null=True
),
)
]
|
sld-schedule/helpers/api_token.py | guorenxi/Stack-Lifecycle-Deployment | 115 | 12757622 | from helpers.api_request import request_url
from config.api import settings
def get_token(data):
response = request_url(
verb='POST',
headers={'Content-Type': 'application/json'},
uri='authenticate/access-token-json',
json=data
)
if response.get('status_code') == 200:
result = response.get('json')
return result.get('access_token')
if __name__ == "__main__":
print(get_token(settings.CREDENTIALS_ADM))
|
tests/models/hyper_dt_regression_test.py | lixfz/DeepTables | 828 | 12757635 | # -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
import pandas as pd
from deeptables.models import DeepTable
from deeptables.models.hyper_dt import HyperDT, tiny_dt_space
from hypernets.core.callbacks import SummaryCallback, FileStorageLoggingCallback
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers import RandomSearcher
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from .. import homedir
class Test_HyperDT_Regression():
def test_boston(self):
print("Loading datasets...")
boston_dataset = load_boston()
df_train = pd.DataFrame(boston_dataset.data)
df_train.columns = boston_dataset.feature_names
self.y = pd.Series(boston_dataset.target)
self.X = df_train
self.X_train, \
self.X_test, \
self.y_train, \
self.y_test = train_test_split(self.X, self.y, test_size=0.2, random_state=42)
rs = RandomSearcher(tiny_dt_space, optimize_direction=OptimizeDirection.Maximize, )
hdt = HyperDT(rs,
callbacks=[SummaryCallback(), FileStorageLoggingCallback(rs, output_dir=f'{homedir}/hyn_logs')],
reward_metric='RootMeanSquaredError',
dnn_params={
'hidden_units': ((256, 0, False), (256, 0, False)),
'dnn_activation': 'relu',
},
)
hdt.search(self.X_train, self.y_train, self.X_test, self.y_test, max_trials=3)
best_trial = hdt.get_best_trial()
estimator = hdt.final_train(best_trial.space_sample, self.X, self.y)
score = estimator.predict(self.X_test)
result = estimator.evaluate(self.X_test, self.y_test)
assert result
assert isinstance(estimator.model, DeepTable)
|
luminaire/model/window_density.py | Dima2022/luminaire | 525 | 12757654 | from luminaire.model.base_model import BaseModel, BaseModelHyperParams
from luminaire.exploration.data_exploration import DataExploration
class WindowDensityHyperParams(BaseModelHyperParams):
"""
Hyperparameter class for Luminaire Window density model.
    :param str freq: The frequency of the time-series. Luminaire supports default configuration for 'S', 'T', '15T',
'H', 'D'. Any other frequency type should be specified as 'custom' and configuration should be set manually.
:param float max_missing_train_prop: Maximum proportion of missing observation allowed in the training data.
:param bool is_log_transformed: A flag to specify whether to take a log transform of the input data.
If the data contain negatives, is_log_transformed is ignored even though it is set to True.
    :param str baseline_type: A string flag to specify whether to set the baseline as the previous sub-window from
the training data for scoring or to aggregate the overall window as a baseline. Possible values:
- "last_window"
- "aggregated"
:param str detection_method: A string that select between two window testing method. Possible values:
- "kldiv" (KL-divergence). This is recommended to be set for high frequency time series such as 'S', 'T' etc.
- "sign_test" (Wilcoxon sign rank test). This is recommended to be set for low frequency time series such as 'H', 'D' etc.
:param int min_window_length: Minimum size of the scoring window / a stable training sub-window length.
.. Note :: This is not the minimum size of the whole training window which is the combination of stable sub-windows.
:param int max_window_length: Maximum size of the scoring window / a stable training sub-window length.
.. Note :: This is not the maximum size of the whole training window which is the combination of stable sub-windows.
:param int window_length: Size of the scoring window / a stable training sub-window length.
.. Note :: This is not the size of the whole training window which is the combination of stable sub-windows.
:param str detrend_method: A string that select between two stationarizing method. Possible values:
- "ma" (moving average based)
- "diff" (differencing based).
"""
def __init__(self,
freq=None,
max_missing_train_prop=0.1,
is_log_transformed=False,
baseline_type="aggregated",
detection_method=None,
min_window_length=None,
max_window_length=None,
window_length=None,
detrend_method='modeling'
):
super(WindowDensityHyperParams, self).__init__(
model_name="WindowDensityModel",
freq=freq,
max_missing_train_prop=max_missing_train_prop,
is_log_transformed=is_log_transformed,
baseline_type=baseline_type,
detection_method=detection_method,
min_window_length=min_window_length,
max_window_length=max_window_length,
window_length=window_length,
detrend_method=detrend_method
)
class WindowDensityModel(BaseModel):
"""
This model detects anomalous windows using KL divergence (for high frequency data) and Wilcoxon sign rank test
(for low frequency data). This default monitoring frequency is set to pandas time frequency type 'T'.
:param dict hyper_params: Hyper parameters for Luminaire window density model.
See :class:`luminaire.model.window_density.WindowDensityHyperParams` for detailed information.
:return: Anomaly probability for the execution window and other related model outputs
:rtype: list[dict]
"""
__version__ = "0.1"
def __init__(self,
hyper_params: WindowDensityHyperParams().params or None,
**kwargs):
# Specifying the minimum and maximum number of training windows
self.min_num_train_windows = 5
self.max_num_train_windows = 10000
self.hyper_params = hyper_params
self.sig_level = 0.001
super(WindowDensityModel, self).__init__(**hyper_params, **kwargs)
def _volume_shift_detection(self, mean_list=None, sd_list=None, probability_threshold=0.5):
"""
This function detects any significant shift in the training data volume using a Bayesian change point detection
technique.
:param list mean_list: The list of means from each training sub-window.
:param list sd_list: The list of standard deviations from each training sub-window.
:param float probability_threshold: Threshold for the probability value to be flagged as a change point.
        :return: The index of the most recent significant data volume shift.
:rtype: int
"""
import numpy as np
from bayesian_changepoint_detection import offline_changepoint_detection as offcd
from functools import partial
# Volume shift detection over the means of the training window
q, p, pcp = offcd.offline_changepoint_detection(
data=np.array(mean_list),
prior_func=partial(offcd.const_prior, l=(len(mean_list) + 1)),
observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood,
truncate=-10)
mask_mean = np.append(0, np.exp(pcp).sum(0)) > probability_threshold
# Volume shift detection over the standard deviations of the training window
change_points = np.array(mask_mean).nonzero()
last_mean_cp = change_points[0][-1] if len(change_points[0]) > 0 else []
q, p, pcp = offcd.offline_changepoint_detection(
data=np.array(sd_list),
prior_func=partial(offcd.const_prior, l=(len(sd_list) + 1)),
observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood,
truncate=-10)
mask_sd = np.append(0, np.exp(pcp).sum(0)) > probability_threshold
change_points = np.array(mask_sd).nonzero()
last_sd_cp = change_points[0][-1] if len(change_points[0]) > 0 else []
# Change point is the maximum obtained from mean list and the standard deviation list
cdate = max(last_mean_cp, last_sd_cp)
return cdate
def _distance_function(self, data=None, called_for=None, baseline=None):
"""
This function finds the distance of the given data from the baseline using KL divergence.
:param list data: The list containing the scoring window (for scoring) / training sub-window (for training).
:param str called_for: A flag to specify whether this function is called for training or scoring.
:param list baseline: A list containing the base line to be compared with the given data.
:return: KL divergence between two time windows.
:rtype: float
"""
import numpy as np
import scipy.stats as stats
float_min = 1e-50
float_max = 1e50
# If called for training, Kl divergence is performed over each pair of consecutive windows to create
# the past anomaly scores
if called_for == "training":
distance = []
for i in range(0, len(data) - 1):
q = stats.kde.gaussian_kde(data[i])
p = stats.kde.gaussian_kde(data[i + 1])
ts_min = min(np.min(data[i]), np.min(data[i + 1]))
ts_max = max(np.max(data[i]), np.max(data[i + 1]))
density_domain = np.linspace(ts_min, ts_max, 1000)
q = q(density_domain)
p = p(density_domain)
# approximating the zero probability regions to avoid divide by zero issue in KL divergence
q[q == 0] = min(np.array(q)[np.array(q) > 0])
p[p == 0] = min(np.array(p)[np.array(p) > 0])
q = np.clip(q, float_min, float_max)
p = np.clip(p, float_min, float_max)
distance.append(stats.entropy(pk=p, qk=q))
# If called for scoring, Kl divergence is performed between the scoring window and the baseline
elif called_for == "scoring":
q = stats.kde.gaussian_kde(baseline)
p = stats.kde.gaussian_kde(data)
ts_min = min(np.min(baseline), np.min(data))
ts_max = max(np.max(baseline), np.max(data))
density_domain = np.linspace(ts_min, ts_max, 1000)
q = q(density_domain)
p = p(density_domain)
q[q == 0] = min(np.array(q)[np.array(q) > 0])
p[p == 0] = min(np.array(p)[np.array(p) > 0])
q = np.clip(q, float_min, float_max)
p = np.clip(p, float_min, float_max)
distance = stats.entropy(pk=p, qk=q)
return distance
def _training_data_truncation(self, sliced_training_data=None):
"""
This function performs the truncation of the training data using the _volume_shift_detection function.
:param list sliced_training_data: The list containing the training data.
:return: Sliced training sample based on the most recent change point
:rtype: list
"""
import numpy as np
# Change point detection is performed over the means and standard deviations of the sub windows
window_means = []
window_sds = []
for ts in sliced_training_data:
window_means.append(np.mean(ts))
window_sds.append(np.std(ts))
change_point = self._volume_shift_detection(mean_list=window_means, sd_list=window_sds)
# Truncating the training data based on the last change point
if change_point:
sliced_training_data_truncated = sliced_training_data[change_point:]
return sliced_training_data_truncated
else:
return sliced_training_data
def _call_training(self, df=None, window_length=None, imputed_metric=None, detrend_method=None,
detection_method=None, freq=None, **kwargs):
"""
This function generates the baseline and training metrics to be used for scoring.
:param pandas.DataFrame df: Input training data frame.
:param int window_length: The length of a training sub-window.
:param str imputed_metric: Column storing the time series values.
:param str detrend_method: Detrend method "modeling" or "diff" for nonstationarity.
:param str detection_method: Detection method "kldiv" or "sign_test".
:param str freq: Data frequency.
:return: Returns past anomaly scores based on training data, baseline and other related metrics.
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float, dict, list)
"""
import pandas as pd
past_anomaly_scores = dict()
gamma_alpha = dict()
gama_loc = dict()
gamma_beta = dict()
detrend_order = dict()
baseline = dict()
agg_data_model = dict()
agg_data = dict()
past_model = kwargs.get('past_model')
training_start = df.first_valid_index()
training_end = df.last_valid_index()
current_training_end = training_end
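        # Train a separate snapshot for a sliding set of training end times
        # covering (at most) the last day, so scoring can later pick the stored
        # snapshot whose time of day is closest to the scoring window.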
while (training_end - current_training_end) < pd.Timedelta('1D'):
df_current = df[df.index <= current_training_end]
past_anomaly_scores_current, gamma_alpha_current, gama_loc_current, gamma_beta_current, \
detrend_order_current, baseline_current, agg_data_model_current, \
agg_data_current = self._anomalous_region_detection(input_df=df_current,
window_length=window_length,
value_column=imputed_metric,
called_for="training",
detrend_method=detrend_method,
past_model=past_model,
detection_method=detection_method)
past_anomaly_scores.update({str(current_training_end.time().strftime('%H:%M:%S')): past_anomaly_scores_current})
gamma_alpha.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_alpha_current) if gamma_alpha_current else None})
gama_loc.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gama_loc_current) if gama_loc_current else None})
gamma_beta.update({str(current_training_end.time().strftime('%H:%M:%S')): float(gamma_beta_current) if gamma_beta_current else None})
detrend_order.update({str(current_training_end.time().strftime('%H:%M:%S')): detrend_order_current})
baseline.update({str(current_training_end.time().strftime('%H:%M:%S')): baseline_current})
agg_data_model.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_model_current})
agg_data.update({str(current_training_end.time().strftime('%H:%M:%S')): agg_data_current})
if isinstance(freq, str):
freq = pd.Timedelta('1' + freq)
current_training_end = current_training_end - min(pd.Timedelta('30T'), freq * 10)
return past_anomaly_scores, gamma_alpha, gama_loc, gamma_beta, \
detrend_order, baseline, agg_data_model, agg_data, training_start, training_end
def _get_model(self, input_df=None, window_length=None, value_column=None, detrend_method=None, baseline_type=None,
detection_method=None, past_model=None):
"""
This function runs the training process given the input parameters.
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int window_length: The length of a training sub-window / scoring window.
:param str value_column: Column containing the values.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param str baseline_type: Selects between "aggregated" or "last_window" baseline.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param luminaire.model.window_density.WindowDensityModel past_model: luminaire.model to append model metadata from past
:return: Returns past anomaly scores based on training data, baseline and other related metrics.
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float)
"""
import numpy as np
import pandas as pd
from itertools import chain
import scipy.stats as st
        model_history_truncation_prop = 0.25 # This is the proportion of history to truncate from both sides
                                             # every time we store the past anomaly scores
de_obj = DataExploration()
sliced_training_data, agg_datetime = de_obj._partition(input_df, window_length, value_column)
# performing the stationarity test
sliced_training_data_cleaned, detrend_order, agg_data_model, agg_data = de_obj._detrender(
training_data_sliced=sliced_training_data,
significance_level=0.05,
detrend_method=detrend_method,
agg_datetime=agg_datetime,
past_model=past_model)
# Obtain the past anomaly scores and the anomaly means and standard deviation if the detection method
# is KL divergence
if detection_method == "kldiv":
past_anomaly_scores = np.array(self._distance_function(data=sliced_training_data_cleaned,
called_for="training"))
if past_model:
model_timestamps = list(past_model._params['PastAnomalyScores'].keys())
training_end = input_df.index[-1]
current_min_timedelta = pd.Timedelta('10D')
for timestamp in model_timestamps:
current_datetime = pd.Timestamp(str(training_end.date()) + ' ' + timestamp)
temp_timedelta = training_end - current_datetime
temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(
0) else temp_timedelta
if temp_timedelta < current_min_timedelta:
opt_timestamp = timestamp
current_min_timedelta = temp_timedelta
past_anomaly_scores = np.concatenate([past_model._params['PastAnomalyScores'][opt_timestamp][
int(len(past_anomaly_scores) * model_history_truncation_prop):
-int(len(past_anomaly_scores) * model_history_truncation_prop)]
, past_anomaly_scores])
if len(past_anomaly_scores) < 100:
alpha = []
loc = []
beta = []
for i in range(10):
boot_scores = np.random.choice(past_anomaly_scores.tolist(), size=100, replace=True)
alpha_i, loc_i, beta_i = st.gamma.fit(boot_scores)
alpha.append(alpha_i)
loc.append(loc_i)
beta.append(beta_i)
gamma_alpha = np.mean(alpha)
gamma_loc = np.mean(loc)
gamma_beta = np.mean(beta)
else:
gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(past_anomaly_scores)
else:
past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta = None, None, None, None
# If aggregated baseline type is specified, we take the whole training window as a baseline, else we
# take the last training sub window from the sliced training data
if baseline_type == "aggregated":
sliced_training_data_cleaned = self._training_data_truncation(
sliced_training_data=sliced_training_data_cleaned)
if detection_method == "kldiv":
baseline = list(chain.from_iterable(sliced_training_data_cleaned))
elif detection_method == "sign_test":
baseline = sliced_training_data_cleaned
elif baseline_type == "last_window":
baseline = sliced_training_data_cleaned[-1]
return past_anomaly_scores, gamma_alpha, gamma_loc, gamma_beta, detrend_order, \
baseline, agg_data_model, agg_data
def train(self, data, **kwargs):
"""
Input time series for training.
:param pandas.DataFrame data: Input time series.
:return: Trained model with the training timestamp and a success flag
:rtype: tuple(bool, str, python model object)
>>> data
raw interpolated
index
2017-10-02 00:00:00 118870 118870
2017-10-02 01:00:00 121914 121914
2017-10-02 02:00:00 116097 116097
2017-10-02 03:00:00 94511 94511
2017-10-02 04:00:00 68330 68330
... ... ...
2018-10-10 19:00:00 219908 219908
2018-10-10 20:00:00 219149 219149
2018-10-10 21:00:00 207232 207232
2018-10-10 22:00:00 198741 198741
2018-10-10 23:00:00 213751 213751
>>> hyper_params = WindowDensityHyperParams(freq='H').params
>>> wdm_obj = WindowDensityModel(hyper_params=hyper_params)
>>> success, model = wdm_obj.train(data)
>>> success, model
(True, "2018-10-10 23:00:00", <luminaire.model.window_density.WindowDensityModel object at 0x7fd7c5a34e80>)
"""
import numpy as np
import pandas as pd
freq = pd.Timedelta(self._params['freq']) if self._params['freq'] not in ['S', 'T', '15T', 'H', 'D'] \
else self._params['freq']
if freq in ['S', 'T', '15T', 'H', 'D']:
window_length = self._params['window_length']
else:
min_window_length = self._params['min_window_length']
max_window_length = self._params['max_window_length']
window_length = self._params['window_length']
if not min_window_length or not max_window_length or not window_length:
raise ValueError(
'Training window length with min and max should be specified in case frequency not in the '
'specified list')
is_log_transformed = self._params['is_log_transformed']
detrend_method = self._params['detrend_method']
target_metric = 'raw'
imputed_metric = 'interpolated'
if not self._params['detection_method']:
if freq in ['S', 'T', '15T']:
detection_method = 'kldiv'
elif freq in ['H', 'D']:
detection_method = 'sign_test'
else:
detection_method = 'sign_test' if freq > np.timedelta64(30, 'm') else 'kldiv'
else:
detection_method = self._params['detection_method']
if len(data) == 0:
model = {'ErrorMessage': 'DataFrame length is 0'}
success = False
return success, WindowDensityModel(**model)
# Shift the interpolated value by +1 and get the log. This handles values with 0.
if is_log_transformed:
neg_flag = True if not data[data[target_metric] < 0].empty else False
data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1)
past_anomaly_scores, anomaly_scores_gamma_alpha, anomaly_scores_gamma_loc, anomaly_scores_gamma_beta, \
detrend_order, baseline, agg_data_model, agg_data, \
training_start, training_end = self._call_training(df=data, window_length=window_length,
imputed_metric=imputed_metric,
detrend_method=detrend_method,
detection_method=detection_method,
freq=freq, **kwargs)
success = True
self.hyper_params['is_log_transformed'] = is_log_transformed
self.hyper_params['detection_method'] = detection_method
model = {'TrainingStartDate': str(training_start),
'PastAnomalyScores': past_anomaly_scores,
'AnomalyScoresGammaAlpha': anomaly_scores_gamma_alpha,
'AnomalyScoresGammaLoc': anomaly_scores_gamma_loc,
'AnomalyScoresGammaBeta': anomaly_scores_gamma_beta,
'NonStationarityOrder': detrend_order,
'Baseline': baseline,
'AggregatedDataModel': agg_data_model,
'AggregatedData': agg_data
}
return success, str(training_end), WindowDensityModel(hyper_params=self.hyper_params, **model)
def _call_scoring(self, df=None, target_metric=None, anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None,
anomaly_scores_gamma_beta=None, baseline=None, detrend_order=None, detrend_method=None,
agg_data_model=None, detection_method=None, attributes=None, agg_data=None):
"""
This function generates the anomaly flag and and probability for the scoring window.
:param pandas.DataFrame df: Input training data frame.
:param str target_metric: Column storing the time series values.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param list baseline: A list storing a baseline window used to score the scoring window.
:param int detrend_order: The order of detrending based on MA or differencing method.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param attributes: Model attributes.
:param agg_data: Aggregated Data per day.
:return: Returns the anomaly flag with the corresponding anomaly probability.
:rtype: tuple(bool, float, dict)
"""
is_anomaly, prob_of_anomaly = self._anomalous_region_detection(input_df=df, value_column=target_metric,
called_for="scoring",
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
baseline=baseline,
detrend_order=detrend_order,
detrend_method=detrend_method,
agg_data_model=agg_data_model,
detection_method=detection_method,
agg_data=agg_data)
return is_anomaly, prob_of_anomaly, attributes
def _get_result(self, input_df=None, detrend_order=None, agg_data_model=None, value_column=None,
detrend_method=None, baseline_type=None, detection_method=None, baseline=None,
anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None, anomaly_scores_gamma_beta=None,
agg_data=None):
"""
The function scores the scoring window for anomalies based on the training metrics and the baseline
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int detrend_order: The non-negative order of detrending based on Modeling or differencing method. When
the detrend_order > 0, corresponding detrending need to be performed using the method specified in the model
config.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param str value_column: Column containing the values.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param str baseline_type: Selects between "aggregated" or "last_window" baseline.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param list baseline: A list storing a baseline window used to score the scoring window.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param agg_data: Aggregated Data per day.
:return: Returns the anomaly flag with the corresponding anomaly probability.
:rtype: tuple(bool, float)
"""
import numpy as np
import pandas as pd
import copy
import scipy.stats as st
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EmpiricalCovariance, MinCovDet
import collections
import operator
is_anomaly = False
execution_data = input_df[value_column]
adjusted_execution_data = []
prob_of_anomaly = []
len_req_agg_data_model = 42 # Setting a hard threshold to have predictions from aggregated data
# for stationarity adjustment
if detrend_method == 'diff':
# Obtain the execution data and perform the necessary differencing
execution_data = list(execution_data)
adjusted_execution_data = np.diff(execution_data, detrend_order).tolist() if detrend_order > 0 \
else execution_data
elif detrend_method == 'modeling':
idx = input_df.index.normalize()
dates_freq_dist = dict(collections.Counter(idx))
scoring_datetime = str(max(dates_freq_dist.items(), key=operator.itemgetter(1))[0])
execution_data_avg = np.mean(execution_data)
# If detrending is needed, we scale the scoring data accordingly using the agg_dat_model forecast
if detrend_order > 0:
snapshot_len_max = min(len(agg_data), len_req_agg_data_model)
agg_data_trunc = np.array(agg_data)[:, 1][-snapshot_len_max:]
data_adjust_forecast = []
try:
# Setting the data adjustment window of the original data using the predictions and the CILower and
# CIUpper keeping the prediction uncertainty of the agg_model in mind
if agg_data_model and len(agg_data) > len_req_agg_data_model:
score = agg_data_model.score(execution_data_avg, scoring_datetime)
data_adjust_forecast.append(score['Prediction'])
data_adjust_forecast.append(score['CILower'])
data_adjust_forecast.append(score['CIUpper'])
else:
data_adjust_forecast.append(np.median(agg_data_trunc))
data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit
data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit
except:
# If the scoring for the agg_data_model fails for some reason, we use the latest agg_data for the
# detrending adjustment
data_adjust_forecast.append(np.median(agg_data_trunc))
data_adjust_forecast.append(np.percentile(agg_data_trunc, 5)) # setting a 2-sigma limit
data_adjust_forecast.append(np.percentile(agg_data_trunc, 95)) # setting a 2-sigma limit
for i in range(3):
if data_adjust_forecast[i] != 0:
adjusted_execution_data.append((execution_data / data_adjust_forecast[i]).tolist())
else:
adjusted_execution_data = list(execution_data)
# Kl divergence based anomaly detection
if detection_method == "kldiv":
if detrend_order > 0:
prob_of_anomaly = []
for i in range(3):
current_anomaly_score = self._distance_function(data=adjusted_execution_data[i],
called_for="scoring", baseline=baseline)
prob_of_anomaly.append(st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc, anomaly_scores_gamma_beta))
prob_of_anomaly = np.min(prob_of_anomaly)
else:
current_anomaly_score = self._distance_function(data=adjusted_execution_data,
called_for="scoring", baseline=baseline)
prob_of_anomaly = st.gamma.cdf(current_anomaly_score, anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc, anomaly_scores_gamma_beta)
if 1 - prob_of_anomaly < self.sig_level:
is_anomaly = True
# Sign test based anomaly detection
elif detection_method == "sign_test":
# If last window is the baseline, we perform the Wilcoxon sign rank test for means and levene
# test for variance to detect anomalies
if baseline_type == "last_window":
test_stat_wilcoxon, pvalue_wilcoxon = st.wilcoxon(execution_data, baseline)
test_stat_levene, pvalue_levene = st.levene(execution_data, baseline)
if pvalue_wilcoxon < self.sig_level or pvalue_levene < self.sig_level:
is_anomaly = True
prob_of_anomaly = 1 - min(pvalue_wilcoxon, pvalue_levene)
# If aggregated is the baseline, we perform the Wilcoxon sign rank test for means and gamma distribution
# based test for the past standard deviations to detect anomalies
elif baseline_type == "aggregated":
baseline_sds = np.array(baseline).std(1).tolist()
if detrend_order == 0:
                # creating a 2d list to make it easy to loop through in the following for loop
adjusted_execution_data = [adjusted_execution_data]
for current_adjusted_data in adjusted_execution_data:
baseline_execution_data = copy.copy(baseline)
baseline_execution_data.append(current_adjusted_data)
pca = PCA()
scores = pca.fit_transform(StandardScaler().fit_transform(baseline_execution_data))
robust_cov = MinCovDet().fit(scores[:, :3])
mahalanobis_distance = robust_cov.mahalanobis(scores[:, :3]) # getting the top 3 dimensions
pvalue_mahalanobis = 1 - st.chi2.cdf(mahalanobis_distance[-1],
np.array(baseline_execution_data).shape[1])
gamma_alpha, gamma_loc, gamma_beta = st.gamma.fit(baseline_sds)
pvalue_gamma = 1 - st.gamma.cdf(np.std(current_adjusted_data), gamma_alpha, gamma_loc, gamma_beta)
if pvalue_mahalanobis < self.sig_level or pvalue_gamma < self.sig_level:
is_anomaly = True
prob_of_anomaly.append(1 - min(pvalue_mahalanobis, pvalue_gamma))
prob_of_anomaly = np.min(prob_of_anomaly)
return is_anomaly, prob_of_anomaly
def score(self, data, **kwargs):
"""
Function scores input series for anomalies
:param pandas.DataFrame data: Input time series to score
:return: Output dictionary with scoring summary.
:rtype: dict
>>> data
raw interpolated
index
2018-10-11 00:00:00 204800 204800
2018-10-11 01:00:00 222218 222218
2018-10-11 02:00:00 218903 218903
2018-10-11 03:00:00 190639 190639
2018-10-11 04:00:00 148214 148214
2018-10-11 05:00:00 106358 106358
2018-10-11 06:00:00 70081 70081
2018-10-11 07:00:00 47748 47748
2018-10-11 08:00:00 36837 36837
2018-10-11 09:00:00 33023 33023
2018-10-11 10:00:00 44432 44432
2018-10-11 11:00:00 72773 72773
2018-10-11 12:00:00 115180 115180
2018-10-11 13:00:00 157568 157568
2018-10-11 14:00:00 180174 180174
2018-10-11 15:00:00 190048 190048
2018-10-11 16:00:00 188391 188391
2018-10-11 17:00:00 189233 189233
2018-10-11 18:00:00 191703 191703
2018-10-11 19:00:00 189848 189848
2018-10-11 20:00:00 192685 192685
2018-10-11 21:00:00 196743 196743
2018-10-11 22:00:00 193016 193016
2018-10-11 23:00:00 196441 196441
>>> model
<luminaire.model.window_density.WindowDensityModel object at 0x7fcaab72fdd8>
>>> model.score(data)
{'Success': True, 'ConfLevel': 99.9, 'IsAnomaly': False, 'AnomalyProbability': 0.6963188902776808}
"""
import numpy as np
import pandas as pd
is_log_transformed = self._params['is_log_transformed']
detrend_method = self._params['detrend_method']
target_metric = 'raw'
imputed_metric = 'interpolated'
detection_method = self._params['detection_method']
# We want to make sure the time series does not contain any negatives in case of log transformation
if is_log_transformed:
neg_flag = True if not data[data[target_metric] < 0].empty else False
data[imputed_metric] = data[imputed_metric] if neg_flag else np.log(data[imputed_metric] + 1)
model_timestamps = list(self._params['AnomalyScoresGammaAlpha'].keys())
scoring_start = data.index[0]
current_min_timedelta = pd.Timedelta('10D')
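        # Pick the stored training snapshot whose end-of-training time of day is
        # closest (modulo one day) to the start of the scoring window.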
for timestamp in model_timestamps:
current_datetime = pd.Timestamp(str(scoring_start.date()) + ' ' + timestamp)
temp_timedelta = scoring_start - current_datetime
temp_timedelta = pd.Timedelta('1D') + temp_timedelta if temp_timedelta < pd.Timedelta(0) else temp_timedelta
if temp_timedelta < current_min_timedelta:
opt_timestamp = timestamp
current_min_timedelta = temp_timedelta
anomaly_scores_gamma_alpha = self._params['AnomalyScoresGammaAlpha'][opt_timestamp]
anomaly_scores_gamma_loc = self._params['AnomalyScoresGammaLoc'][opt_timestamp]
anomaly_scores_gamma_beta = self._params['AnomalyScoresGammaBeta'][opt_timestamp]
baseline = self._params['Baseline'][opt_timestamp]
detrend_order = self._params['NonStationarityOrder'][opt_timestamp]
agg_data_model = self._params['AggregatedDataModel'][opt_timestamp]
agg_data = self._params['AggregatedData'][opt_timestamp]
is_anomaly, prob_of_anomaly, attributes = self._call_scoring(df=data,
target_metric=target_metric,
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
baseline=baseline,
detrend_order=detrend_order,
detrend_method=detrend_method,
agg_data_model=agg_data_model,
detection_method=detection_method,
agg_data=agg_data)
result = {'Success': True,
'ConfLevel': float(1.0 - self.sig_level) * 100,
'IsAnomaly': is_anomaly,
'AnomalyProbability': float(prob_of_anomaly),
}
return result, data.reset_index().values.tolist()
def _anomalous_region_detection(self, input_df=None, window_length=None,
value_column=None, called_for=None,
anomaly_scores_gamma_alpha=None, anomaly_scores_gamma_loc=None,
anomaly_scores_gamma_beta=None, detrend_order=None, baseline=None,
detrend_method=None, agg_data_model=None, past_model=None, detection_method=None,
agg_data=None):
"""
This function detects anomaly given a training and a scoring window.
:param pandas.DataFrame input_df: Input data containing the training and the scoring data.
:param int window_length: The length of a training sub-window / scoring window.
:param str value_column: A string identifying the value column from the input dataframe
:param str called_for: A flag to specify whether this function is called for training or scoring.
:param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter.
:param float anomaly_scores_gamma_loc: Gamma fit location parameter.
:param float anomaly_scores_gamma_beta: Gamma fit beta parameter.
:param int detrend_order: Number of differencing for the scoring data. Only required if called for scoring.
:param list baseline: The baseline for the scoring. only required if called for scoring.
:param str detrend_method: Selects between "modeling" or "diff" detrend method.
:param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data.
:param luminaire.model.window_density.WindowDensityModel past_model: Past stored window density model.
:param str detection_method: Selects between "kldiv" or "sign_test" distance method.
:param agg_data: Aggregated Data per day.
:return: Anomaly flag with the corresponding probability of anomaly.
:rtype: tuple(bool, float)
"""
baseline_type = self._params['baseline_type']
input_df.fillna(0, inplace=True)
# The function can be called for either training or scoring
if called_for == "training":
return self._get_model(input_df=input_df,
window_length=window_length,
value_column=value_column,
detrend_method=detrend_method,
baseline_type=baseline_type,
detection_method=detection_method,
past_model=past_model)
elif called_for == "scoring":
return self._get_result(input_df=input_df,
detrend_order=detrend_order,
agg_data_model=agg_data_model,
value_column=value_column,
detrend_method=detrend_method,
baseline_type=baseline_type,
detection_method=detection_method,
baseline=baseline,
anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha,
anomaly_scores_gamma_loc=anomaly_scores_gamma_loc,
anomaly_scores_gamma_beta=anomaly_scores_gamma_beta,
agg_data=agg_data)
|
tools/android/roll/update_support_library.py | google-ar/chromium | 777 | 12757664 | #!/usr/bin/env python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Updates the Android support repository (m2repository).
"""
import argparse
import fnmatch
import os
import subprocess
import shutil
import sys
DIR_SOURCE_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..'))
ANDROID_SDK_PATH = os.path.abspath(os.path.join(DIR_SOURCE_ROOT, 'third_party',
'android_tools', 'sdk'))
TARGET_NAME = 'extra-android-m2repository'
# The first version we included was 23.2.1. Any folders that are older than
# that should not be included by Chrome's git repo. Unstable versions should
# also be excluded.
REMOVE_LIST = ['databinding', '13.*', '18.*', '19.*', '20.*', '21.*', '22.*',
'23.0.*', '23.1.*', '23.2.0', '*-alpha*', '*-beta*']
def main():
parser = argparse.ArgumentParser(description='Updates the Android support '
'repository in third_party/android_tools')
parser.add_argument('--sdk-dir',
help='Directory for the Android SDK.')
args = parser.parse_args()
sdk_path = ANDROID_SDK_PATH
if args.sdk_dir is not None:
sdk_path = os.path.abspath(os.path.join(DIR_SOURCE_ROOT, args.sdk_dir))
sdk_tool = os.path.abspath(os.path.join(sdk_path, 'tools', 'android'))
if not os.path.exists(sdk_tool):
print 'SDK tool not found at %s' % sdk_tool
return 1
# Run the android sdk update tool in command line.
subprocess.check_call([sdk_tool, 'update', 'sdk' , '--no-ui',
'--filter', TARGET_NAME])
m2repo = os.path.abspath(os.path.join(sdk_path, 'extras', 'android',
'm2repository'))
# Remove obsolete folders and unused folders according to REMOVE_LIST.
count = 0
for folder, _, _ in os.walk(m2repo):
for pattern in REMOVE_LIST:
if fnmatch.fnmatch(os.path.basename(folder), pattern):
count += 1
print 'Removing %s' % os.path.relpath(folder, sdk_path)
shutil.rmtree(folder)
if count == 0:
print ('No files were removed from the updated support library. '
'Did you update it successfully?')
return 1
if __name__ == '__main__':
sys.exit(main())
|
PyObjCTest/test_nsset.py | Khan/pyobjc-framework-Cocoa | 132 | 12757699 | from PyObjCTools.TestSupport import *
import objc
from Foundation import *
class TestNSSetInteraction(TestCase):
def __testRepeatedAllocInit( self ):
for i in range(1,1000):
a = NSSet.alloc().init()
def __testContains( self ):
x = NSSet.setWithArray_( ["foo", "bar", "baz"] )
self.assert_( "foo" in x )
self.assert_( "notfoo" not in x )
def __testIteration( self ):
x = NSSet.setWithArray_( ["foo", "bar", "baz"] )
for i in x:
self.assert_( i in x )
self.assert_( x.containsObject_( i ) )
def test_varargsConstruction(self):
w = NSSet.setWithObjects_(0,1,2,3,None)
x = NSSet.alloc().initWithObjects_(0,1,2,3,None)
y = NSSet.setWithObjects_count_(range(10), 4)
z = NSSet.alloc().initWithObjects_count_(range(10), 4)
#a = NSSet.alloc().initWithObjects_count_(range(4), None)
self.assert_(len(w) == 4)
self.assert_(len(x) == 4)
self.assert_(len(y) == 4)
self.assert_(len(z) == 4)
#self.assert_(len(a) == 4)
self.assert_(0 in w)
self.assert_(1 in x)
self.assert_(2 in y)
self.assert_(3 in z)
#self.assert_(3 in a)
def test_varargsConstruction2(self):
w = NSMutableSet.setWithObjects_(0,1,2,3,None)
x = NSMutableSet.alloc().initWithObjects_(0,1,2,3,None)
y = NSMutableSet.setWithObjects_count_(range(10), 4)
z = NSMutableSet.alloc().initWithObjects_count_(range(10), 4)
self.assert_(len(w) == 4)
self.assert_(len(x) == 4)
self.assert_(len(y) == 4)
self.assert_(len(z) == 4)
self.assert_(0 in w)
self.assert_(1 in x)
self.assert_(2 in y)
self.assert_(3 in z)
class TestVariadic (TestCase):
def testSetWithObjects(self):
o = NSSet.setWithObjects_()
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSSet))
o = NSSet.setWithObjects_(1,2,3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
o = NSMutableSet.setWithObjects_()
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSMutableSet))
o = NSMutableSet.setWithObjects_(1,2,3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSMutableSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
def testInitWithObjects(self):
o = NSSet.alloc().initWithObjects_()
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSSet))
o = NSSet.alloc().initWithObjects_(1,2,3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
o = NSMutableSet.alloc().initWithObjects_()
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSMutableSet))
o = NSMutableSet.alloc().initWithObjects_(1,2,3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSMutableSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
def testSetWithObjectsCount(self):
o = NSSet.setWithObjects_count_([1,2,3], 3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
self.assert_(4 not in o)
o = NSSet.setWithObjects_count_([1,2,3], 0)
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSSet))
o = NSMutableSet.setWithObjects_count_([1,2,3], 3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSMutableSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
o = NSMutableSet.setWithObjects_count_([1,2,3], 0)
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSMutableSet))
def testInitWithObjectsCount(self):
o = NSSet.alloc().initWithObjects_count_([1,2,3], 3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
self.assert_(4 not in o)
o = NSSet.alloc().initWithObjects_count_([1,2,3], 0)
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSSet))
o = NSMutableSet.alloc().initWithObjects_count_([1,2,3], 3)
self.assertEqual(len(o), 3)
self.assert_(isinstance(o, NSMutableSet))
self.assert_(1 in o)
self.assert_(2 in o)
self.assert_(3 in o)
o = NSMutableSet.alloc().initWithObjects_count_([1,2,3], 0)
self.assertEqual(len(o), 0)
self.assert_(isinstance(o, NSMutableSet))
def testMethods(self):
self.assertResultIsBOOL(NSSet.containsObject_)
self.assertResultIsBOOL(NSSet.intersectsSet_)
self.assertResultIsBOOL(NSSet.isEqualToSet_)
self.assertResultIsBOOL(NSSet.isSubsetOfSet_)
self.assertArgIsIn(NSSet.setWithObjects_count_, 0)
self.assertArgSizeInArg(NSSet.setWithObjects_count_, 0, 1)
self.assertArgIsIn(NSSet.initWithObjects_count_, 0)
self.assertArgSizeInArg(NSSet.initWithObjects_count_, 0, 1)
self.assertArgIsBOOL(NSSet.initWithSet_copyItems_, 1)
@min_os_level('10.6')
def testMethods10_6(self):
self.assertArgIsBlock(NSSet.enumerateObjectsUsingBlock_, 0, b'v@o^' + objc._C_NSBOOL)
self.assertArgIsBlock(NSSet.enumerateObjectsWithOptions_usingBlock_, 1, b'v@o^' + objc._C_NSBOOL)
self.assertArgIsBlock(NSSet.objectsPassingTest_, 0, objc._C_NSBOOL + b'@o^' + objc._C_NSBOOL)
self.assertArgIsBlock(NSSet.objectsWithOptions_passingTest_, 1, objc._C_NSBOOL + b'@o^' + objc._C_NSBOOL)
if __name__ == '__main__':
main()
|
fuzzymetaphone.py | sskadamb/csvmatch | 146 | 12757722 | import doublemetaphone
def match(value1, value2):
value1metaphone = doublemetaphone.doublemetaphone(value1)
value2metaphone = doublemetaphone.doublemetaphone(value2)
possibilities = [
value1metaphone[0] == value2metaphone[0],
value1metaphone[0] == value2metaphone[1],
value1metaphone[1] == value2metaphone[0],
value1metaphone[1] == value2metaphone[1] != ''
]
return 1.0 if True in possibilities else 0.0
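# Minimal usage sketch; the names below are illustrative inputs only, not
# fixtures from csvmatch itself.
if __name__ == '__main__':
    print(match('Smith', 'Smyth'))  # phonetically equivalent, expected 1.0
    print(match('Smith', 'Jones'))  # no metaphone overlap, expected 0.0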
|
projectCreation/import_images.py | MattSkiff/aerial_wildlife_detection | 166 | 12757788 | '''
Helper function that imports a set of unlabeled images into the database.
    Works recursively (i.e., with images in nested folders) and with different
    file formats and extensions (.jpg, .JPEG, .png, etc.).
Skips images that have already been added to the database.
Using this script requires the following steps:
1. Make sure your images are of common format and readable by the web
server (i.e., convert camera RAW images first).
2. Copy your image folder into the FileServer's root file directory (i.e.,
corresponding to the path under "staticfiles_dir" in the configuration
*.ini file).
3. Call the script from the AIDE code base on the FileServer instance.
2019-21 <NAME>
'''
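# Example invocation (hypothetical project shortname; default settings path assumed):
#   python projectCreation/import_images.py --project my_project --settings_filepath config/settings.ini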
import os
import argparse
from psycopg2 import sql
from util.helpers import VALID_IMAGE_EXTENSIONS, listDirectory
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Import images into database.')
parser.add_argument('--project', type=str,
help='Shortname of the project to insert the images into.')
parser.add_argument('--settings_filepath', type=str, default='config/settings.ini', const=1, nargs='?',
help='Manual specification of the directory of the settings.ini file; only considered if environment variable unset (default: "config/settings.ini").')
args = parser.parse_args()
# setup
print('Setup...')
if not 'AIDE_CONFIG_PATH' in os.environ:
os.environ['AIDE_CONFIG_PATH'] = str(args.settings_filepath)
from tqdm import tqdm
import datetime
from util.configDef import Config
from modules import Database
currentDT = datetime.datetime.now()
currentDT = '{}-{}-{} {}:{}:{}'.format(currentDT.year, currentDT.month, currentDT.day, currentDT.hour, currentDT.minute, currentDT.second)
config = Config()
dbConn = Database(config)
if not dbConn.canConnect():
raise Exception('Error connecting to database.')
project = args.project
# check if running on file server
imgBaseDir = config.getProperty('FileServer', 'staticfiles_dir')
if not os.path.isdir(imgBaseDir):
raise Exception(f'"{imgBaseDir}" is not a valid directory on this machine. Are you running the script from the file server?')
if not imgBaseDir.endswith(os.sep):
imgBaseDir += os.sep
# locate all images and their base names
print('Locating image paths...')
imgs = set()
imgFiles = listDirectory(imgBaseDir, recursive=True) #glob.glob(os.path.join(imgBaseDir, '**'), recursive=True) #TODO: check if correct
imgFiles = list(imgFiles)
for i in tqdm(imgFiles):
if os.path.isdir(i):
continue
_, ext = os.path.splitext(i)
if ext.lower() not in VALID_IMAGE_EXTENSIONS:
continue
baseName = i.replace(imgBaseDir, '')
imgs.add(baseName)
# ignore images that are already in database
print('Filter images already in database...')
imgs_existing = dbConn.execute(sql.SQL('''
SELECT filename FROM {};
''').format(sql.Identifier(project, 'image')), None, 'all')
if imgs_existing is not None:
imgs_existing = set([i['filename'] for i in imgs_existing])
else:
imgs_existing = set()
imgs = list(imgs.difference(imgs_existing))
imgs = [(i,) for i in imgs]
# push image to database
print('Adding to database...')
dbConn.insert(sql.SQL('''
INSERT INTO {} (filename)
VALUES %s;
''').format(sql.Identifier(project, 'image')),
imgs)
print('Done.') |
sparkmagic/sparkmagic/tests/test_sessionmanager.py | sciserver/sparkmagic | 1,141 | 12757798 | <reponame>sciserver/sparkmagic<filename>sparkmagic/sparkmagic/tests/test_sessionmanager.py<gh_stars>1000+
import atexit
from mock import MagicMock, PropertyMock
from nose.tools import raises, assert_equals
import sparkmagic.utils.configuration as conf
from sparkmagic.livyclientlib.exceptions import SessionManagementException
from sparkmagic.livyclientlib.sessionmanager import SessionManager
@raises(SessionManagementException)
def test_get_client_throws_when_client_not_exists():
manager = get_session_manager()
manager.get_session("name")
def test_get_client():
client = MagicMock()
manager = get_session_manager()
manager.add_session("name", client)
assert_equals(client, manager.get_session("name"))
@raises(SessionManagementException)
def test_delete_client():
client = MagicMock()
manager = get_session_manager()
manager.add_session("name", client)
manager.delete_client("name")
manager.get_session("name")
@raises(SessionManagementException)
def test_delete_client_throws_when_client_not_exists():
manager = get_session_manager()
manager.delete_client("name")
@raises(SessionManagementException)
def test_add_client_throws_when_client_exists():
client = MagicMock()
manager = get_session_manager()
manager.add_session("name", client)
manager.add_session("name", client)
def test_client_names_returned():
client = MagicMock()
manager = get_session_manager()
manager.add_session("name0", client)
manager.add_session("name1", client)
assert_equals({"name0", "name1"}, set(manager.get_sessions_list()))
def test_get_any_client():
client = MagicMock()
manager = get_session_manager()
manager.add_session("name", client)
assert_equals(client, manager.get_any_session())
@raises(SessionManagementException)
def test_get_any_client_raises_exception_with_no_client():
manager = get_session_manager()
manager.get_any_session()
@raises(SessionManagementException)
def test_get_any_client_raises_exception_with_two_clients():
client = MagicMock()
manager = get_session_manager()
manager.add_session("name0", client)
manager.add_session("name1", client)
manager.get_any_session()
def test_clean_up():
client0 = MagicMock()
client1 = MagicMock()
manager = get_session_manager()
manager.add_session("name0", client0)
manager.add_session("name1", client1)
manager.clean_up_all()
client0.delete.assert_called_once_with()
client1.delete.assert_called_once_with()
def test_cleanup_all_sessions_on_exit():
conf.override(conf.cleanup_all_sessions_on_exit.__name__, True)
client0 = MagicMock()
client1 = MagicMock()
manager = get_session_manager()
manager.add_session("name0", client0)
manager.add_session("name1", client1)
atexit._run_exitfuncs()
client0.delete.assert_called_once_with()
client1.delete.assert_called_once_with()
manager.ipython_display.writeln.assert_called_once_with(u"Cleaning up livy sessions on exit is enabled")
def test_cleanup_all_sessions_on_exit_fails():
"""
Cleanup on exit is best effort only. When cleanup fails, the exception is caught and the error is logged.
"""
conf.override(conf.cleanup_all_sessions_on_exit.__name__, True)
client0 = MagicMock()
client1 = MagicMock()
client0.delete.side_effect = Exception('Mocked exception for client1.delete')
manager = get_session_manager()
manager.add_session("name0", client0)
manager.add_session("name1", client1)
atexit._run_exitfuncs()
client0.delete.assert_called_once_with()
client1.delete.assert_not_called()
def test_get_session_id_for_client():
manager = get_session_manager()
manager.get_sessions_list = MagicMock(return_value=["name"])
manager._sessions["name"] = MagicMock()
id = manager.get_session_id_for_client("name")
assert id is not None
def test_get_session_name_by_id_endpoint():
manager = get_session_manager()
id_to_search = "0"
endpoint_to_search = "endpoint"
name_to_search = "name"
name = manager.get_session_name_by_id_endpoint(id_to_search, endpoint_to_search)
assert_equals(None, name)
session = MagicMock()
type(session).id = PropertyMock(return_value=int(id_to_search))
session.endpoint = endpoint_to_search
manager.add_session(name_to_search, session)
name = manager.get_session_name_by_id_endpoint(id_to_search, endpoint_to_search)
assert_equals(name_to_search, name)
def test_get_session_id_for_client_not_there():
manager = get_session_manager()
manager.get_sessions_list = MagicMock(return_value=[])
id = manager.get_session_id_for_client("name")
assert id is None
def get_session_manager():
ipython_display = MagicMock()
return SessionManager(ipython_display)
|
persimmon/view/pins/pin.py | AlvarBer/Persimmon | 206 | 12757843 | from persimmon.view.pins.circularbutton import CircularButton # MYPY HACK
from persimmon.view.util import Type, AbstractWidget, Connection
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.graphics import Color, Ellipse, Line
from kivy.input import MotionEvent
from abc import abstractmethod
Builder.load_file('persimmon/view/pins/pin.kv')
class Pin(CircularButton, metaclass=AbstractWidget):
val = ObjectProperty(None, force_dispatch=True)
block = ObjectProperty()
type_ = ObjectProperty(Type.ANY)
@abstractmethod
def on_touch_down(self, touch: MotionEvent) -> bool:
raise NotImplementedError
@abstractmethod
def on_touch_up(self, touch: MotionEvent) -> bool:
raise NotImplementedError
@abstractmethod
def on_connection_delete(self, connection: Connection):
raise NotImplementedError
@abstractmethod
def connect_pin(self, connection: Connection):
raise NotImplementedError
def typesafe(self, other: 'Pin') -> bool:
""" Tells if a relation between two pins is typesafe. """
if self.block == other.block or self.__class__ == other.__class__:
return False
elif self.type_ == Type.ANY or other.type_ == Type.ANY:
return True # Anything is possible with ANY
else:
return self.type_ == other.type_
# Hack
def on_type_(self, instance: 'Pin', value: Type):
""" If the kv lang was a bit smarted this would not be needed
"""
self.color = value.value
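# Illustration of the `typesafe` rules above (assuming `a` and `b` are
# concrete pins of different classes living on different blocks):
#   a.type_ is Type.ANY or b.type_ is Type.ANY  -> connection allowed
#   a.type_ == b.type_                          -> connection allowed
#   same block, or same pin class               -> connection refused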
|
src/django-nonrel/django/forms/extras/__init__.py | adamjmcgrath/glancydesign | 790 | 12757868 | <gh_stars>100-1000
from widgets import *
|
geomdl/multi.py | Maik93/NURBS-Python | 382 | 12757873 | """
.. module:: Multi
:platform: Unix, Windows
:synopsis: Provides container classes for spline geoemtries
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import abc
import warnings
from functools import partial
from multiprocessing import Value, Lock
from . import abstract
from . import vis
from . import voxelize
from . import utilities
from . import tessellate
from . import _utilities as utl
from .exceptions import GeomdlException
@utl.add_metaclass(abc.ABCMeta)
class AbstractContainer(abstract.GeomdlBase):
""" Abstract class for geometry containers.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`sample_size`
"""
def __init__(self, *args, **kwargs):
self._pdim = 0 if not hasattr(self, '_pdim') else self._pdim # number of parametric dimensions
self._dinit = 0.01 if not hasattr(self, '_dinit') else self._dinit # delta initialization value
super(AbstractContainer, self).__init__(**kwargs)
self._geometry_type = "container"
self._name = self._geometry_type
self._delta = [float(self._dinit) for _ in range(self._pdim)] # evaluation delta
self._elements = [] # list of elements contained
self._vis_component = None # visualization component
self._cache['evalpts'] = []
def __iter__(self):
self._iter_index = 0
return self
def next(self):
return self.__next__()
def __next__(self):
try:
result = self._elements[self._iter_index]
except IndexError:
raise StopIteration
self._iter_index += 1
return result
def __reversed__(self):
return reversed(self._elements)
def __getitem__(self, index):
return self._elements[index]
def __len__(self):
return len(self._elements)
def __add__(self, other):
if not isinstance(other, self.__class__):
raise GeomdlException("Cannot add non-matching container types")
self.add(other)
return self
@property
def pdimension(self):
""" Parametric dimension.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the parametric dimension
:type: int
"""
return self._pdim
@property
def evalpts(self):
""" Evaluated points.
Since multiple geometry objects are contained in the container, the evaluated points are returned as a
list of per-geometry point lists, where each point is a list of Cartesian coordinates.
The following code example illustrates these details:
.. code-block:: python
:linenos:
multi_obj = multi.SurfaceContainer() # it can also be multi.CurveContainer()
# Add geometries to multi_obj via multi_obj.add() method
# Then, the following loop will print all the evaluated points of the Multi object
for idx, mpt in enumerate(multi_obj.evalpts):
print("Shape", idx+1, "contains", len(mpt), "points. These points are:")
for pt in mpt:
line = ", ".join([str(p) for p in pt])
print(line)
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the evaluated points of all contained geometries
"""
if not self._cache['evalpts']:
for elem in self._elements:
elem.delta = self._delta[0] if self._pdim == 1 else self._delta
evalpts = elem.evalpts
self._cache['evalpts'] += evalpts
return self._cache['evalpts']
@property
def bbox(self):
""" Bounding box.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the bounding box of all contained geometries
"""
all_box = []
for elem in self._elements:
all_box += list(elem.bbox)
return utilities.evaluate_bounding_box(all_box)
@property
def vis(self):
""" Visualization component.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the visualization component
:setter: Sets the visualization component
"""
return self._vis_component
@vis.setter
def vis(self, value):
if not isinstance(value, vis.VisAbstract):
warnings.warn("Visualization component is NOT an instance of the vis.VisAbstract class")
return
self._vis_component = value
@property
def delta(self):
""" Evaluation delta (for all parametric directions).
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
Therefore; smaller the delta value, smoother the shape.
The following figure illustrates the working principles of the delta property:
.. math::
\\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right]
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value
:setter: Sets the delta value
"""
return self._delta[0] if self._pdim == 1 else self._delta
@delta.setter
def delta(self, value):
if self._pdim == 1 and isinstance(value, (int, float)):
delta_vals = [value]
else:
if isinstance(value, (list, tuple)):
if len(value) != self._pdim:
raise ValueError("The input must be a list of a tuple with a length of " + str(self._pdim))
delta_vals = value
elif isinstance(value, (int, float)):
delta_vals = [value for _ in range(self._pdim)]
else:
raise TypeError("Unsupported input type for evaluation delta. Use float, list or tuple")
# Set delta values
for idx, dval in enumerate(delta_vals):
self._delta_setter_common(idx, dval)
# Reset the cache
self.reset()
def _delta_setter_common(self, idx, value):
# Check and set the delta value corresponding to the idx-th parametric dimension
if float(value) <= 0 or float(value) >= 1:
raise ValueError("Evaluation delta should be between 0.0 and 1.0. You are trying to set it to " + str(value)
+ " for the " + str(idx + 1) + "st parametric dimension.")
self._delta[idx] = float(value)
@property
def sample_size(self):
""" Sample size (for all parametric directions).
Sample size defines the number of points to evaluate. It also sets the ``delta`` property.
The following figure illustrates the working principles of sample size property:
.. math::
\\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}}
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size
:setter: Sets sample size
"""
ssz = [self._sample_size_getter_common(idx) for idx in range(self._pdim)]
return ssz[0] if self._pdim == 1 else ssz
@sample_size.setter
def sample_size(self, value):
if self._pdim == 1 and isinstance(value, (int, float)):
ssz = [value]
else:
if isinstance(value, (list, tuple)):
if len(value) != self._pdim:
raise ValueError("The input must be a list of a tuple with a length of " + str(self._pdim))
ssz = value
elif isinstance(value, (int, float)):
ssz = [value for _ in range(self._pdim)]
else:
raise TypeError("Unsupported input type for sample size. Use float, list or tuple")
# Set sample size
for idx, sval in enumerate(ssz):
self._sample_size_setter_common(idx, sval)
# Reset the cache
self.reset()
def _sample_size_getter_common(self, idx):
return int(1 / self._delta[idx]) + 1
def _sample_size_setter_common(self, idx, value):
# Check and set the delta value corresponding to the idx-th parametric dimension
if not isinstance(value, int):
raise GeomdlException("Sample size must be an integer value bigger than 2")
if value < 2:
raise GeomdlException("Sample size must be an integer value bigger than 2")
self._delta[idx] = 1.0 / float(value - 1)
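# Worked example of the delta <-> sample_size relation used above
# (illustration only):
#   sample_size = 21   ->  delta = 1.0 / (21 - 1) = 0.05
#   delta = 0.05       ->  sample_size = int(1 / 0.05) + 1 = 21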
@property
def data(self):
""" Returns a dict which contains the geometry data.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
"""
return tuple([e.data for e in self._elements])
def add(self, element):
""" Adds geometry objects to the container.
The input can be a single geometry, a list of geometry objects or a geometry container object.
:param element: geometry object
"""
if isinstance(element, (self.__class__, list, tuple)):
for elem in element:
self.add(elem)
elif hasattr(self, '_pdim'):
if element.pdimension == self.pdimension:
if self.dimension == 0:
self._dimension = element.dimension
else:
if self.dimension != element.dimension:
raise GeomdlException("The spatial dimensions of the container and the input must be the same")
self._elements.append(element)
else:
raise GeomdlException("Cannot add the element to the container")
# Reset the cache
self.reset()
# Make container look like a list
append = add
def reset(self):
""" Resets the cache. """
self._cache['evalpts'][:] = []
# Runs visualization component to render the surface
@abc.abstractmethod
def render(self, **kwargs):
""" Renders plots using the visualization component.
.. note::
This is an abstract method and it must be implemented in the subclass.
"""
pass
@utl.export
class CurveContainer(AbstractContainer):
""" Container class for storing multiple curves.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`sample_size`
The following code example illustrates the usage of the Python properties:
.. code-block:: python
# Create a multi-curve container instance
mcrv = multi.CurveContainer()
# Add single or multi curves to the multi container using mcrv.add() command
# Addition operator, e.g. mcrv1 + mcrv2, also works
# Set the evaluation delta of the multi-curve
mcrv.delta = 0.05
# Get the evaluated points
curve_points = mcrv.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 1 # number of parametric dimensions
self._dinit = 0.01 # evaluation delta
super(CurveContainer, self).__init__(*args, **kwargs)
for arg in args:
self.add(arg)
def render(self, **kwargs):
""" Renders the curves.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grid
* ``evalcolor``: sets the color of the surface
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the curves inside the container. *Default: False*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
"""
if not self._vis_component:
warnings.warn("No visualization component has set")
return
# Get the color values from keyword arguments
cpcolor = kwargs.get('cpcolor')
evalcolor = kwargs.get('evalcolor')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
animate_plot = kwargs.get('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.get('delta', True)
reset_names = kwargs.get('reset_names', False)
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) < len(self._elements):
raise ValueError("The number of color values in 'cpcolor' (" + str(len(cpcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) < len(self._elements):
raise ValueError("The number of color values in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
if update_delta:
elem.delta = self.delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "curve"
# Fix element name
if elem.name == "curve":
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(cpcolor, evalcolor, idx=idx)
self._vis_component.add(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
self._vis_component.add(ptsarr=elem.evalpts, name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
@utl.export
class SurfaceContainer(AbstractContainer):
""" Container class for storing multiple surfaces.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type` = container
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`tessellator`
* :py:attr:`vertices`
* :py:attr:`faces`
The following code example illustrates the usage of these Python properties:
.. code-block:: python
# Create a multi-surface container instance
msurf = multi.SurfaceContainer()
# Add single or multi surfaces to the multi container using msurf.add() command
# Addition operator, e.g. msurf1 + msurf2, also works
# Set the evaluation delta of the multi-surface
msurf.delta = 0.05
# Get the evaluated points
surface_points = msurf.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 2 # number of parametric dimensions
self._dinit = 0.05 # evaluation delta
super(SurfaceContainer, self).__init__(*args, **kwargs)
self._cache['vertices'] = []
self._cache['faces'] = []
for arg in args:
self.add(arg)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
Therefore; smaller the delta, smoother the shape.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the u-direction
:setter: Sets the delta value for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
self._delta_setter_common(0, value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
Therefore; smaller the delta, smoother the shape.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the v-direction
:setter: Sets the delta value for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
self._delta_setter_common(1, value)
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return self._sample_size_getter_common(0)
@sample_size_u.setter
def sample_size_u(self, value):
self._sample_size_setter_common(0, value)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return self._sample_size_getter_common(1)
@sample_size_v.setter
def sample_size_v(self, value):
self._sample_size_setter_common(1, value)
@property
def tessellator(self):
""" Tessellation component of the surfaces inside the container.
Please refer to :doc:`Tessellation <module_tessellate>` documentation for details.
.. code-block:: python
:linenos:
from geomdl import multi
from geomdl import tessellate
# Create the surface container
surf_container = multi.SurfaceContainer(surf_list)
# Set tessellator component
surf_container.tessellator = tessellate.TrimTessellate()
:getter: gets the tessellation component
:setter: sets the tessellation component
"""
tsl_comps = []
for idx in range(len(self._elements)):
tsl_comps.append(self._elements[idx].tessellator)
return tsl_comps
@tessellator.setter
def tessellator(self, value):
# Set tessellation component
for idx in range(len(self._elements)):
self._elements[idx].tessellator = value.__class__()
@property
def vertices(self):
""" Vertices generated by the tessellation operation.
If the tessellation component is set to None, the result will be an empty list.
:getter: Gets the vertices
"""
if not self._cache['vertices']:
self.tessellate()
return self._cache['vertices']
@property
def faces(self):
""" Faces (triangles, quads, etc.) generated by the tessellation operation.
If the tessellation component is set to None, the result will be an empty list.
:getter: Gets the faces
"""
if not self._cache['faces']:
self.tessellate()
return self._cache['faces']
def tessellate(self, **kwargs):
""" Tessellates the surfaces inside the container.
Keyword arguments are directly passed to the tessellation component.
The following code snippet illustrates getting the vertices and faces of the surfaces inside the container:
.. code-block:: python
:linenos:
# Tessellate the surfaces inside the container
surf_container.tessellate()
# Vertices and faces are stored inside the tessellator component
tsl = surf_container.tessellator
# Loop through all tessellator components
for t in tsl:
# Get the vertices
vertices = t.tessellator.vertices
# Get the faces (triangles, quads, etc.)
faces = t.tessellator.faces
Keyword Arguments:
* ``num_procs``: number of concurrent processes for tessellating the surfaces. *Default: 1*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``force``: flag to force tessellation. *Default: False*
"""
# Keyword arguments
force_tsl = kwargs.get('force', False)
update_delta = kwargs.pop('delta', True)
# Don't re-tessellate if everything is in place
if all((self._cache['vertices'], self._cache['faces'])) and not force_tsl:
return
# Tessellate the surfaces in the container
num_procs = kwargs.pop('num_procs', 1)
new_elems = []
if num_procs > 1:
with utl.pool_context(processes=num_procs) as pool:
tmp_elem = pool.map(partial(process_tessellate, delta=self.delta, update_delta=update_delta, **kwargs),
self._elements)
new_elems += tmp_elem
else:
for idx in range(len(self._elements)):
tmp_elem = process_tessellate(self._elements[idx], delta=self.delta, update_delta=update_delta, **kwargs)
new_elems.append(tmp_elem)
self._elements = new_elems
# Update caches
verts = []
faces = []
v_offset = 0
f_offset = 0
for elem in self._elements:
v = elem.vertices
for i in range(len(v)):
v[i].id += v_offset
verts += v
f = elem.faces
for i in range(len(f)):
f[i].id += f_offset
# for j in range(len(f[i]._data)):
# f[i]._data[j].id += v_offset
faces += f
v_offset += len(v)
f_offset += len(f)
self._cache['vertices'] = verts
self._cache['faces'] = faces
def reset(self):
""" Resets the cache. """
super(SurfaceContainer, self).reset()
self._cache['vertices'][:] = []
self._cache['faces'][:] = []
def render(self, **kwargs):
""" Renders the surfaces.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points grids
* ``evalcolor``: sets the color of the surface
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``colormap``: sets the colormap of the surfaces
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the surfaces inside the container. *Default: False*
* ``num_procs``: number of concurrent processes for rendering the surfaces. *Default: 1*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
Please note that ``colormap`` argument can only work with visualization classes that support colormaps. As an
example, please see :py:class:`.VisMPL.VisSurfTriangle()` class documentation. This method expects multiple
colormap inputs as a list or tuple, preferable the input list size is the same as the number of surfaces
contained in the class. In the case of number of surfaces is bigger than number of input colormaps, this method
will automatically assign a random color for the remaining surfaces.
"""
# Validation
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
# Get the color values from keyword arguments
cpcolor = kwargs.get('cpcolor')
evalcolor = kwargs.get('evalcolor')
trimcolor = kwargs.get('trimcolor', 'black')
filename = kwargs.get('filename', None)
plot_visible = kwargs.get('plot', True)
animate_plot = kwargs.get('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.get('delta', True)
reset_names = kwargs.get('reset_names', False)
# Number of parallel processes
num_procs = kwargs.get('num_procs', 1)
force_tsl = bool(kwargs.pop('force', False)) # flag to force re-tessellation
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) != len(self._elements):
raise ValueError("The number of colors in 'cpcolor' (" + str(len(cpcolor)) +
") cannot be less than the number of geometries contained(" +
str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) != len(self._elements):
raise ValueError("The number of colors in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Get colormaps as a list
surf_cmaps = kwargs.get('colormap', [])
if not isinstance(surf_cmaps, (list, tuple)):
warnings.warn("Expecting a list of colormap values, not " + str(type(surf_cmaps)))
surf_cmaps = []
# Run the visualization component
self._vis_component.clear()
vis_list = []
if num_procs > 1:
mp_lock = Lock()
mp_val = Value('i', 0)
with utl.pool_context(initializer=mp_init, initargs=(mp_lock, mp_val), processes=num_procs) as pool:
tmp = pool.map(partial(process_elements_surface, mconf=self._vis_component.mconf,
colorval=(cpcolor, evalcolor, trimcolor), idx=-1, force_tsl=force_tsl,
update_delta=update_delta, delta=self.delta, reset_names=reset_names),
self._elements)
vis_list += tmp
else:
for idx, elem in enumerate(self._elements):
tmp = process_elements_surface(elem, self._vis_component.mconf, (cpcolor, evalcolor, trimcolor),
idx, force_tsl, update_delta, self.delta, reset_names)
vis_list += tmp
for vl in vis_list:
if isinstance(vl, dict):
self._vis_component.add(**vl)
else:
for v in vl:
self._vis_component.add(**v)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmaps)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible, colormap=surf_cmaps)
@utl.export
class VolumeContainer(AbstractContainer):
""" Container class for storing multiple volumes.
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in
a for loop.
This class provides the following properties:
* :py:attr:`type`
* :py:attr:`id`
* :py:attr:`name`
* :py:attr:`dimension`
* :py:attr:`opt`
* :py:attr:`pdimension`
* :py:attr:`evalpts`
* :py:attr:`bbox`
* :py:attr:`vis`
* :py:attr:`delta`
* :py:attr:`delta_u`
* :py:attr:`delta_v`
* :py:attr:`delta_w`
* :py:attr:`sample_size`
* :py:attr:`sample_size_u`
* :py:attr:`sample_size_v`
* :py:attr:`sample_size_w`
The following code example illustrates the usage of these Python properties:
.. code-block:: python
# Create a multi-volume container instance
mvol = multi.VolumeContainer()
# Add single or multi volumes to the multi container using mvol.add() command
# Addition operator, e.g. mvol1 + mvol2, also works
# Set the evaluation delta of the multi-volume
mvol.delta = 0.05
# Get the evaluated points
volume_points = mvol.evalpts
"""
def __init__(self, *args, **kwargs):
self._pdim = 3 # number of parametric dimensions
self._dinit = 0.1 # evaluation delta
super(VolumeContainer, self).__init__()
for arg in args:
self.add(arg)
@property
def delta_u(self):
""" Evaluation delta for the u-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
Therefore; smaller the delta, smoother the shape.
Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the u-direction
:setter: Sets the delta value for the u-direction
:type: float
"""
return self._delta[0]
@delta_u.setter
def delta_u(self, value):
self._delta_setter_common(0, value)
@property
def delta_v(self):
""" Evaluation delta for the v-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
Therefore; smaller the delta, smoother the shape.
Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the v-direction
:setter: Sets the delta value for the v-direction
:type: float
"""
return self._delta[1]
@delta_v.setter
def delta_v(self, value):
self._delta_setter_common(1, value)
@property
def delta_w(self):
""" Evaluation delta for the w-direction.
Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points.
Therefore; smaller the delta, smoother the shape.
Please note that ``delta_w`` and ``sample_size_w`` properties correspond to the same variable with different
descriptions. Therefore, setting ``delta_w`` will also set ``sample_size_w``.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets the delta value for the w-direction
:setter: Sets the delta value for the w-direction
:type: float
"""
return self._delta[2]
@delta_w.setter
def delta_w(self, value):
self._delta_setter_common(2, value)
@property
def sample_size_u(self):
""" Sample size for the u-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the u-direction
:setter: Sets sample size for the u-direction
:type: int
"""
return self._sample_size_getter_common(0)
@sample_size_u.setter
def sample_size_u(self, value):
self._sample_size_setter_common(0, value)
@property
def sample_size_v(self):
""" Sample size for the v-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the v-direction
:setter: Sets sample size for the v-direction
:type: int
"""
return self._sample_size_getter_common(1)
@sample_size_v.setter
def sample_size_v(self, value):
self._sample_size_setter_common(1, value)
@property
def sample_size_w(self):
""" Sample size for the w-direction.
Sample size defines the number of points to evaluate. It also sets the ``delta_w`` property.
Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details
on using this class member.
:getter: Gets sample size for the w-direction
:setter: Sets sample size for the w-direction
:type: int
"""
return self._sample_size_getter_common(2)
@sample_size_w.setter
def sample_size_w(self, value):
self._sample_size_setter_common(2, value)
def render(self, **kwargs):
""" Renders the volumes.
The visualization component must be set using :py:attr:`~vis` property before calling this method.
Keyword Arguments:
* ``cpcolor``: sets the color of the control points plot
* ``evalcolor``: sets the color of the volume
* ``filename``: saves the plot with the input name
* ``plot``: controls plot window visibility. *Default: True*
* ``animate``: activates animation (if supported). *Default: False*
* ``delta``: if True, the evaluation delta of the container object will be used. *Default: True*
* ``reset_names``: resets the name of the volumes inside the container. *Default: False*
* ``grid_size``: grid size for voxelization. *Default: (16, 16, 16)*
* ``num_procs``: number of concurrent processes for voxelization. *Default: 1*
The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color
values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be
a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows
customization over the color values. If none provided, a random color will be selected.
The ``plot`` argument is useful when you would like to work on the command line without any window context.
If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables
plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the
configuration class.
"""
if not self._vis_component:
warnings.warn("No visualization component has been set")
return
cpcolor = kwargs.pop('cpcolor', None)
evalcolor = kwargs.pop('evalcolor', None)
filename = kwargs.pop('filename', None)
plot_visible = kwargs.pop('plot', True)
animate_plot = kwargs.pop('animate', False)
# Flag to control evaluation delta updates
update_delta = kwargs.pop('delta', True)
reset_names = kwargs.get('reset_names', False)
# Check if the input list sizes are equal
if isinstance(cpcolor, (list, tuple)):
if len(cpcolor) != len(self._elements):
raise ValueError("The number of colors in 'cpcolor' (" + str(len(cpcolor)) +
") cannot be less than the number of geometries contained(" +
str(len(self._elements)) + ")")
if isinstance(evalcolor, (list, tuple)):
if len(evalcolor) != len(self._elements):
raise ValueError("The number of colors in 'evalcolor' (" + str(len(evalcolor)) +
") cannot be less than the number of geometries contained ("
+ str(len(self._elements)) + ")")
# Run the visualization component
self._vis_component.clear()
for idx, elem in enumerate(self._elements):
if update_delta:
elem.delta = self.delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "volume"
# Fix element name
if elem.name == "volume":
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(cpcolor, evalcolor, idx=idx)
# Add control points
if self._vis_component.mconf['ctrlpts'] == 'points':
self._vis_component.add(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
# Add evaluated points
if self._vis_component.mconf['evalpts'] == 'points':
self._vis_component.add(ptsarr=elem.evalpts, name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Add evaluated points as voxels
if self._vis_component.mconf['evalpts'] == 'voxels':
grid, filled = voxelize.voxelize(elem, **kwargs)
polygrid = voxelize.convert_bb_to_faces(grid)
self._vis_component.add(ptsarr=[polygrid, filled], name=elem.name,
color=color[1], plot_type='evalpts', idx=idx)
# Display the figures
if animate_plot:
self._vis_component.animate(fig_save_as=filename, display_plot=plot_visible)
else:
self._vis_component.render(fig_save_as=filename, display_plot=plot_visible)
def select_color(cpcolor, evalcolor, idx=0):
""" Selects item color for plotting.
:param cpcolor: color for control points grid item
:type cpcolor: str, list, tuple
:param evalcolor: color for evaluated points grid item
:type evalcolor: str, list, tuple
:param idx: index of the current geometry object
:type idx: int
:return: a list of color values
:rtype: list
"""
# Random colors by default
color = utilities.color_generator()
# Constant color for control points grid
if isinstance(cpcolor, str):
color[0] = cpcolor
# User-defined color for control points grid
if isinstance(cpcolor, (list, tuple)):
color[0] = cpcolor[idx]
# Constant color for evaluated points grid
if isinstance(evalcolor, str):
color[1] = evalcolor
# User-defined color for evaluated points grid
if isinstance(evalcolor, (list, tuple)):
color[1] = evalcolor[idx]
return color
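# Example behavior (illustration only):
#   select_color('red', ('green', 'blue'), idx=1)  ->  ['red', 'blue']
#   select_color(None, None)  ->  random colors from utilities.color_generator()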
def process_tessellate(elem, update_delta, delta, **kwargs):
""" Tessellates surfaces.
.. note:: Helper function required for ``multiprocessing``
:param elem: surface
:type elem: abstract.Surface
:param update_delta: flag to control evaluation delta updates
:type update_delta: bool
:param delta: evaluation delta
:type delta: list, tuple
:return: updated surface
:rtype: abstract.Surface
"""
if update_delta:
elem.delta = delta
elem.evaluate()
elem.tessellate(**kwargs)
return elem
def process_elements_surface(elem, mconf, colorval, idx, force_tsl, update_delta, delta, reset_names):
""" Processes visualization elements for surfaces.
.. note:: Helper function required for ``multiprocessing``
:param elem: surface
:type elem: abstract.Surface
:param mconf: visualization module configuration
:type mconf: dict
:param colorval: color values
:type colorval: tuple
:param idx: index of the surface
:type idx: int
:param force_tsl: flag to force re-tessellation
:type force_tsl: bool
:param update_delta: flag to update surface delta
:type update_delta: bool
:param delta: new surface evaluation delta
:type delta: list, tuple
:param reset_names: flag to reset names
:type reset_names: bool
:return: visualization elements (as a list of dicts)
:rtype: list
"""
if idx < 0:
lock.acquire()
idx = counter.value
counter.value += 1
lock.release()
if update_delta:
elem.delta = delta
elem.evaluate()
# Reset element name
if reset_names:
elem.name = "surface"
# Fix element name
if elem.name == "surface" and idx >= 0:
elem.name = elem.name + " " + str(idx)
# Color selection
color = select_color(colorval[0], colorval[1], idx=idx)
# Initialize the return list
rl = []
# Add control points
if mconf['ctrlpts'] == 'points':
ret = dict(ptsarr=elem.ctrlpts, name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
rl.append(ret)
# Add control points as quads
if mconf['ctrlpts'] == 'quads':
qtsl = tessellate.QuadTessellate()
qtsl.tessellate(elem.ctrlpts, size_u=elem.ctrlpts_size_u, size_v=elem.ctrlpts_size_v)
ret = dict(ptsarr=[qtsl.vertices, qtsl.faces], name=(elem.name, "(CP)"),
color=color[0], plot_type='ctrlpts', idx=idx)
rl.append(ret)
# Add surface points
if mconf['evalpts'] == 'points':
ret = dict(ptsarr=elem.evalpts, name=(elem.name, idx), color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add surface points as quads
if mconf['evalpts'] == 'quads':
qtsl = tessellate.QuadTessellate()
qtsl.tessellate(elem.evalpts, size_u=elem.sample_size_u, size_v=elem.sample_size_v)
ret = dict(ptsarr=[qtsl.vertices, qtsl.faces],
name=elem.name, color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add surface points as vertices and triangles
if mconf['evalpts'] == 'triangles':
elem.tessellate(force=force_tsl)
ret = dict(ptsarr=[elem.tessellator.vertices, elem.tessellator.faces],
name=elem.name, color=color[1], plot_type='evalpts', idx=idx)
rl.append(ret)
# Add the trim curves
for itc, trim in enumerate(elem.trims):
ret = dict(ptsarr=elem.evaluate_list(trim.evalpts), name=("trim", itc),
color=colorval[2], plot_type='trimcurve', idx=idx)
rl.append(ret)
# Return the list
return rl
def mp_init(l, c):
""" Initialization function for multi-threaded operations.
:param l: lock
:param c: value for common counter
"""
global lock
global counter
lock = l
counter = c
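# ----------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the module). It
# assumes `surf1` and `surf2` are already-constructed surfaces, e.g.
# geomdl.BSpline.Surface instances with control points and knot vectors
# set elsewhere:
#
#   container = SurfaceContainer(surf1, surf2)
#   container.delta = 0.025                 # sets delta_u and delta_v
#   # sample_size follows from delta: int(1 / 0.025) + 1 == 41
#   points = container.evalpts              # evaluated points of both surfaces
#   container.tessellate(num_procs=2)       # vertices/faces per surface
# ----------------------------------------------------------------------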
|
tests/nnapi/specs/Ex/transpose_conv_ex_float_1.mod.py | bogus-sudo/ONE-1 | 255 | 12757886 | <gh_stars>100-1000
# model
model = Model()
i0 = Input("op_shape", "TENSOR_INT32", "{4}")
weights = Parameter("ker", "TENSOR_FLOAT32", "{1, 3, 3, 1}", [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}" )
pad = Int32Scalar("pad_same", 1)
s_x = Int32Scalar("stride_x", 1)
s_y = Int32Scalar("stride_y", 1)
i2 = Output("op", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
model = model.Operation("TRANSPOSE_CONV_EX", i0, weights, i1, pad, s_x, s_y).To(i2)
# Example 1. Input in operand 0,
input0 = {i0: # output shape
[1, 4, 4, 1],
i1: # input 0
[1.0, 2.0, 3.0, 4.0,
5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0]}
output0 = {i2: # output 0
[29.0, 62.0, 83.0, 75.0,
99.0, 192.0, 237.0, 198.0,
207.0, 372.0, 417.0, 330.0,
263.0, 446.0, 485.0, 365.0]}
# Instantiate an example
Example((input0, output0))
|
src/datashare/azext_datashare/vendored_sdks/datashare/aio/operations_async/__init__.py | Mannan2812/azure-cli-extensions | 207 | 12757896 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._account_operations_async import AccountOperations
from ._consumer_invitation_operations_async import ConsumerInvitationOperations
from ._data_set_operations_async import DataSetOperations
from ._data_set_mapping_operations_async import DataSetMappingOperations
from ._invitation_operations_async import InvitationOperations
from ._operation_operations_async import OperationOperations
from ._share_operations_async import ShareOperations
from ._provider_share_subscription_operations_async import ProviderShareSubscriptionOperations
from ._share_subscription_operations_async import ShareSubscriptionOperations
from ._consumer_source_data_set_operations_async import ConsumerSourceDataSetOperations
from ._synchronization_setting_operations_async import SynchronizationSettingOperations
from ._trigger_operations_async import TriggerOperations
__all__ = [
'AccountOperations',
'ConsumerInvitationOperations',
'DataSetOperations',
'DataSetMappingOperations',
'InvitationOperations',
'OperationOperations',
'ShareOperations',
'ProviderShareSubscriptionOperations',
'ShareSubscriptionOperations',
'ConsumerSourceDataSetOperations',
'SynchronizationSettingOperations',
'TriggerOperations',
]
|
src/bitmessageqt/messagecompose.py | BeholdersEye/PyBitmessage | 1,583 | 12757902 | """
Message editor with mouse wheel zoom functionality
"""
# pylint: disable=bad-continuation
from PyQt4 import QtCore, QtGui
class MessageCompose(QtGui.QTextEdit):
"""Editor class with wheel zoom functionality"""
def __init__(self, parent=0):
super(MessageCompose, self).__init__(parent)
self.setAcceptRichText(False)
self.defaultFontPointSize = self.currentFont().pointSize()
def wheelEvent(self, event):
"""Mouse wheel scroll event handler"""
if (
QtGui.QApplication.queryKeyboardModifiers() & QtCore.Qt.ControlModifier
) == QtCore.Qt.ControlModifier and event.orientation() == QtCore.Qt.Vertical:
if event.delta() > 0:
self.zoomIn(1)
else:
self.zoomOut(1)
zoom = self.currentFont().pointSize() * 100 / self.defaultFontPointSize
QtGui.QApplication.activeWindow().statusBar().showMessage(
QtGui.QApplication.translate("MainWindow", "Zoom level %1%").arg(
str(zoom)
)
)
else:
# in QTextEdit, super does not zoom, only scroll
super(MessageCompose, self).wheelEvent(event)
def reset(self):
"""Clear the edit content"""
self.setText('')
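# Hedged usage sketch (illustration only): the widget is used like any
# QTextEdit inside a PyQt4 layout; Ctrl + mouse wheel then zooms the text
# instead of scrolling, and reset() clears the content.
#
#   editor = MessageCompose(parent=some_widget)
#   some_layout.addWidget(editor)
#   editor.reset()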
|
py/testdir_multi_jvm/test_many_cols_enum_multi.py | gigliovale/h2o | 882 | 12757903 | <reponame>gigliovale/h2o<filename>py/testdir_multi_jvm/test_many_cols_enum_multi.py
import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
import h2o_exec as h2e
def write_syn_dataset(csvPathname, rowCount, colCount, header, SEED):
r1 = random.Random(SEED)
dsf = open(csvPathname, "w+")
for i in range(rowCount):
rowData = []
for j in range(colCount):
# header names need to be unique
if header and i==0:
r = "a" + str(j)
else:
r = "a"
rowData.append(r)
rowDataCsv = ",".join(map(str,rowData))
dsf.write(rowDataCsv + "\n")
dsf.close()
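# Example of the generated file for rowCount=3, colCount=4 (illustration only):
#   with header=1:          with header=0:
#     a0,a1,a2,a3             a,a,a,a
#     a,a,a,a                 a,a,a,a
#     a,a,a,a                 a,a,a,a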
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(3, java_heap_GB=4)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_cols_enum(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
# (100, 11000, 0, 'cA', 180),
# (100, 10000, 1, 'cB', 180),
# (100, 8000, 1, 'cD', 180),
# (100, 7000, 0, 'cE', 180),
# (100, 6000, 1, 'cF', 180),
(100, 1000, 0, 'cH', 120),
(100, 1000, 1, 'cI', 120),
(100, 2000, 1, 'cI', 120),
(100, 3000, 1, 'cI', 120),
(100, 4000, 1, 'cI', 120),
(100, 5000, 0, 'cG', 180),
(100, 9000, 0, 'cC', 180),
(100, 10000, 1, 'cB', 180),
]
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
cnum = 0
# it's interesting to force the first enum row to be used as header or not
# with many cols, we tend to hit limits about stuff fitting in a chunk (header or data)
for (rowCount, colCount, header, hex_key, timeoutSecs) in tryList:
cnum += 1
csvFilename = 'syn_' + str(SEED) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
write_syn_dataset(csvPathname, rowCount, colCount, header, SEED)
parseResult = h2i.import_parse(path=csvPathname, schema='put', header=header,
hex_key=hex_key, timeoutSecs=timeoutSecs)
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
if not h2o.browse_disable:
h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
time.sleep(5)
# try new offset/view
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], offset=100, view=100)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], offset=99, view=89)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], offset=-1, view=53)
if __name__ == '__main__':
h2o.unit_main()
|
test_install.py | flothesof/sfepy | 510 | 12757913 | <filename>test_install.py
#!/usr/bin/env python
"""
Simple script for testing various SfePy functionality, examples not
covered by tests, and running the tests.
The script just runs the commands specified in its main() using the
`subprocess` module, captures the output and compares one or more key
words to the expected ones.
The output of failed commands is saved to 'test_install.log' file.
"""
from __future__ import print_function
from __future__ import absolute_import
import time
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import shlex
import subprocess
import logging
import re
DEBUG_FMT = '*' * 55 + '\n%s\n' + '*' * 55
def _get_logger(filename='test_install.log'):
"""
Convenience function to set-up output and logging.
"""
logger = logging.getLogger('test_install.py')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler(filename)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
logger = _get_logger()
def check_output(cmd):
"""
Run the specified command and capture its outputs.
Returns
-------
out : tuple
The (stdout, stderr) output tuple.
"""
logger.info(cmd)
args = shlex.split(cmd)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = [ii.decode() for ii in p.communicate()]
return out
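# Example (illustration only, mirrors the calls in main() below):
#   out, err = check_output('python3 ./script/blockgen.py')
#   # `out` holds the captured stdout, `err` the captured stderr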
def report(out, name, line, item, value, eps=None, return_item=False,
match_numbers=False):
"""
Check that `item` at `line` of the output string `out` is equal
to `value`. If not, print the output.
"""
try:
if match_numbers:
status = out.split('\n')[line]
else:
status = out.split('\n')[line].split()
except IndexError:
logger.error(' not enough output from command!')
ok = False
else:
try:
if match_numbers:
pat = '([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?[jJ]?)'
matches = re.findall(pat, status)
status_item = matches[item]
else:
status_item = status[item]
logger.info(' comparing: %s %s', status_item, value)
if eps is None:
ok = (status_item == value)
else:
try:
ok = abs(float(status_item) - float(value)) < eps
except:
ok = False
except IndexError:
ok = False
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, status[item]
else:
return ok
def report2(out, name, items, return_item=False):
"""
Check that `items` are in the output string `out`.
If not, print the output.
"""
ok = True
for s in items:
logger.info(' checking: %s', s)
if s not in out:
ok = False
break
logger.info(' %s: %s', name, ok)
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, s
else:
return ok
def report_tests(out, return_item=False):
"""
Check that all tests in the output string `out` passed.
If not, print the output.
"""
search = re.compile('([0-9]+) test file\(s\) executed in ([0-9.]+) s, ([0-9]+) failure\(s\) of ([0-9]+) test\(s\)').search
try:
stats = search(out).groups()
except AttributeError:
stats = '0', '0', '-1', '0'
ok = False
ok = stats[2] == '0'
logger.info(' %s test file(s) executed in %s s, %s failure(s) of %s test(s)'
% (stats[0], stats[1], stats[2], stats[3]))
if not ok:
logger.debug(DEBUG_FMT, out)
if return_item:
return ok, stats[2]
else:
return ok
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.parse_args()
fd = open('test_install.log', 'w')
fd.close()
if sys.version_info[0] < 3:
cmd = 'python2'
else:
cmd = 'python3'
eok = 0
t0 = time.time()
out, err = check_output('%s ./script/blockgen.py' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./script/cylindergen.py' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./script/convert_mesh.py meshes/3d/cylinder.vtk out.mesh' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./script/tile_periodic_mesh.py -r 2,2 meshes/elements/2_4_2.mesh out-per.mesh' % cmd)
eok += report(out, '...', -2, 1, 'done.')
out, err = check_output('%s ./script/extract_surface.py meshes/various_formats/octahedron.node -' % cmd)
eok += report(out, '...', -2, 0, '1185')
out, err = check_output('%s ./simple.py examples/diffusion/poisson.py' % cmd)
eok += report(out, '...', -3, 5, '1.173819e-16', eps=1e-15)
out, err = check_output("""%s ./simple.py -c "ebc_2 : {'name' : 't2', 'region' : 'Gamma_Right', 'dofs' : {'t.0' : -5.0}}" examples/diffusion/poisson.py""" %cmd)
eok += report(out, '...', -3, 5, '2.308051e-16', eps=1e-15)
out, err = check_output('%s ./simple.py examples/diffusion/poisson_iga.py' % cmd)
eok += report(out, '...', -3, 5, '3.373487e-15', eps=1e-14)
out, err = check_output('%s ./simple.py examples/navier_stokes/stokes.py' % cmd)
eok += report(out, '...', -3, 5, '1.210678e-13', eps=1e-11)
out, err = check_output('%s ./simple.py examples/diffusion/poisson_parametric_study.py' % cmd)
eok += report(out, '...', -3, 5, '1.606408e-14', eps=1e-13)
out, err = check_output('%s ./simple.py examples/linear_elasticity/its2D_3.py' % cmd)
eok += report(out, '...', -24, 5, '3.964886e-12', eps=1e-11)
eok += report(out, '...', -4, 4, '2.58660e+01', eps=1e-5)
out, err = check_output('%s ./simple.py examples/linear_elasticity/linear_elastic.py --format h5' % cmd)
eok += report(out, '...', -3, 5, '4.638192e-18', eps=1e-15)
out, err = check_output('%s ./extractor.py -d cylinder.h5' % cmd)
eok += report(out, '...', -2, 1, '...done')
out, err = check_output('%s ./postproc.py -n --no-offscreen -o cylinder.png cylinder.h5' % cmd)
eok += report(out, '...', -3, 2, 'cylinder.png...')
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py' % cmd)
eok += report(out, '...', -9, 0, '2.08545116e+08', match_numbers=True)
eok += report(out, '...', -8, 1, '1.16309223e+11', match_numbers=True)
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py --phase-velocity' % cmd)
eok += report(out, '...', -2, 0, '4189.41229592', match_numbers=True)
eok += report(out, '...', -2, 1, '2620.55608256', match_numbers=True)
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps.py -d' % cmd)
eok += report(out, '...', -6, 1, '[0,')
out, err = check_output('%s ./phonon.py examples/phononic/band_gaps_rigid.py' % cmd)
eok += report(out, '...', -9, 0, '4.58709531e+07', match_numbers=True)
eok += report(out, '...', -8, 1, '1.13929200e+11', match_numbers=True)
out, err = check_output('%s ./simple.py examples/quantum/hydrogen.py' % cmd)
eok += report(out, '...', -2, -2, '-0.01913506', eps=1e-4)
out, err = check_output('%s ./homogen.py examples/homogenization/perfusion_micro.py' % cmd)
eok += report2(out, '...', ['computing EpA', 'computing PA_3',
'computing GA', 'computing EmA',
'computing KA'])
out, err = check_output('%s examples/homogenization/rs_correctors.py -n' % cmd)
eok += report(out, '...', -2, -1, '1.644e-01', match_numbers=True)
out, err = check_output('%s examples/large_deformation/compare_elastic_materials.py -n' % cmd)
eok += report(out, '...', -3, 5, '1.068759e-14', eps=1e-13)
out, err = check_output('%s examples/linear_elasticity/linear_elastic_interactive.py' % cmd)
eok += report(out, '...', -16, 0, '1.62128841139e-14', eps=1e-13)
out, err = check_output('%s examples/linear_elasticity/modal_analysis.py' % cmd)
eok += report(out, '...', -12, 5, '12142.11470773', eps=1e-13)
out, err = check_output('%s examples/multi_physics/thermal_electric.py' % cmd)
eok += report(out, '...', -4, 5, '2.612933e-14', eps=1e-13)
out, err = check_output('%s examples/diffusion/laplace_refine_interactive.py output' % cmd)
eok += report(out, '...', -3, 5, '2.675866e-15', eps=1e-13)
out, err = check_output('%s examples/diffusion/laplace_iga_interactive.py -o output-tests' % cmd)
eok += report(out, '...', -3, 5, '1.028134e-13', eps=1e-12)
out, err = check_output('%s examples/dg/imperative_burgers_1D.py -o output-tests' % cmd)
eok += report(out, '...', -3, 3, 'moment_1D_limiter')
out, err = check_output('mpiexec -n 2 %s examples/diffusion/poisson_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
eok += report(out, '...', -2, 4, '8.021313824020e-07', eps=1e-6)
out, err = check_output('mpiexec -n 2 %s examples/multi_physics/biot_parallel_interactive.py output-parallel -2 --silent -ksp_monitor' % cmd)
eok += report(out, '...', -2, 4, '3.787214380277e-09', eps=1e-8)
t1 = time.time()
out, err = check_output('%s ./run_tests.py' % cmd)
tok, failed = report_tests(out, return_item=True)
tok = {True : 'ok', False : 'fail'}[tok]
t2 = time.time()
fd = open('test_install_times.log', 'a+')
fd.write('%s: examples: %.2f [s] (%d), tests: %.2f [s] (%s: %s)\n'
% (time.ctime(t0), t1 - t0, eok, t2 - t1, tok, failed))
fd.close()
if __name__ == '__main__':
main()
|
cvat/apps/engine/migrations/0018_jobcommit.py | wsp-digital/cvat | 4,197 | 12757930 | # Generated by Django 2.1.7 on 2019-04-17 09:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('engine', '0017_db_redesign_20190221'),
]
operations = [
migrations.CreateModel(
name='JobCommit',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('version', models.PositiveIntegerField(default=0)),
('timestamp', models.DateTimeField(auto_now=True)),
('message', models.CharField(default='', max_length=4096)),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='commits', to='engine.Job')),
],
options={
'abstract': False,
'default_permissions': (),
},
),
]
|
third_party/shaderc/src/glslc/test/parameter_tests.py | zipated/src | 2,151 | 12757943 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, StdinShader, TempFileName
@inside_glslc_testsuite('File')
class SimpleFileCompiled(expect.ValidObjectFile):
"""Tests whether or not a simple glsl file compiles."""
shader = FileShader('#version 310 es\nvoid main() {}', '.frag')
glslc_args = ['-c', shader]
@inside_glslc_testsuite('File')
class NotSpecifyingOutputName(expect.SuccessfulReturn,
expect.CorrectObjectFilePreamble):
"""Tests that when there is no -o and -E/-S/-c specified, output as a.spv."""
shader = FileShader('#version 140\nvoid main() {}', '.frag')
glslc_args = [shader]
def check_output_a_spv(self, status):
output_name = os.path.join(status.directory, 'a.spv')
return self.verify_object_file_preamble(output_name)
@inside_glslc_testsuite('Parameters')
class HelpParameters(
expect.ReturnCodeIsZero, expect.StdoutMatch, expect.StderrMatch):
"""Tests the --help flag outputs correctly and does not produce and error."""
glslc_args = ['--help']
expected_stdout = '''glslc - Compile shaders into SPIR-V
Usage: glslc [options] file...
An input file of - represents standard input.
Options:
-c Only run preprocess, compile, and assemble steps.
-Dmacro[=defn] Add an implicit macro definition.
-E Outputs only the results of the preprocessing step.
Output defaults to standard output.
-fshader-stage=<stage>
Treat subsequent input files as having stage <stage>.
Valid stages are vertex, fragment, tesscontrol, tesseval,
geometry, and compute.
-g Generate source-level debug information.
Currently this option has no effect.
--help Display available options.
--version Display compiler version information.
-I <value> Add directory to include search path.
-o <file> Write output to <file>.
A file name of '-' represents standard output.
-std=<value> Version and profile for input files. Possible values
are concatenations of version and profile, e.g. 310es,
450core, etc.
-M Generate make dependencies. Implies -E and -w.
-MM An alias for -M.
-MD Generate make dependencies and compile.
-MF <file> Write dependency output to the given file.
-MT <target> Specify the target of the rule emitted by dependency
generation.
-S Only run preprocess and compilation steps.
--target-env=<environment>
Set the target shader environment, and the semantics
of warnings and errors. Valid values are 'opengl',
'opengl_compat' and 'vulkan'. The default value is 'vulkan'.
-w Suppresses all warning messages.
-Werror Treat all warnings as errors.
-x <language> Treat subsequent input files as having type <language>.
The only supported language is glsl.
'''
expected_stderr = ''
@inside_glslc_testsuite('Parameters')
class HelpIsNotTooWide(expect.StdoutNoWiderThan80Columns):
"""Tests that --help output is not too wide."""
glslc_args = ['--help']
@inside_glslc_testsuite('Parameters')
class UnknownSingleLetterArgument(expect.ErrorMessage):
"""Tests that an unknown argument triggers an error message."""
glslc_args = ['-a']
expected_error = ["glslc: error: unknown argument: '-a'\n"]
@inside_glslc_testsuite('Parameters')
class UnknownMultiLetterArgument(expect.ErrorMessage):
"""Tests that an unknown argument triggers an error message."""
glslc_args = ['-zzz']
expected_error = ["glslc: error: unknown argument: '-zzz'\n"]
@inside_glslc_testsuite('Parameters')
class UnsupportedOption(expect.ErrorMessage):
"""Tests that an unsupported option triggers an error message."""
glslc_args = ['--unsupported-option']
expected_error = [
"glslc: error: unsupported option: '--unsupported-option'\n"]
@inside_glslc_testsuite('File')
class FileNotFound(expect.ErrorMessage):
"""Tests the error message if a file cannot be found."""
blabla_file = TempFileName('blabla.frag')
glslc_args = [blabla_file]
expected_error = [
"glslc: error: cannot open input file: '", blabla_file,
"': No such file or directory\n"]
@inside_glslc_testsuite('Unsupported')
class LinkingNotSupported(expect.ErrorMessage):
"""Tests the error message generated by linking not supported yet."""
shader1 = FileShader('#version 140\nvoid main() {}', '.vert')
shader2 = FileShader('#version 140\nvoid main() {}', '.frag')
glslc_args = [shader1, shader2]
expected_error = [
'glslc: error: linking multiple files is not supported yet. ',
'Use -c to compile files individually.\n']
@inside_glslc_testsuite('Unsupported')
class MultipleStdinUnsupported(expect.ErrorMessage):
"""Tests the error message generated by having more than one - input."""
glslc_args = ['-c', '-fshader-stage=vertex', '-', '-']
expected_error = [
'glslc: error: specifying standard input "-" as input more'
' than once is not allowed.\n']
@inside_glslc_testsuite('Parameters')
class StdinWithoutShaderStage(expect.StdoutMatch, expect.StderrMatch):
"""Tests that you must use -fshader-stage when specifying - as input."""
shader = StdinShader(
"""#version 140
int a() {
}
void main() {
int x = a();
}
""")
glslc_args = [shader]
expected_stdout = ''
expected_stderr = [
"glslc: error: '-': -fshader-stage required when input is from "
'standard input "-"\n']
|
libraries/botframework-streaming/botframework/streaming/payloads/disassemblers/cancel_disassembler.py | andreikop/botbuilder-python | 388 | 12757946 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import UUID
from botframework.streaming.payload_transport import PayloadSender
from botframework.streaming.payloads.models import Header
class CancelDisassembler:
def __init__(self, *, sender: PayloadSender, identifier: UUID, type: str):
self._sender = sender
self._identifier = identifier
self._type = type
async def disassemble(self):
header = Header(type=self._type, id=self._identifier, end=True)
header.payload_length = 0
self._sender.send_payload(header, None, True, None)
return
|
data_preprocess.py | smlin2000/OmniAnomaly | 344 | 12757965 | import ast
import csv
import os
import sys
from pickle import dump
import numpy as np
from tfsnippet.utils import makedirs
output_folder = 'processed'
makedirs(output_folder, exist_ok=True)
def load_and_save(category, filename, dataset, dataset_folder):
temp = np.genfromtxt(os.path.join(dataset_folder, category, filename),
dtype=np.float32,
delimiter=',')
print(dataset, category, filename, temp.shape)
with open(os.path.join(output_folder, dataset + "_" + category + ".pkl"), "wb") as file:
dump(temp, file)
def load_data(dataset):
if dataset == 'SMD':
dataset_folder = 'ServerMachineDataset'
file_list = os.listdir(os.path.join(dataset_folder, "train"))
for filename in file_list:
if filename.endswith('.txt'):
load_and_save('train', filename, filename.strip('.txt'), dataset_folder)
load_and_save('test', filename, filename.strip('.txt'), dataset_folder)
load_and_save('test_label', filename, filename.strip('.txt'), dataset_folder)
elif dataset == 'SMAP' or dataset == 'MSL':
dataset_folder = 'data'
with open(os.path.join(dataset_folder, 'labeled_anomalies.csv'), 'r') as file:
csv_reader = csv.reader(file, delimiter=',')
res = [row for row in csv_reader][1:]
res = sorted(res, key=lambda k: k[0])
label_folder = os.path.join(dataset_folder, 'test_label')
makedirs(label_folder, exist_ok=True)
data_info = [row for row in res if row[1] == dataset and row[0] != 'P-2']
labels = []
for row in data_info:
anomalies = ast.literal_eval(row[2])
length = int(row[-1])
            label = np.zeros([length], dtype=bool)
for anomaly in anomalies:
label[anomaly[0]:anomaly[1] + 1] = True
labels.extend(label)
labels = np.asarray(labels)
print(dataset, 'test_label', labels.shape)
with open(os.path.join(output_folder, dataset + "_" + 'test_label' + ".pkl"), "wb") as file:
dump(labels, file)
def concatenate_and_save(category):
data = []
for row in data_info:
filename = row[0]
temp = np.load(os.path.join(dataset_folder, category, filename + '.npy'))
data.extend(temp)
data = np.asarray(data)
print(dataset, category, data.shape)
with open(os.path.join(output_folder, dataset + "_" + category + ".pkl"), "wb") as file:
dump(data, file)
for c in ['train', 'test']:
concatenate_and_save(c)
if __name__ == '__main__':
datasets = ['SMD', 'SMAP', 'MSL']
commands = sys.argv[1:]
load = []
if len(commands) > 0:
for d in commands:
if d in datasets:
load_data(d)
else:
print("""
Usage: python data_preprocess.py <datasets>
where <datasets> should be one of ['SMD', 'SMAP', 'MSL']
""")
|
tests/test_visualization_metrics.py | aniketmaurya/Chitra | 158 | 12758002 | from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pytest
from chitra.visualization.metrics import (
cm_accuracy,
detect_multilabel,
plot_confusion_matrix,
)
def test_detect_multilabel():
with pytest.raises(UserWarning):
detect_multilabel({"label1": "this will raise UserWarning"})
assert detect_multilabel([1, 2, 3, 4])
assert not detect_multilabel([0, 1, 1, 0])
def test_cm_accuracy():
x = np.asarray([[1, 2], [1, 2]])
assert cm_accuracy(x) == 0.5
@patch("chitra.visualization.metrics.plt")
def test_plot_confusion_matrix(mock_plt: Mock):
mock_plt.show = MagicMock()
y_pred = [1, 1, 0, 1]
y_true = [0, 1, 0, 1]
assert plot_confusion_matrix(y_pred, y_true) is None
mock_plt.show.assert_called_once()
|
scale/scheduler/cleanup/node.py | kaydoh/scale | 121 | 12758037 | """Defines the class that handles a node's cleanup"""
from __future__ import unicode_literals
import logging
from job.execution.tasks.cleanup_task import CleanupTask
from scheduler.manager import scheduler_mgr
JOB_EXES_WARNING_THRESHOLD = 100
MAX_JOB_EXES_PER_CLEANUP = 25
logger = logging.getLogger(__name__)
class NodeCleanup(object):
"""This class manages all of the cleanup for a node."""
def __init__(self):
"""Constructor
"""
self._job_exes = {} # {Job Exe ID: RunningJobExecution}
def add_job_execution(self, job_exe):
"""Adds a job execution that needs to be cleaned up
:param job_exe: The job execution to add
:type job_exe: :class:`job.execution.job_exe.RunningJobExecution`
"""
self._job_exes[job_exe.id] = job_exe
def delete_job_executions(self, job_exes):
"""Deletes the given job executions since they have been cleaned up
:param job_exes: The job executions to delete
:type job_exes: [:class:`job.execution.job_exe.RunningJobExecution`]
"""
for job_exe in job_exes:
if job_exe.id in self._job_exes:
del self._job_exes[job_exe.id]
def create_next_task(self, agent_id, hostname, is_initial_cleanup_completed):
"""Creates and returns the next cleanup task that needs to be run, possibly None
:param agent_id: The node's agent ID
:type agent_id: string
:param hostname: The node's hostname
:type hostname: string
:param is_initial_cleanup_completed: Indicates if node's initial cleanup is completed
:type is_initial_cleanup_completed: bool
:returns: The next cleanup task, possibly None
:rtype: :class:`job.tasks.base_task.Task`
"""
total_job_exes = self._job_exes.values()
count = len(total_job_exes)
if count > JOB_EXES_WARNING_THRESHOLD:
logger.warning('Node %s has %d job executions waiting to be cleaned up', hostname, count)
cleanup_job_exes = []
if is_initial_cleanup_completed:
if count == 0:
# No job executions to clean, so no task
return None
for job_exe in total_job_exes:
cleanup_job_exes.append(job_exe)
if len(cleanup_job_exes) >= MAX_JOB_EXES_PER_CLEANUP:
break
return CleanupTask(scheduler_mgr.framework_id, agent_id, cleanup_job_exes)
def get_num_job_exes(self):
"""Returns the number of job executions waiting to be cleaned up
:returns: The number of job executions waiting to be cleaned up
:rtype: int
"""
return len(self._job_exes.values())
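# Minimal usage sketch (illustrative only, not part of the original module).
# `running_job_exe` is assumed to be a RunningJobExecution tracked by the
# scheduler, and the agent ID/hostname identify the node being cleaned up:
#
#   cleanup = NodeCleanup()
#   cleanup.add_job_execution(running_job_exe)
#   task = cleanup.create_next_task('agent-1234', 'node-01.example.com', True)
#   if task is not None:
#       # hand the CleanupTask over to the scheduler for launching
#       ...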
|
vlcp/service/sdn/icmpresponder.py | hubo1016/vlcp | 252 | 12758061 | import itertools
import os
import vlcp.service.sdn.ofpportmanager as ofpportmanager
import vlcp.service.kvdb.objectdb as objectdb
import vlcp.service.sdn.ioprocessing as iop
from vlcp.service.sdn.flowbase import FlowBase
from vlcp.server.module import depend, call_api
from vlcp.config.config import defaultconfig
from vlcp.event.runnable import RoutineContainer
from vlcp.service.sdn.ofpmanager import FlowInitialize
from vlcp.utils.ethernet import mac_addr_bytes, ip4_addr_bytes,ip4_icmp_payload,\
ethernet_l7, ip4_packet_l7, ip4_payload,ICMP_ECHOREPLY,icmp_bestparse,icmp_echo,\
ip_frag
from vlcp.utils.flowupdater import FlowUpdater
from vlcp.protocol.openflow.openflow import OpenflowConnectionStateEvent, OpenflowAsyncMessageEvent
from vlcp.utils.networkmodel import SubNet,RouterPort
from namedstruct.stdprim import uint16
from vlcp.event.event import M_
class ICMPResponderUpdater(FlowUpdater):
def __init__(self,connection,parent):
super(ICMPResponderUpdater,self).__init__(connection,(),('icmpresponderupdate',connection),parent._logger)
self.parent = parent
self._lastlognets = ()
self._lastlogports = ()
self._lastsubnetsinfo = dict()
self._orig_initialkeys = ()
async def main(self):
try:
self.subroutine(self._update_handler(),True,"update_handler_routine")
            # use the controller to reply to ICMP ping, so start the packet-in handler routine
if not self.parent.prepush:
self.subroutine(self._icmp_packetin_handler(),True,"icmp_packetin_handler_routine")
await FlowUpdater.main(self)
finally:
if hasattr(self,"update_handler_routine"):
self.update_handler_routine.close()
if hasattr(self,"icmp_packetin_handler_routine"):
self.icmp_packetin_handler_routine.close()
async def _icmp_packetin_handler(self):
conn = self._connection
ofdef = self._connection.openflowdef
l3input = self.parent._gettableindex("l3input",self._connection.protocol.vhost)
transactid = uint16.create(os.urandom(2))
async def send_packet_out(portid,packet):
await self.execute_commands(conn,
[
ofdef.ofp_packet_out(
buffer_id = ofdef.OFP_NO_BUFFER,
in_port = ofdef.OFPP_CONTROLLER,
actions = [
ofdef.ofp_action_output(port = portid,
max_len = ofdef.OFPCML_NO_BUFFER
)
],
data = packet._tobytes()
)
])
icmp_packetin_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN,None,None,l3input,2,
self._connection,self._connection.connmark)
while True:
ev = await icmp_packetin_matcher
msg = ev.message
inport = ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields,ofdef.OXM_OF_IN_PORT))
# it must be icmp packet ...
icmp_packet = ethernet_l7.create(msg.data)
if ip_frag(icmp_packet) != 0:
# ignore fragmented packets
continue
transactid = (transactid + 1) & 0xffff
reply_packet = ip4_packet_l7((ip4_payload,ip4_icmp_payload),
(icmp_bestparse, icmp_echo),
dl_src = icmp_packet.dl_dst,
dl_dst = icmp_packet.dl_src,
ip_src = icmp_packet.ip_dst,
ip_dst = icmp_packet.ip_src,
frag_off = 0,
ttl = 128,
identifier = transactid,
icmp_type = ICMP_ECHOREPLY,
icmp_code = icmp_packet.icmp_code,
icmp_id = icmp_packet.icmp_id,
icmp_seq = icmp_packet.icmp_seq,
data = icmp_packet.data
)
self.subroutine(send_packet_out(inport,reply_packet))
async def _update_handler(self):
        # when lgport, lgnet, phyport or phynet objects change, we receive this event from the ioprocessing module
dataobjectchange = iop.DataObjectChanged.createMatcher(None,None,self._connection)
while True:
ev = await dataobjectchange
            # save to instance attributes, used in other methods
self._lastlogports,_,self._lastlognets,_ = ev.current
self._update_walk()
def _walk_lgport(self,key,value,walk,save):
if value is not None:
save(key)
if hasattr(value,'subnet'):
try:
subnetobj = walk(value.subnet.getkey())
except KeyError:
pass
else:
save(value.subnet.getkey())
if subnetobj is not None and hasattr(subnetobj,"router"):
try:
_ = walk(subnetobj.router.getkey())
except KeyError:
pass
else:
save(subnetobj.router.getkey())
def _walk_lgnet(self,key,value,walk,save):
save(key)
        # if value is None, we still save its key;
        # this means we watch the key: when it is created, we will receive an event
def _update_walk(self):
lgportkeys = [p.getkey() for p,_ in self._lastlogports]
lgnetkeys = [p.getkey() for p,_ in self._lastlognets]
self._initialkeys = lgportkeys + lgnetkeys
self._orig_initialkeys = lgportkeys + lgnetkeys
self._walkerdict = dict(itertools.chain(((p,self._walk_lgport) for p in lgportkeys),
((n,self._walk_lgnet) for n in lgnetkeys)))
self.subroutine(self.restart_walk(),False)
def reset_initialkeys(self,keys,values):
        # walk map: logicalport --> subnet --> routerport
        # we get the subnet object and add its keys to initialkeys,
        # so when the subnet updates, the walk restarts and we then get the new routerport
subnetkeys = [k for k,v in zip(keys,values) if v is not None and not v.isdeleted() and
v.isinstance(SubNet)]
self._initialkeys = tuple(itertools.chain(self._orig_initialkeys,subnetkeys))
async def updateflow(self, connection, addvalues, removevalues, updatedvalues):
try:
allobjects = set(o for o in self._savedresult if o is not None and not o.isdeleted())
lastsubnetsinfo = self._lastsubnetsinfo
currentlognetsinfo = dict((n,id) for n,id in self._lastlognets if n in allobjects)
currentrouterportsinfo = dict((o.subnet,o) for o in allobjects
if o.isinstance(RouterPort))
currentsubnetsinfo = dict((o,(getattr(currentrouterportsinfo[o],"ip_address",getattr(o,"gateway",None)),
self.parent.inroutermac,o.network.id,currentlognetsinfo[o.network]))
for o in allobjects if o.isinstance(SubNet)
and hasattr(o,"router") and o in currentrouterportsinfo
and o.network in currentlognetsinfo
and (hasattr(currentrouterportsinfo[o],"ip_address")
or hasattr(o,"gateway"))
and ( not hasattr(o,"isexternal") or o.isexternal == False))
self._lastsubnetsinfo = currentsubnetsinfo
ofdef = connection.openflowdef
vhost = connection.protocol.vhost
l3input = self.parent._gettableindex("l3input",vhost)
cmds = []
if connection.protocol.disablenxext:
def match_network(nid):
return ofdef.create_oxm(ofdef.OXM_OF_METADATA_W, (nid & 0xffff) << 32,
b'\x00\x00\xff\xff\x00\x00\x00\x00')
else:
def match_network(nid):
return ofdef.create_oxm(ofdef.NXM_NX_REG4, nid)
            # with or without prepush the delete flow is the same, so define it here
def _deleteicmpflows(ipaddress, macaddress, networkid):
return [
ofdef.ofp_flow_mod(
cookie = 0x2,
cookie_mask = 0xffffffffffffffff,
table_id = l3input,
command = ofdef.OFPFC_DELETE,
priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
ofdef.create_oxm(ofdef.NXM_NX_REG4,networkid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
]
)
)
]
if not self.parent.prepush:
def _createicmpflows(ipaddress, macaddress, networkid):
return [
ofdef.ofp_flow_mod(
cookie = 0x2,
cookie_mask = 0xffffffffffffffff,
table_id = l3input,
command = ofdef.OFPFC_ADD,
# icmp to router matcher same as ip forward to router
# so priority + 1
priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
match_network(networkid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
]
),
instructions = [
ofdef.ofp_instruction_actions(
actions = [
ofdef.ofp_action_output(
port = ofdef.OFPP_CONTROLLER,
max_len = ofdef.OFPCML_NO_BUFFER
)
]
)
]
)
]
else:
def _createicmpflows(ipaddress, macaddress, networkid):
return [
ofdef.ofp_flow_mod(
cookie = 0x2,
cookie_mask = 0xffffffffffffffff,
table_id = l3input,
command = ofdef.OFPFC_ADD,
# icmp to router matcher same as ip forward to router
# so priority + 1
priority = ofdef.OFP_DEFAULT_PRIORITY + 1,
buffer_id = ofdef.OFP_NO_BUFFER,
out_port = ofdef.OFPP_ANY,
out_group = ofdef.OFPG_ANY,
match = ofdef.ofp_match_oxm(
oxm_fields = [
match_network(networkid),
ofdef.create_oxm(ofdef.OXM_OF_ETH_DST,mac_addr_bytes(macaddress)),
ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE,ofdef.ETHERTYPE_IP),
ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST,ip4_addr_bytes(ipaddress)),
ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO,ofdef.IPPROTO_ICMP),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE,8),
ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE,0)
]
),
instructions = [
ofdef.ofp_instruction_actions(
actions = [
ofdef.nx_action_reg_move(
n_bits = 48,
src = ofdef.OXM_OF_ETH_SRC,
dst = ofdef.OXM_OF_ETH_DST
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(
ofdef.OXM_OF_ETH_SRC,
ofdef.mac_addr(macaddress)
)
),
ofdef.nx_action_reg_move(
n_bits = 32,
src = ofdef.OXM_OF_IPV4_SRC,
dst = ofdef.OXM_OF_IPV4_DST
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(
ofdef.OXM_OF_IPV4_SRC,
ofdef.ip4_addr(ipaddress)
)
),
ofdef.ofp_action_set_field(
field = ofdef.create_oxm(
ofdef.OXM_OF_ICMPV4_TYPE,
ICMP_ECHOREPLY
)
),
ofdef.ofp_action_nw_ttl(
nw_ttl = 128
),
ofdef.ofp_action_output(
port = ofdef.OFPP_IN_PORT
)
]
)
]
)
]
for subnet in lastsubnetsinfo.keys():
if subnet not in currentsubnetsinfo\
or (subnet in currentsubnetsinfo and lastsubnetsinfo[subnet] != currentsubnetsinfo[subnet]):
                    # subnet removed or subnet info changed, remove the flow info
ip_address, mac_address, networkid, nid = lastsubnetsinfo[subnet]
remove_arp = {(ip_address,mac_address,networkid,True),}
await call_api(self, 'arpresponder', 'removeproxyarp', {'connection':connection,
'arpentries': remove_arp})
cmds.extend(_deleteicmpflows(ip_address,mac_address,nid))
await self.execute_commands(connection, cmds)
for subnet in currentsubnetsinfo.keys():
if subnet not in lastsubnetsinfo\
or (subnet in lastsubnetsinfo and lastsubnetsinfo[subnet] != currentsubnetsinfo[subnet]):
ip_address, mac_address, networkid, nid = currentsubnetsinfo[subnet]
add_arp = {(ip_address,mac_address,networkid,True),}
await call_api(self, 'arpresponder', 'createproxyarp', {'connection': connection,
'arpentries': add_arp})
cmds.extend(_createicmpflows(ip_address,mac_address,nid))
await self.execute_commands(connection, cmds)
except Exception:
self._logger.warning("Unexpected exception in icmp_flow_updater, ignore it! Continue",exc_info=True)
@defaultconfig
@depend(ofpportmanager.OpenflowPortManager,objectdb.ObjectDB)
class ICMPResponder(FlowBase):
"""
Respond ICMP echo (ping) requests to the gateway
"""
_tablerequest = (
("l3input",("l2input",),""),
("l2output",("l3input",),"")
)
# True : reply icmp ping with flow
# False: reply icmp ping with controller PACKET_IN/PACKET_OUT
#
# Must use prepush=True with OpenvSwitch 2.5+
#
_default_prepush = False
# "Gateway" responds with this MAC address
_default_inroutermac = '1a:23:67:59:63:33'
def __init__(self,server):
super(ICMPResponder,self).__init__(server)
self.app_routine = RoutineContainer(self.scheduler)
self.app_routine.main = self._main
self.routines.append(self.app_routine)
self._flowupdater = dict()
async def _main(self):
flowinit = FlowInitialize.createMatcher(_ismatch=lambda x: self.vhostbind is None or
x.vhost in self.vhostbind)
conndown = OpenflowConnectionStateEvent.createMatcher(state = OpenflowConnectionStateEvent.CONNECTION_DOWN,
_ismatch=lambda x:self.vhostbind is None or
x.createby.vhost in self.vhostbind)
while True:
ev, m = await M_(flowinit,conndown)
if m is flowinit:
c = ev.connection
self.app_routine.subroutine(self._init_conn(c))
if m is conndown:
c = ev.connection
self.app_routine.subroutine(self._remove_conn(c))
async def _init_conn(self,conn):
if conn in self._flowupdater:
updater = self._flowupdater.pop(conn)
updater.close()
updater = ICMPResponderUpdater(conn,self)
self._flowupdater[conn] = updater
updater.start()
async def _remove_conn(self,conn):
if conn in self._flowupdater:
updater = self._flowupdater.pop(conn)
updater.close()
|
11_stream/src/push_notification_to_sns.py | dpai/workshop | 2,327 | 12758065 | from __future__ import print_function
import boto3
import base64
import os
SNS_TOPIC_ARN = os.environ["SNS_TOPIC_ARN"]
sns = boto3.client("sns")
print("Loading function")
def lambda_handler(event, context):
output = []
success = 0
failure = 0
highest_score = 0
print("event: {}".format(event))
r = event["records"]
print("records: {}".format(r))
print("type_records: {}".format(type(r)))
for record in event["records"]:
try:
# Uncomment the below line to publish the decoded data to the SNS topic.
payload = base64.b64decode(record["data"])
print("payload: {}".format(payload))
text = payload.decode("utf-8")
print("text: {}".format(text))
score = float(text)
if (score != 0) and (score > highest_score):
highest_score = score
print("New highest_score: {}".format(highest_score))
# sns.publish(TopicArn=SNS_TOPIC_ARN, Message='New anomaly score: {}'.format(text), Subject='New Reviews Anomaly Score Detected')
output.append({"recordId": record["recordId"], "result": "Ok"})
success += 1
except Exception as e:
print(e)
output.append({"recordId": record["recordId"], "result": "DeliveryFailed"})
failure += 1
if highest_score != 0:
sns.publish(
TopicArn=SNS_TOPIC_ARN,
Message="New anomaly score: {}".format(str(highest_score)),
Subject="New Reviews Anomaly Score Detected",
)
print("Successfully delivered {0} records, failed to deliver {1} records".format(success, failure))
return {"records": output}
|
docker/test/integration/minifi/validators/SegfaultValidator.py | dtrodrigues/nifi-minifi-cpp | 113 | 12758080 | from .OutputValidator import OutputValidator
class SegfaultValidator(OutputValidator):
"""
Validate that a file was received.
"""
def validate(self):
return True
|
dashboard/dashboard/pinpoint/models/tasks/evaluator.py | Martijnve23/catapult | 1,894 | 12758097 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Consolidated evaluator factory module.
This module consolidates the creation of specific evaluator combinators, used
throughout Pinpoint to evaluate task graphs we support.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from dashboard.pinpoint.models import evaluators
from dashboard.pinpoint.models.tasks import find_isolate
from dashboard.pinpoint.models.tasks import performance_bisection
from dashboard.pinpoint.models.tasks import read_value
from dashboard.pinpoint.models.tasks import run_test
EXCLUDED_PAYLOAD_KEYS = {'commits', 'swarming_request_body'}
class ExecutionEngine(evaluators.SequenceEvaluator):
def __init__(self, job):
# We gather all the evaluators from the modules we know.
super(ExecutionEngine, self).__init__(evaluators=[
evaluators.DispatchByTaskType({
'find_isolate': find_isolate.Evaluator(job),
'find_culprit': performance_bisection.Evaluator(job),
'read_value': read_value.Evaluator(job),
'run_test': run_test.Evaluator(job),
}),
# We then always lift the task payload up, skipping some of the
# larger objects that we know we are not going to need when deciding
# what the end result is.
evaluators.TaskPayloadLiftingEvaluator(
exclude_keys=EXCLUDED_PAYLOAD_KEYS)
])
|
external/iotivity/iotivity_1.2-rel/build_common/iotivityconfig/compiler/configuration.py | SenthilKumarGS/TizenRT | 1,433 | 12758103 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
class Configuration:
"""Compiler-specific configuration abstract base class"""
def __init__(self, context):
"""
Initialize the Configuration object
Arguments:
context -- the scons configure context
"""
if type(self) is Configuration:
raise TypeError('abstract class cannot be instantiated')
self._context = context # scons configure context
self._env = context.env # scons environment
def check_c99_flags(self):
"""
Check if command line flag is required to enable C99
support.
Returns 1 if no flag is required, 0 if no flag was
found, and the actual flag if one was found.
CFLAGS will be updated with appropriate C99 flag,
accordingly.
"""
return self._check_flags(self._c99_flags(),
self._c99_test_program(),
'.c',
'CFLAGS')
def check_cxx11_flags(self):
"""
Check if command line flag is required to enable C++11
support.
Returns 1 if no flag is required, 0 if no flag was
found, and the actual flag if one was found.
CXXFLAGS will be updated with appropriate C++11 flag,
accordingly.
"""
return self._check_flags(self._cxx11_flags(),
self._cxx11_test_program(),
'.cpp',
'CXXFLAGS')
def has_pthreads_support(self):
"""
Check if PThreads are supported by this system
Returns 1 if this system DOES support pthreads, 0
otherwise
"""
return self._context.TryCompile(self._pthreads_test_program(), '.c')
# --------------------------------------------------------------
# Check if flag is required to build the given test program.
#
# Arguments:
# test_flags -- list of flags that may be needed to build
# test_program
# test_program -- program used used to determine if one of the
# given flags is required to for a successful
# build
# test_extension -- file extension associated with the test
# program, e.g. '.cpp' for C++ and '.c' for C
# flags_key -- key used to retrieve compiler flags that may
# be updated by this check from the SCons
# environment
# --------------------------------------------------------------
def _check_flags(self,
test_flags,
test_program,
test_extension,
flags_key):
# Check if no additional flags are required.
ret = self._context.TryCompile(test_program,
test_extension)
        if ret == 0:
# Try flags known to enable compiler features needed by
# the test program.
last_flags = self._env[flags_key]
for flag in test_flags:
self._env.Append(**{flags_key : flag})
ret = self._context.TryCompile(test_program,
test_extension)
if ret:
# Found a flag!
return flag
else:
# Restore original compiler flags for next flag
# test.
self._env.Replace(**{flags_key : last_flags})
return ret
# ------------------------------------------------------------
# Return test program to be used when checking for basic C99
# support.
#
# Subclasses should implement this template method or use the
# default test program found in the DefaultConfiguration class
# through composition.
# ------------------------------------------------------------
def _c99_test_program(self):
raise NotImplementedError('unimplemented method')
# --------------------------------------------------------------
# Get list of flags that could potentially enable C99 support.
#
# Subclasses should implement this template method if flags are
# needed to enable C99 support.
# --------------------------------------------------------------
def _c99_flags(self):
raise NotImplementedError('unimplemented method')
# ------------------------------------------------------------
# Return test program to be used when checking for basic C++11
# support.
#
# Subclasses should implement this template method or use the
# default test program found in the DefaultConfiguration class
# through composition.
# ------------------------------------------------------------
def _cxx11_test_program(self):
raise NotImplementedError('unimplemented method')
# --------------------------------------------------------------
# Get list of flags that could potentially enable C++11 support.
#
# Subclasses should implement this template method if flags are
# needed to enable C++11 support.
# --------------------------------------------------------------
def _cxx11_flags(self):
raise NotImplementedError('unimplemented method')
# --------------------------------------------------------------
# Return a test program to be used when checking for PThreads
# support
#
# --------------------------------------------------------------
def _pthreads_test_program(self):
return """
#include <unistd.h>
#include <pthread.h>
int main()
{
#ifndef _POSIX_THREADS
# error POSIX Threads support not available
#endif
return 0;
}
"""
|
aps/transform/utils.py | ishine/aps | 117 | 12758104 | # Copyright 2019 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON, TORCH_VERSION
from typing import Optional, Tuple
from distutils.version import LooseVersion
if TORCH_VERSION >= LooseVersion("1.7"):
from torch.fft import fft as fft_func
else:
pass
def export_jit(transform: nn.Module) -> nn.Module:
"""
Export transform module for inference
"""
export_out = [module for module in transform if module.exportable()]
return nn.Sequential(*export_out)
def init_window(wnd: str,
frame_len: int,
device: th.device = "cpu") -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
def sqrthann(frame_len, periodic=True):
return th.hann_window(frame_len, periodic=periodic)**0.5
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c.to(device)
def init_kernel(frame_len: int,
frame_hop: int,
window: th.Tensor,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> Tuple[th.Tensor, th.Tensor]:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window tensor
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT size: B
if round_pow_of_two or mode == "kaldi":
fft_size = 2**math.ceil(math.log2(frame_len))
else:
fft_size = frame_len
# center padding window if needed
if mode == "librosa" and fft_size != frame_len:
lpad = (fft_size - frame_len) // 2
window = tf.pad(window, (lpad, fft_size - frame_len - lpad))
if normalized:
# make K^H * K = I
S = fft_size**0.5
else:
S = 1
# W x B x 2
if TORCH_VERSION >= LooseVersion("1.7"):
K = fft_func(th.eye(fft_size) / S, dim=-1)
K = th.stack([K.real, K.imag], dim=-1)
else:
I = th.stack([th.eye(fft_size), th.zeros(fft_size, fft_size)], dim=-1)
K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / fft_size
# 2 x B x W
K = th.transpose(K, 0, 2)
# 2B x 1 x W
K = th.reshape(K, (fft_size * 2, 1, K.shape[-1]))
return K.to(window.device), window
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
num_bins: number of the frequency bins produced by STFT
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
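# Shape sketch (comment only): with the defaults above a frame length of 400
# rounds up to a 512-point FFT, i.e. 257 frequency bins, so for example
#   mel_filter(400, num_mels=80)              -> Tensor of shape (80, 257)
#   mel_filter(400, num_bins=257, fmax=8000)  -> Tensor of shape (80, 257)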
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
src_sr: sample rate of the source signal
dst_sr: sample rate of the target signal
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
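# Shape sketch (comment only): splicing one frame of left and right context
# onto an N x T x F feature tensor,
#   feats = th.randn(4, 100, 40)
#   splice_feature(feats, lctx=1, rctx=1, op="cat")    -> shape (4, 100, 120)
#   splice_feature(feats, lctx=1, rctx=1, op="stack")  -> shape (4, 100, 40, 3)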
def _forward_stft(wav: th.Tensor,
kernel: th.Tensor,
window: th.Tensor,
return_polar: bool = False,
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False,
eps: float = EPSILON) -> th.Tensor:
"""
STFT function implemented by conv1d (not efficient, but we don't care during training)
Args:
wav (Tensor): N x (C) x S
kernel (Tensor): STFT transform kernels, from init_kernel(...)
return_polar: return [magnitude; phase] Tensor or [real; imag] Tensor
pre_emphasis: factor of preemphasis
frame_hop: frame hop size in number samples
onesided: return half FFT bins
center: if true, we assumed to have centered frames
Return:
transform (Tensor): STFT transform results
"""
wav_dim = wav.dim()
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
kernel = kernel * window
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
# follow Kaldi's Preemphasize
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
frames[:, 0] *= (1 - pre_emphasis)
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if return_polar:
mag = (real**2 + imag**2 + eps)**0.5
pha = th.atan2(imag, real)
return th.stack([mag, pha], dim=-1)
else:
return th.stack([real, imag], dim=-1)
def _inverse_stft(transform: th.Tensor,
kernel: th.Tensor,
window: th.Tensor,
return_polar: bool = False,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False,
eps: float = EPSILON) -> th.Tensor:
"""
iSTFT function implemented by conv1d
Args:
transform (Tensor): STFT transform results
kernel (Tensor): STFT transform kernels, from init_kernel(...)
return_polar (bool): keep same with the one in _forward_stft
frame_hop: frame hop size in number samples
onesided: return half FFT bins
center: used in _forward_stft
Return:
wav (Tensor), N x S
"""
# (N) x F x T x 2
transform_dim = transform.dim()
# if F x T x 2, reshape 1 x F x T x 2
if transform_dim == 3:
transform = th.unsqueeze(transform, 0)
if transform_dim != 4:
raise RuntimeError(f"Expect 4D tensor, but got {transform_dim}D")
if return_polar:
real = transform[..., 0] * th.cos(transform[..., 1])
imag = transform[..., 0] * th.sin(transform[..., 1])
else:
real, imag = transform[..., 0], transform[..., 1]
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
wav = tf.conv_transpose1d(packed,
kernel * window,
stride=frame_hop,
padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
num_frames = packed.shape[-1]
win_length = window.shape[0]
# W x T
win = th.repeat_interleave(window[..., None]**2, num_frames, dim=-1)
# Do OLA on windows
# v1)
I = th.eye(win_length, device=win.device)[:, None]
denorm = tf.conv_transpose1d(win[None, ...], I, stride=frame_hop, padding=0)
# v2)
# num_samples = (num_frames - 1) * frame_hop + win_length
# denorm = tf.fold(win[None, ...], (num_samples, 1), (win_length, 1),
# stride=frame_hop)[..., 0]
if center:
pad = kernel.shape[-1] // 2
wav = wav[..., pad:-pad]
denorm = denorm[..., pad:-pad]
wav = wav / (denorm + eps)
# N x S
return wav.squeeze(1)
def _pytorch_stft(wav: th.Tensor,
frame_len: int,
frame_hop: int,
n_fft: int = 512,
return_polar: bool = False,
window: str = "sqrthann",
normalized: bool = False,
onesided: bool = True,
center: bool = False,
eps: float = EPSILON) -> th.Tensor:
"""
Wrapper of PyTorch STFT function
Args:
wav (Tensor): source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
n_fft: number of the FFT size
return_polar: return the results in polar coordinate
window: window tensor
center: same definition with the parameter in librosa.stft
normalized: use normalized DFT kernel
onesided: output onesided STFT
Return:
transform (Tensor), STFT transform results
"""
if TORCH_VERSION < LooseVersion("1.7"):
raise RuntimeError("Can not use this function as TORCH_VERSION < 1.7")
wav_dim = wav.dim()
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x C x S, reshape NC x S
wav = wav.view(-1, wav.shape[-1])
# STFT: N x F x T x 2
stft = th.stft(wav,
n_fft,
hop_length=frame_hop,
win_length=window.shape[-1],
window=window,
center=center,
normalized=normalized,
onesided=onesided,
return_complex=False)
if wav_dim == 3:
N, F, T, _ = stft.shape
stft = stft.view(N, -1, F, T, 2)
# N x (C) x F x T x 2
if not return_polar:
return stft
# N x (C) x F x T
real, imag = stft[..., 0], stft[..., 1]
mag = (real**2 + imag**2 + eps)**0.5
pha = th.atan2(imag, real)
return th.stack([mag, pha], dim=-1)
def _pytorch_istft(transform: th.Tensor,
frame_len: int,
frame_hop: int,
window: th.Tensor,
n_fft: int = 512,
return_polar: bool = False,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
eps: float = EPSILON) -> th.Tensor:
"""
Wrapper of PyTorch iSTFT function
Args:
transform (Tensor): results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
window: window tensor
n_fft: number of the FFT size
return_polar: keep same with _pytorch_stft
center: same definition with the parameter in librosa.stft
normalized: use normalized DFT kernel
onesided: output onesided STFT
Return:
wav (Tensor): synthetic audio
"""
if TORCH_VERSION < LooseVersion("1.7"):
raise RuntimeError("Can not use this function as TORCH_VERSION < 1.7")
transform_dim = transform.dim()
# if F x T x 2, reshape 1 x F x T x 2
if transform_dim == 3:
transform = th.unsqueeze(transform, 0)
if transform_dim != 4:
raise RuntimeError(f"Expect 4D tensor, but got {transform_dim}D")
if return_polar:
real = transform[..., 0] * th.cos(transform[..., 1])
imag = transform[..., 0] * th.sin(transform[..., 1])
transform = th.stack([real, imag], -1)
# stft is a complex tensor of PyTorch
stft = th.view_as_complex(transform)
# (N) x S
wav = th.istft(stft,
n_fft,
hop_length=frame_hop,
win_length=window.shape[-1],
window=window,
center=center,
normalized=normalized,
onesided=onesided,
return_complex=False)
return wav
def forward_stft(wav: th.Tensor,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
return_polar: bool = False,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa",
eps: float = EPSILON) -> th.Tensor:
"""
STFT function implementation, equals to STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
return_polar: return [magnitude; phase] Tensor or [real; imag] Tensor
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
pre_emphasis: factor of preemphasis
normalized: use normalized DFT kernel
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
mode: STFT mode, "kaldi" or "librosa" or "torch"
Return:
transform: results of STFT
"""
window = init_window(window, frame_len, device=wav.device)
if mode == "torch":
n_fft = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
return _pytorch_stft(wav,
frame_len,
frame_hop,
n_fft=n_fft,
return_polar=return_polar,
window=window,
normalized=normalized,
onesided=onesided,
center=center,
eps=eps)
else:
kernel, window = init_kernel(frame_len,
frame_hop,
window=window,
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
kernel,
window,
return_polar=return_polar,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center,
eps=eps)
def inverse_stft(transform: th.Tensor,
frame_len: int,
frame_hop: int,
return_polar: bool = False,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa",
eps: float = EPSILON) -> th.Tensor:
"""
iSTFT function implementation, equals to iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
return_polar: keep same with function forward_stft(...)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
onesided: output onesided STFT
mode: STFT mode, "kaldi" or "librosa" or "torch"
Return:
wav: synthetic signals
"""
window = init_window(window, frame_len, device=transform.device)
if mode == "torch":
n_fft = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
return _pytorch_istft(transform,
frame_len,
frame_hop,
n_fft=n_fft,
return_polar=return_polar,
window=window,
normalized=normalized,
onesided=onesided,
center=center,
eps=eps)
else:
kernel, window = init_kernel(frame_len,
frame_hop,
window,
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
kernel,
window,
return_polar=return_polar,
frame_hop=frame_hop,
onesided=onesided,
center=center,
eps=eps)
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
pre_emphasis: factor of preemphasis
mode: STFT mode, "kaldi" or "librosa" or "torch"
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
"""
def __init__(self,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
pre_emphasis: float = 0,
onesided: bool = True,
inverse: bool = False,
center: bool = False,
mode: str = "librosa") -> None:
super(STFTBase, self).__init__()
if mode != "torch":
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=inverse,
mode=mode)
self.K = nn.Parameter(K, requires_grad=False)
self.w = nn.Parameter(w, requires_grad=False)
self.num_bins = self.K.shape[0] // 4 + 1
self.pre_emphasis = pre_emphasis
self.win_length = self.K.shape[2]
else:
self.K = None
w = init_window(window, frame_len)
self.w = nn.Parameter(w, requires_grad=False)
fft_size = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
self.num_bins = fft_size // 2 + 1
self.pre_emphasis = 0
self.win_length = fft_size
self.frame_len = frame_len
self.frame_hop = frame_hop
self.window = window
self.normalized = normalized
self.onesided = onesided
self.center = center
self.mode = mode
def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
"""
Compute number of the frames
"""
assert th.sum(wav_len <= self.win_length) == 0
if self.center:
wav_len += self.win_length
return th.div(wav_len - self.win_length,
self.frame_hop,
rounding_mode="trunc") + 1
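    # Example (illustrative, not from the original source): with
    # round_pow_of_two=True and frame_len=400 the window/FFT length becomes
    # 512, so with frame_hop=160 and center=False a 16000-sample signal yields
    # (16000 - 512) // 160 + 1 = 97 frames.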
def extra_repr(self) -> str:
str_repr = (
f"num_bins={self.num_bins}, win_length={self.win_length}, " +
f"stride={self.frame_hop}, window={self.window}, " +
f"center={self.center}, mode={self.mode}")
if not self.onesided:
str_repr += f", onesided={self.onesided}"
if self.pre_emphasis > 0:
str_repr += f", pre_emphasis={self.pre_emphasis}"
if self.normalized:
str_repr += f", normalized={self.normalized}"
return str_repr
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, inverse=False, **kwargs)
def forward(self,
wav: th.Tensor,
return_polar: bool = False,
eps: float = EPSILON) -> th.Tensor:
"""
        Accept (single- or multi-channel) raw waveform and output its STFT,
        as real/imag pairs or as magnitude/phase when return_polar is True
Args
wav (Tensor) input signal, N x (C) x S
Return
transform (Tensor), N x (C) x F x T x 2
"""
if self.mode == "torch":
return _pytorch_stft(wav,
self.frame_len,
self.frame_hop,
n_fft=(self.num_bins - 1) * 2,
return_polar=return_polar,
window=self.w,
normalized=self.normalized,
onesided=self.onesided,
center=self.center,
eps=eps)
else:
return _forward_stft(wav,
self.K,
self.w,
return_polar=return_polar,
frame_hop=self.frame_hop,
pre_emphasis=self.pre_emphasis,
onesided=self.onesided,
center=self.center,
eps=eps)
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, inverse=True, **kwargs)
def forward(self,
transform: th.Tensor,
return_polar: bool = False,
eps: float = EPSILON) -> th.Tensor:
"""
        Accept the STFT (real/imag pairs, or magnitude/phase when return_polar
        is True) and output the raw waveform
Args
transform (Tensor): STFT output, N x F x T x 2
Return
s (Tensor): N x S
"""
if self.mode == "torch":
return _pytorch_istft(transform,
self.frame_len,
self.frame_hop,
n_fft=(self.num_bins - 1) * 2,
return_polar=return_polar,
window=self.w,
normalized=self.normalized,
onesided=self.onesided,
center=self.center,
eps=eps)
else:
return _inverse_stft(transform,
self.K,
self.w,
return_polar=return_polar,
frame_hop=self.frame_hop,
onesided=self.onesided,
center=self.center,
eps=eps)
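if __name__ == "__main__":
    # Minimal round-trip sketch (illustrative only; the frame sizes and the
    # random 2 x 16000 input below are arbitrary assumptions, not reference
    # values): analysis with the STFT layer, resynthesis with iSTFT.
    stft, istft = STFT(400, 160), iSTFT(400, 160)
    wav = th.randn(2, 16000)
    spec = stft(wav)        # N x F x T x 2 (real/imag by default)
    wav_rec = istft(spec)   # N x S
    print(spec.shape, wav_rec.shape)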
|
tensorflow_gnn/models/gat_v2/layers.py | tensorflow/gnn | 611 | 12758123 | """Contains a Graph Attention Network v2 and associated layers."""
from typing import Any, Callable, Mapping, Optional, Union
import tensorflow as tf
import tensorflow_gnn as tfgnn
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")
class GATv2Conv(tfgnn.keras.layers.AnyToAnyConvolutionBase):
"""The multi-head attention from Graph Attention Networks v2 (GATv2).
GATv2 (https://arxiv.org/abs/2105.14491) improves upon the popular
GAT architecture (https://arxiv.org/abs/1710.10903) by allowing the network
to compute a more expressive "dynamic" instead of just "static" attention,
each of whose heads is described by Equations (7), (3) and (4) in
https://arxiv.org/abs/2105.14491.
Example: GATv2-style attention on incoming edges whose result is
concatenated with the old node state and passed through a Dense layer
to compute the new node state.
```
dense = tf.keras.layers.Dense
graph = tfgnn.keras.layers.GraphUpdate(
node_sets={"paper": tfgnn.keras.layers.NodeSetUpdate(
{"cites": tfgnn.keras.layers.GATv2Conv(
message_dim, receiver_tag=tfgnn.TARGET)},
tfgnn.keras.layers.NextStateFromConcat(dense(node_state_dim)))}
)(graph)
```
This layer implements the multi-head attention of GATv2 with the following
generalizations:
* This implementation of GATv2 attends only to edges that are explicitly
stored in the input GraphTensor. Attention of a node to itself is
enabled or disabled by storing or not storing an explicit loop in the
edge set. The example above uses a separate layer to combine the old
node state with the attention result to form the new node state.
* Attention values can be computed from a sender node state that gets
broadcast onto the edge (see arg `sender_node_feature`), from an edge
feature (see arg `sender_edge_feature`), or from their concatenation
(by setting both arguments). This choice is used in place of the sender
node state $h_j$ in the defining equations cited above.
* This layer can be used with `receiver_tag=tfgnn.CONTEXT` to perform a
convolution to the context, with graph components as receivers and the
containment in graph components used in lieu of edges.
* An `edge_dropout` option is provided.
This layer can also be configured to do attention pooling from edges to
context or to receiver nodes (without regard for source nodes) by setting
`sender_node_feature=None` and setting `sender_edge_feature=...` to the
applicable edge feature name (e.g., `tfgnn.DEFAULT_FEATURE_NAME`).
Like the Keras Dense layer, if the input features have rank greater than 2,
this layer computes a point-wise attention along the last axis of the inputs.
For example, if the input features have shape [num_nodes, 2, 4, 1], then it
will perform an identical computation on each of the num_nodes * 2 * 4 input
values.
Init args:
num_heads: The number of attention heads.
per_head_channels: The number of channels for each attention head. This
means that the final output size will be per_head_channels * num_heads.
receiver_tag: one of `tfgnn.SOURCE`, `tfgnn.TARGET` or `tfgnn.CONTEXT`.
The results of attention are aggregated for this graph piece.
If set to `tfgnn.SOURCE` or `tfgnn.TARGET`, the layer can be called for
an edge set and will aggregate results at the specified endpoint of the
edges.
If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or
node set.
If left unset for init, the tag must be passed at call time.
receiver_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME`
for use as the receiver's input feature to attention. (The attention key
is derived from this input.)
sender_node_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME`
for use as the input feature from sender nodes to attention.
      IMPORTANT: Must be set to `None` for use with `receiver_tag=tfgnn.CONTEXT`
on an edge set, or for pooling from edges without sender node states.
sender_edge_feature: Can be set to a feature name of the edge set to select
      it as an input feature. By default, this is set to `None`, which disables
this input.
IMPORTANT: Must be set for use with `receiver_tag=tfgnn.CONTEXT`
on an edge set.
use_bias: If true, a bias term is added to the transformations of query and
value inputs.
edge_dropout: Can be set to a dropout rate for edge dropout. (When pooling
nodes to context, it's the node's membership in a graph component that
is dropped out.)
attention_activation: The nonlinearity used on the transformed inputs
before multiplying with the trained weights of the attention layer.
This can be specified as a Keras layer, a tf.keras.activations.*
function, or a string understood by tf.keras.layers.Activation().
Defaults to "leaky_relu", which in turn defaults to a negative slope
of alpha=0.2.
activation: The nonlinearity applied to the final result of attention,
specified in the same ways as attention_activation.
    kernel_initializer: Can be set to a `kernel_initializer` as understood
by tf.keras.layers.Dense etc.
"""
def __init__(self,
*,
num_heads: int,
per_head_channels: int,
receiver_tag: Optional[tfgnn.IncidentNodeOrContextTag] = None,
receiver_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
sender_node_feature: Optional[
tfgnn.FieldName] = tfgnn.HIDDEN_STATE,
sender_edge_feature: Optional[tfgnn.FieldName] = None,
use_bias: bool = True,
edge_dropout: float = 0.,
attention_activation: Union[str,
Callable[..., Any]] = "leaky_relu",
activation: Union[str, Callable[..., Any]] = "relu",
kernel_initializer: Union[
None, str, tf.keras.initializers.Initializer] = None,
**kwargs):
kwargs.setdefault("name", "gat_v2_conv")
super().__init__(
receiver_tag=receiver_tag,
receiver_feature=receiver_feature,
sender_node_feature=sender_node_feature,
sender_edge_feature=sender_edge_feature,
extra_receiver_ops={"softmax": tfgnn.softmax},
**kwargs)
if not self.takes_receiver_input:
raise ValueError("Receiver feature cannot be None")
if num_heads <= 0:
raise ValueError(f"Number of heads {num_heads} must be greater than 0.")
self._num_heads = num_heads
if per_head_channels <= 0:
raise ValueError(
f"Per-head channels {per_head_channels} must be greater than 0.")
self._per_head_channels = per_head_channels
self._use_bias = use_bias
if not 0 <= edge_dropout < 1:
raise ValueError(f"Edge dropout {edge_dropout} must be in [0, 1).")
self._edge_dropout = edge_dropout
self._attention_activation = tf.keras.activations.get(attention_activation)
self._activation = tf.keras.activations.get(activation)
self._kernel_initializer = kernel_initializer
# Create the transformations for the query input in all heads.
self._w_query = tf.keras.layers.Dense(
per_head_channels * num_heads,
kernel_initializer=kernel_initializer,
# This bias gets added to the attention features but not the outputs.
use_bias=use_bias,
name="query")
# Create the transformations for value input from sender nodes and edges.
if self.takes_sender_node_input:
self._w_sender_node = tf.keras.layers.Dense(
per_head_channels * num_heads,
kernel_initializer=kernel_initializer,
# This bias gets added to the attention features and the outputs.
use_bias=use_bias,
name="value_node")
else:
self._w_sender_node = None
if self.takes_sender_edge_input:
self._w_sender_edge = tf.keras.layers.Dense(
per_head_channels * num_heads,
kernel_initializer=kernel_initializer,
# This bias would be redundant with self._w_sender_node.
use_bias=use_bias and self._w_sender_node is None,
name="value_edge")
else:
self._w_sender_edge = None
if self._w_sender_node is None and self._w_sender_edge is None:
raise ValueError("GATv2Attention initialized with no inputs.")
# Create attention logits layers, one for each head. Note that we can't
# use a single Dense layer that outputs `num_heads` units because we need
# to apply a different attention function a_k to its corresponding
# W_k-transformed features.
self._attention_logits_fn = tf.keras.layers.experimental.EinsumDense(
"...ik,ki->...i",
output_shape=(None, num_heads, 1), # TODO(b/205825425): (num_heads,)
kernel_initializer=kernel_initializer,
name="attn_logits")
def get_config(self):
return dict(
num_heads=self._num_heads,
per_head_channels=self._per_head_channels,
use_bias=self._use_bias,
edge_dropout=self._edge_dropout,
attention_activation=self._attention_activation,
activation=self._activation,
kernel_initializer=self._kernel_initializer,
**super().get_config())
def convolve(self, *,
sender_node_input: Optional[tf.Tensor],
sender_edge_input: Optional[tf.Tensor],
receiver_input: Optional[tf.Tensor],
broadcast_from_sender_node: Callable[[tf.Tensor], tf.Tensor],
broadcast_from_receiver: Callable[[tf.Tensor], tf.Tensor],
pool_to_receiver: Callable[..., tf.Tensor],
extra_receiver_ops: Optional[
Mapping[str, Callable[..., Any]]] = None,
training: bool) -> tf.Tensor:
# Form the attention query for each head.
# [num_items, *extra_dims, num_heads, channels_per_head]
assert receiver_input is not None, "__init__() should have checked this."
query = broadcast_from_receiver(self._split_heads(self._w_query(
receiver_input)))
# Form the attention value by transforming the configured inputs
# and adding up the transformed values.
# [num_items, *extra_dims, num_heads, channels_per_head]
value_terms = []
if sender_node_input is not None:
value_terms.append(broadcast_from_sender_node(
self._split_heads(self._w_sender_node(sender_node_input))))
if sender_edge_input is not None:
value_terms.append(
self._split_heads(self._w_sender_edge(sender_edge_input)))
assert value_terms, "Internal error: no values, __init__ should catch this."
value = tf.add_n(value_terms)
# Compute the features from which attention logits are computed.
# [num_items, *extra_dims, num_heads, channels_per_head]
attention_features = self._attention_activation(query + value)
# Compute the attention logits and softmax to get the coefficients.
# [num_items, *extra_dims, num_heads, 1]
logits = tf.expand_dims(self._attention_logits_fn(attention_features), -1)
attention_coefficients = extra_receiver_ops["softmax"](logits)
if training:
# Apply dropout to the normalized attention coefficients, as is done in
# the original GAT paper. This should have the same effect as edge
# dropout. Also, note that tf.nn.dropout upscales the remaining values,
# which should maintain the sum-up-to-1 per node in expectation.
attention_coefficients = tf.nn.dropout(attention_coefficients,
self._edge_dropout)
# Apply the attention coefficients to the transformed query.
# [num_items, *extra_dims, num_heads, per_head_channels]
messages = value * attention_coefficients
# Take the sum of the weighted values, which equals the weighted average.
# Receivers without incoming senders get the empty sum 0.
# [num_receivers, *extra_dims, num_heads, per_head_channels]
pooled_messages = pool_to_receiver(messages, reduce_type="sum")
# Apply the nonlinearity.
pooled_messages = self._activation(pooled_messages)
pooled_messages = self._merge_heads(pooled_messages)
return pooled_messages
# The following helpers map forth and back between tensors with...
# - a separate heads dimension: shape [..., num_heads, channels_per_head],
# - all heads concatenated: shape [..., num_heads * channels_per_head].
def _split_heads(self, tensor):
extra_dims = tensor.shape[1:-1] # Possibly empty.
if not extra_dims.is_fully_defined():
raise ValueError(
"GATv2Attention requires non-ragged Tensors as inputs, "
"and GraphTensor requires these to have statically known "
f"dimensions except the first, but got {tensor.shape}")
new_shape = (-1, *extra_dims, self._num_heads, self._per_head_channels)
return tf.reshape(tensor, new_shape)
def _merge_heads(self, tensor):
num_merged = 2
extra_dims = tensor.shape[1 : -num_merged] # Possibly empty.
merged_dims = tensor.shape[-num_merged:]
if not extra_dims.is_fully_defined() or not merged_dims.is_fully_defined():
raise ValueError(
f"Unexpected unknown dimensions in shape {tensor.shape}")
new_shape = (-1, *extra_dims, merged_dims.num_elements())
return tf.reshape(tensor, new_shape)
def GATv2EdgePool(*, # To be called like a class initializer. pylint: disable=invalid-name
num_heads: int,
per_head_channels: int,
receiver_tag: Optional[tfgnn.IncidentNodeOrContextTag] = None,
receiver_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
sender_feature: tfgnn.FieldName = tfgnn.HIDDEN_STATE,
**kwargs):
"""Returns a layer for pooling edges with GATv2-style attention.
When initialized with receiver_tag SOURCE or TARGET, the returned layer can
be called on an edge set to compute the weighted sum of edge states at the
given endpoint. The weights are computed by the method of Graph Attention
Networks v2 (GATv2), except that edge states, not node states broadcast from
the edges' other endpoint, are used as input values to attention.
When initialized with receiver_tag CONTEXT, the returned layer can be called
on an edge set to do the analogous pooling of edge states to context.
NOTE: This layer cannot pool node states. For that, use GATv2Conv.
Args:
num_heads: The number of attention heads.
per_head_channels: The number of channels for each attention head. This
means that the final output size will be per_head_channels * num_heads.
receiver_tag: The results of attention are aggregated for this graph piece.
If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or
node set.
If set to an IncidentNodeTag (e.g., `tfgnn.SOURCE` or `tfgnn.TARGET`),
the layer can be called for an edge set and will aggregate results at
the specified endpoint of the edges.
If left unset, the tag must be passed when calling the layer.
receiver_feature: By default, the default state feature of the receiver
is used to compute the attention query. A different feature name can be
selected by setting this argument.
sender_feature: By default, the default state feature of the edge set is
used to compute the attention values. A different feature name can be
selected by setting this argument.
**kwargs: Any other option for GATv2Conv, except sender_node_feature,
which is set to None.
"""
if kwargs.pop("sender_node_feature", None) is not None:
raise TypeError("GATv2EdgePool() got an unexpected keyword argument "
"'sender_node_feature'. Did you mean GATv2Conv()?")
kwargs.setdefault("name", "gat_v2_edge_pool")
return GATv2Conv(
num_heads=num_heads,
per_head_channels=per_head_channels,
receiver_tag=receiver_tag,
receiver_feature=receiver_feature,
sender_edge_feature=sender_feature,
sender_node_feature=None,
**kwargs)
def GATv2GraphUpdate(*, # To be called like a class initializer. pylint: disable=invalid-name
num_heads: int,
per_head_channels: int,
edge_set_name: str,
feature_name: str = tfgnn.HIDDEN_STATE,
name: str = "gat_v2",
**kwargs):
"""Returns a GraphUpdater layer with a Graph Attention Network V2 (GATv2).
The returned layer performs one update step of a Graph Attention Network v2
(GATv2) from https://arxiv.org/abs/2105.14491 on an edge set of a GraphTensor.
It is best suited for graphs that have just that one edge set.
For heterogeneous graphs with multiple node sets and edge sets, users are
advised to consider a GraphUpdate with one or more GATv2Conv objects
instead.
This implementation of GAT attends only to edges that are explicitly stored
in the input GraphTensor. Attention of a node to itself requires having an
explicit loop in the edge set.
Args:
num_heads: The number of attention heads.
per_head_channels: The number of channels for each attention head. This
means that the final output size will be per_head_channels * num_heads.
edge_set_name: A GATv2 update happens on this edge set and its incident
node set(s) of the input GraphTensor.
feature_name: The feature name of node states; defaults to
tfgnn.HIDDEN_STATE.
name: Optionally, a name for the layer returned.
**kwargs: Any optional arguments to GATv2Conv, see there.
"""
# Compat logic, remove in late 2021.
if "output_feature_name" in kwargs:
raise TypeError("Argument 'output_feature_name' is no longer supported.")
# Build a GraphUpdate for the target node set of the given edge_set_name.
# That needs to be deferred until we see a GraphTensorSpec that tells us
# the node_set_name.
def deferred_init_callback(spec: tfgnn.GraphTensorSpec):
node_set_name = spec.edge_sets_spec[
edge_set_name].adjacency_spec.node_set_name(tfgnn.TARGET)
node_set_updates = {
node_set_name: tfgnn.keras.layers.NodeSetUpdate(
{edge_set_name: GATv2Conv(
num_heads=num_heads, per_head_channels=per_head_channels,
receiver_tag=tfgnn.TARGET,
sender_node_feature=feature_name, receiver_feature=feature_name,
**kwargs)},
next_state=NextStateForNodeSetFromSingleEdgeSetInput(),
node_input_feature=feature_name)}
return dict(node_sets=node_set_updates)
return tfgnn.keras.layers.GraphUpdate(
deferred_init_callback=deferred_init_callback, name=name)
# For use by GATv2GraphUpdate().
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")
class NextStateForNodeSetFromSingleEdgeSetInput(tf.keras.layers.Layer):
def call(self, inputs):
unused_node_input, edge_inputs, unused_context_input = inputs
single_edge_set_input, = edge_inputs.values() # Unpack.
return single_edge_set_input
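if __name__ == "__main__":
  # Minimal smoke test (an illustrative sketch, not part of the library): the
  # tiny 3-node citation cycle below is an assumption made up for this demo.
  graph = tfgnn.GraphTensor.from_pieces(
      node_sets={
          "paper": tfgnn.NodeSet.from_fields(
              sizes=tf.constant([3]),
              features={tfgnn.HIDDEN_STATE: tf.random.normal([3, 8])})},
      edge_sets={
          "cites": tfgnn.EdgeSet.from_fields(
              sizes=tf.constant([3]),
              adjacency=tfgnn.Adjacency.from_indices(
                  source=("paper", tf.constant([0, 1, 2])),
                  target=("paper", tf.constant([1, 2, 0]))))})
  layer = GATv2GraphUpdate(num_heads=2, per_head_channels=4,
                           edge_set_name="cites")
  updated = layer(graph)
  # New "paper" states have size num_heads * per_head_channels = 8.
  print(updated.node_sets["paper"][tfgnn.HIDDEN_STATE].shape)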
|
jorldy/test/core/network/test_rainbow_network.py | zenoengine/JORLDY | 300 | 12758130 | import torch
from core.network.rainbow import Rainbow
def test_rainbow_call():
D_in, D_out, D_hidden = 2, 3, 4
N_atom = 5
noise_type = "factorized"
net = Rainbow(
D_in=D_in, D_out=D_out, N_atom=N_atom, noise_type=noise_type, D_hidden=D_hidden
)
batch_size = 6
mock_input = torch.rand((batch_size, D_in))
out = net(mock_input, is_train=True)
assert out.shape == (batch_size, D_out, N_atom)
|
m2cgen/interpreters/ruby/code_generator.py | Symmetry-International/m2cgen | 2,161 | 12758138 | <reponame>Symmetry-International/m2cgen
from contextlib import contextmanager
from m2cgen.interpreters.code_generator import CodeTemplate, ImperativeCodeGenerator
class RubyCodeGenerator(ImperativeCodeGenerator):
tpl_var_declaration = CodeTemplate("")
tpl_num_value = CodeTemplate("{value}")
tpl_infix_expression = CodeTemplate("({left}) {op} ({right})")
tpl_return_statement = tpl_num_value
tpl_array_index_access = CodeTemplate("{array_name}[{index}]")
tpl_if_statement = CodeTemplate("if {if_def}")
tpl_else_statement = CodeTemplate("else")
tpl_block_termination = CodeTemplate("end")
tpl_var_assignment = CodeTemplate("{var_name} = {value}")
def add_function_def(self, name, args):
func_def = f"def {name}({', '.join(args)})"
self.add_code_line(func_def)
self.increase_indent()
@contextmanager
def function_definition(self, name, args):
self.add_function_def(name, args)
yield
self.add_block_termination()
def method_invocation(self, method_name, obj, args):
return f"({obj}).{method_name}({', '.join(map(str, args))})"
def vector_init(self, values):
return f"[{', '.join(values)}]"
|
lldb/test/API/functionalities/module_cache/debug_index/TestDebugIndexCache.py | LaudateCorpus1/llvm-project | 605 | 12758184 | <filename>lldb/test/API/functionalities/module_cache/debug_index/TestDebugIndexCache.py
import glob
import json
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
import time
class DebugIndexCacheTestcase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Set the lldb module cache directory to a directory inside the build
# artifacts directory so no other tests are interfered with.
self.cache_dir = os.path.join(self.getBuildDir(), 'lldb-module-cache')
def get_module_cache_files(self, basename):
module_cache_glob = os.path.join(self.cache_dir,
"llvmcache-*%s*dwarf-index*" % (basename))
return glob.glob(module_cache_glob)
def get_stats(self, log_path=None):
"""
Get the output of the "statistics dump" and return the JSON as a
python dictionary.
"""
# If log_path is set, open the path and emit the output of the command
# for debugging purposes.
if log_path is not None:
f = open(log_path, 'w')
else:
f = None
return_obj = lldb.SBCommandReturnObject()
command = "statistics dump "
if f:
f.write('(lldb) %s\n' % (command))
self.ci.HandleCommand(command, return_obj, False)
metrics_json = return_obj.GetOutput()
if f:
f.write(metrics_json)
return json.loads(metrics_json)
def enable_lldb_index_cache(self):
self.runCmd('settings set symbols.lldb-index-cache-path "%s"' % (self.cache_dir))
self.runCmd('settings set symbols.enable-lldb-index-cache true')
@no_debug_info_test
def test_with_caching_enabled(self):
"""
Test module cache functionality for debug info index caching.
We test that a debug info index file is created for the debug
information when caching is enabled with a file that contains
at least one of each kind of DIE in ManualDWARFIndex::IndexSet.
The input file has DWARF that will fill in every member of the
ManualDWARFIndex::IndexSet class to ensure we can encode all of the
required information.
With caching enabled, we also verify that the appropriate statistics
specify that the cache file was saved to the cache.
"""
self.enable_lldb_index_cache()
src_dir = self.getSourceDir()
yaml_path = os.path.join(src_dir, "exe.yaml")
yaml_base, ext = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact("main.o")
self.yaml2obj(yaml_path, obj_path)
# Create a target with the object file we just created from YAML
target = self.dbg.CreateTarget(obj_path)
self.assertTrue(target, VALID_TARGET)
debug_index_cache_files = self.get_module_cache_files('main.o')
self.assertEqual(len(debug_index_cache_files), 1,
"make sure there is one file in the module cache directory (%s) for main.o that is a debug info cache" % (self.cache_dir))
# Verify that the module statistics have the information that specifies
# if we loaded or saved the debug index and symtab to the cache
stats = self.get_stats()
module_stats = stats['modules'][0]
self.assertFalse(module_stats['debugInfoIndexLoadedFromCache'])
self.assertTrue(module_stats['debugInfoIndexSavedToCache'])
self.assertFalse(module_stats['symbolTableLoadedFromCache'])
self.assertTrue(module_stats['symbolTableSavedToCache'])
# Verify the top level stats track how many things were loaded or saved
# to the cache.
self.assertEqual(stats["totalDebugInfoIndexLoadedFromCache"], 0)
self.assertEqual(stats["totalDebugInfoIndexSavedToCache"], 1)
self.assertEqual(stats["totalSymbolTablesLoadedFromCache"], 0)
self.assertEqual(stats["totalSymbolTablesSavedToCache"], 1)
@no_debug_info_test
def test_with_caching_disabled(self):
"""
Test module cache functionality for debug info index caching.
We test that a debug info index file is not created for the debug
information when caching is disabled with a file that contains
at least one of each kind of DIE in ManualDWARFIndex::IndexSet.
The input file has DWARF that will fill in every member of the
ManualDWARFIndex::IndexSet class to ensure we can encode all of the
required information.
With caching disabled, we also verify that the appropriate
statistics specify that the cache file was not saved to the cache.
"""
src_dir = self.getSourceDir()
yaml_path = os.path.join(src_dir, "exe.yaml")
yaml_base, ext = os.path.splitext(yaml_path)
obj_path = self.getBuildArtifact("main.o")
self.yaml2obj(yaml_path, obj_path)
# Create a target with the object file we just created from YAML
target = self.dbg.CreateTarget(obj_path)
self.assertTrue(target, VALID_TARGET)
debug_index_cache_files = self.get_module_cache_files('main.o')
self.assertEqual(len(debug_index_cache_files), 0,
"make sure there is no file in the module cache directory (%s) for main.o that is a debug info cache" % (self.cache_dir))
# Verify that the module statistics have the information that specifies
# if we loaded or saved the debug index and symtab to the cache
stats = self.get_stats()
module_stats = stats['modules'][0]
self.assertFalse(module_stats['debugInfoIndexLoadedFromCache'])
self.assertFalse(module_stats['debugInfoIndexSavedToCache'])
self.assertFalse(module_stats['symbolTableLoadedFromCache'])
self.assertFalse(module_stats['symbolTableSavedToCache'])
# Verify the top level stats track how many things were loaded or saved
# to the cache.
self.assertEqual(stats["totalDebugInfoIndexLoadedFromCache"], 0)
self.assertEqual(stats["totalDebugInfoIndexSavedToCache"], 0)
self.assertEqual(stats["totalSymbolTablesLoadedFromCache"], 0)
self.assertEqual(stats["totalSymbolTablesSavedToCache"], 0)
|
semantic_segmentation/main.py | pedrohtg/pytorch | 205 | 12758188 | <filename>semantic_segmentation/main.py
from argparse import ArgumentParser
import os
import random
from matplotlib import pyplot as plt
import torch
from torch import optim
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import models
from torchvision.utils import save_image
from data import CityscapesDataset, num_classes, full_to_colour, train_to_full
from model import FeatureResNet, SegResNet
# Setup
parser = ArgumentParser(description='Semantic segmentation')
parser.add_argument('--seed', type=int, default=42, help='Random seed')
parser.add_argument('--workers', type=int, default=8, help='Data loader workers')
parser.add_argument('--epochs', type=int, default=100, help='Training epochs')
parser.add_argument('--crop-size', type=int, default=512, help='Training crop size')
parser.add_argument('--lr', type=float, default=5e-5, help='Learning rate')
parser.add_argument('--momentum', type=float, default=0, help='Momentum')
parser.add_argument('--weight-decay', type=float, default=2e-4, help='Weight decay')
parser.add_argument('--batch-size', type=int, default=16, help='Batch size')
args = parser.parse_args()
random.seed(args.seed)
torch.manual_seed(args.seed)
if not os.path.exists('results'):
os.makedirs('results')
plt.switch_backend('agg') # Allow plotting when running remotely
# Data
train_dataset = CityscapesDataset(split='train', crop=args.crop_size, flip=True)
val_dataset = CityscapesDataset(split='val')
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
val_loader = DataLoader(val_dataset, batch_size=1, num_workers=args.workers, pin_memory=True)
# Training/Testing
pretrained_net = FeatureResNet()
pretrained_net.load_state_dict(models.resnet34(pretrained=True).state_dict())
net = SegResNet(num_classes, pretrained_net).cuda()
crit = nn.BCELoss().cuda()
# Construct optimiser
params_dict = dict(net.named_parameters())
params = []
for key, value in params_dict.items():
if 'bn' in key:
# No weight decay on batch norm
params += [{'params': [value], 'weight_decay': 0}]
elif '.bias' in key:
# No weight decay plus double learning rate on biases
params += [{'params': [value], 'lr': 2 * args.lr, 'weight_decay': 0}]
else:
params += [{'params': [value]}]
optimiser = optim.RMSprop(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scores, mean_scores = [], []
def train(e):
net.train()
for i, (input, target, _) in enumerate(train_loader):
optimiser.zero_grad()
        input, target = Variable(input.cuda(non_blocking=True)), Variable(target.cuda(non_blocking=True))  # non_blocking replaces the removed 'async' keyword (Python 3.7+)
output = F.sigmoid(net(input))
loss = crit(output, target)
print(e, i, loss.data[0])
loss.backward()
optimiser.step()
# Calculates class intersections over unions
def iou(pred, target):
ious = []
# Ignore IoU for background class
for cls in range(num_classes - 1):
pred_inds = pred == cls
target_inds = target == cls
intersection = (pred_inds[target_inds]).long().sum().data.cpu()[0] # Cast to long to prevent overflows
union = pred_inds.long().sum().data.cpu()[0] + target_inds.long().sum().data.cpu()[0] - intersection
if union == 0:
ious.append(float('nan')) # If there is no ground truth, do not include in evaluation
else:
ious.append(intersection / max(union, 1))
return ious
def test(e):
net.eval()
total_ious = []
for i, (input, _, target) in enumerate(val_loader):
        input, target = Variable(input.cuda(non_blocking=True), volatile=True), Variable(target.cuda(non_blocking=True), volatile=True)  # non_blocking replaces the removed 'async' keyword
output = F.log_softmax(net(input))
b, _, h, w = output.size()
pred = output.permute(0, 2, 3, 1).contiguous().view(-1, num_classes).max(1)[1].view(b, h, w)
total_ious.append(iou(pred, target))
# Save images
if i % 25 == 0:
pred = pred.data.cpu()
pred_remapped = pred.clone()
# Convert to full labels
for k, v in train_to_full.items():
pred_remapped[pred == k] = v
# Convert to colour image
pred = pred_remapped
pred_colour = torch.zeros(b, 3, h, w)
for k, v in full_to_colour.items():
pred_r = torch.zeros(b, 1, h, w)
pred_r[(pred == k)] = v[0]
pred_g = torch.zeros(b, 1, h, w)
pred_g[(pred == k)] = v[1]
pred_b = torch.zeros(b, 1, h, w)
pred_b[(pred == k)] = v[2]
pred_colour.add_(torch.cat((pred_r, pred_g, pred_b), 1))
save_image(pred_colour[0].float().div(255), os.path.join('results', str(e) + '_' + str(i) + '.png'))
# Calculate average IoU
total_ious = torch.Tensor(total_ious).transpose(0, 1)
ious = torch.Tensor(num_classes - 1)
for i, class_iou in enumerate(total_ious):
ious[i] = class_iou[class_iou == class_iou].mean() # Calculate mean, ignoring NaNs
print(ious, ious.mean())
scores.append(ious)
# Save weights and scores
torch.save(net.state_dict(), os.path.join('results', str(e) + '_net.pth'))
torch.save(scores, os.path.join('results', 'scores.pth'))
# Plot scores
mean_scores.append(ious.mean())
es = list(range(len(mean_scores)))
plt.plot(es, mean_scores, 'b-')
plt.xlabel('Epoch')
plt.ylabel('Mean IoU')
plt.savefig(os.path.join('results', 'ious.png'))
plt.close()
test(0)
for e in range(1, args.epochs + 1):
train(e)
test(e)
|
tests/functional/custom_singular_tests/test_custom_singular_tests.py | tomasfarias/dbt-core | 799 | 12758210 | <gh_stars>100-1000
import pytest
from pathlib import Path
from dbt.tests.util import run_dbt
# from `test/integration/009_data_test`
#
# Models
#
models__table_copy = """
{{
config(
materialized='table'
)
}}
select * from {{ this.schema }}.seed
"""
#
# Tests
#
tests__fail_email_is_always_null = """
select *
from {{ ref('table_copy') }}
where email is not null
"""
tests__fail_no_ref = """
select 1
"""
tests__dotted_path_pass_id_not_null = """
{# Same as `pass_id_not_null` but with dots in its name #}
select *
from {{ ref('table_copy') }}
where id is null
"""
tests__pass_id_not_null = """
select *
from {{ ref('table_copy') }}
where id is null
"""
tests__pass_no_ref = """
select 1 limit 0
"""
class CustomSingularTestsBase(object):
@pytest.fixture(scope="class", autouse=True)
def setUp(self, project):
"""Create seed and downstream model tests are to be run on"""
project.run_sql_file(project.test_data_dir / Path("seed_expected.sql"))
results = run_dbt()
assert len(results) == 1
@pytest.fixture(scope="class")
def models(self):
return {"table_copy.sql": models__table_copy}
class TestPassingTests(CustomSingularTestsBase):
@pytest.fixture(scope="class")
def tests(self):
return {
"my_db.my_schema.table_copy.pass_id_not_null.sql": tests__dotted_path_pass_id_not_null,
"tests__pass_id_not_null.sql": tests__pass_id_not_null,
"tests__pass_no_ref.sql": tests__pass_no_ref,
}
def test_data_tests(self, project, tests):
test_results = run_dbt(["test"])
assert len(test_results) == len(tests)
for result in test_results:
assert result.status == "pass"
assert not result.skipped
assert result.failures == 0
class TestFailingTests(CustomSingularTestsBase):
@pytest.fixture(scope="class")
def tests(self):
return {
"tests__fail_email_is_always_null.sql": tests__fail_email_is_always_null,
"tests__fail_no_ref.sql": tests__fail_no_ref,
}
def test_data_tests(self, project, tests):
"""assert that all deliberately failing tests actually fail"""
test_results = run_dbt(["test"], expect_pass=False)
assert len(test_results) == len(tests)
for result in test_results:
assert result.status == "fail"
assert not result.skipped
assert result.failures > 0
|
linear_attention_transformer/__init__.py | lucidrains/linear-attention | 361 | 12758228 | from linear_attention_transformer.linear_attention_transformer import LinearAttentionTransformer, LinearAttentionTransformerLM, LinformerSettings, LinformerContextSettings
from linear_attention_transformer.autoregressive_wrapper import AutoregressiveWrapper
from linear_attention_transformer.images import ImageLinearAttention
|
datadog_checks_base/tests/base/checks/win/test_winpdh.py | mchelen-gov/integrations-core | 663 | 12758258 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
from collections import defaultdict
import pytest
from datadog_checks.dev.testing import requires_windows
try:
from datadog_test_libs.win.pdh_mocks import ( # noqa: F401
initialize_pdh_tests,
pdh_mocks_fixture,
pdh_mocks_fixture_bad_perf_strings,
)
from datadog_checks.checks.win.winpdh import SINGLE_INSTANCE_KEY, WinPDHCounter
except ImportError:
import platform
if platform.system() != 'Windows':
pass
logger = logging.getLogger(__file__)
'''
WinPDHCounter tests.
Test specific behavior of the WinPDHCounter class, which provides
the interface to the OS API.
'''
@requires_windows
def test_winpdhcounter_bad_strings_english(pdh_mocks_fixture_bad_perf_strings): # noqa F811
initialize_pdh_tests()
counter = WinPDHCounter('System', 'Processor Queue Length', logger)
vals = counter.get_all_values()
assert len(vals) == 1 # single instance key, should only have one value
assert SINGLE_INSTANCE_KEY in vals
@requires_windows
def test_winpdhcounter_throws_on_bad_input(pdh_mocks_fixture): # noqa F811
initialize_pdh_tests()
with pytest.raises(AttributeError):
WinPDHCounter('Ssystem', 'Processor Queue Length', logger)
with pytest.raises(AttributeError):
WinPDHCounter('System', 'PProcessor Queue Length', logger)
@requires_windows
def test_winpdhcounter_throws_on_bad_input_with_bad_strings(pdh_mocks_fixture_bad_perf_strings): # noqa F811
initialize_pdh_tests()
with pytest.raises(AttributeError):
WinPDHCounter('Ssystem', 'Processor Queue Length', logger)
with pytest.raises(AttributeError):
WinPDHCounter('System', 'PProcessor Queue Length', logger)
@requires_windows
def test_winpdhcounter_bad_strings_not_english(pdh_mocks_fixture_bad_perf_strings): # noqa F811
WinPDHCounter._use_en_counter_names = False
WinPDHCounter.pdh_counter_dict = defaultdict(list)
initialize_pdh_tests(lang="se-sv")
'''
expectation is that the initialization will fail. We attempt to fall
back to english counters if the strings database isn't present; however,
on non-english windows the english counters won't work
'''
with pytest.raises(AttributeError):
WinPDHCounter('System', 'Processor Queue Length', logger)
@requires_windows
def test_winpdhcounter_non_english(pdh_mocks_fixture): # noqa F811
WinPDHCounter._use_en_counter_names = False
WinPDHCounter.pdh_counter_dict = defaultdict(list)
initialize_pdh_tests(lang="se-sv")
counter = WinPDHCounter('System', 'Processor Queue Length', logger)
vals = counter.get_all_values()
assert len(vals) == 1 # single instance key, should only have one value
assert SINGLE_INSTANCE_KEY in vals
|
OracleInternetDirectory/dockerfiles/12.2.1.4.0/container-scripts/start_oid_component.py | rmohare/oracle-product-images | 5,519 | 12758264 | #!/usr/bin/python
#
# Copyright (c) 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
# Author: <NAME>
#
import os, sys, re
domain_name = os.environ.get("DOMAIN_NAME", "oid_domain")
oracle_home = os.environ.get("ORACLE_HOME", "/u01/oracle/")
weblogic_home = '/u01/oracle/wlserver'
i = 1
while i < len(sys.argv):
if sys.argv[i] == '-username':
user = sys.argv[i + 1]
i += 2
elif sys.argv[i] == '-adminpassword':
password = sys.argv[i + 1]
i += 2
elif sys.argv[i] == '-instance_Name':
instanceName= sys.argv[i + 1]
i += 2
else:
print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i])
sys.exit(1)
try:
nmConnect(domainName=domain_name,username=user,password=password,nmType='ssl')
nmServerStatus(serverName=instanceName,serverType='OID')
nmStart(serverName=instanceName,serverType='OID')
exit()
except:
print 'Unable to start '+instanceName
exit()
|
kfac/examples/mnist.py | ntselepidis/kfac | 179 | 12758265 | <filename>kfac/examples/mnist.py<gh_stars>100-1000
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for loading MNIST into TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf
__all__ = [
'load_mnist_as_tensors',
'load_mnist_as_dataset',
'load_mnist_as_iterator',
]
def load_mnist_as_tensors(flatten_images=True, dtype=tf.float32):
"""Loads MNIST as Tensors.
Args:
flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into
[784]-shaped vectors.
dtype: The TF dtype to return the images as.
Returns:
images, labels, num_examples
"""
# mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets(
# '/tmp/mnist', reshape=flatten_images)
# num_examples = len(mnist_data.train.labels)
# images = mnist_data.train.images
# labels = mnist_data.train.labels
#
# images = tf.constant(np.asarray(images, dtype=np.float32))
# labels = tf.constant(np.asarray(labels, dtype=np.int64))
#
# return images, labels, num_examples
(images, labels), _ = tf.keras.datasets.mnist.load_data()
num_examples = images.shape[0]
if flatten_images:
images = images.reshape(images.shape[0], 28**2)
else:
images = images.reshape(images.shape[0], 28, 28, 1)
images = images.astype('float64')
labels = labels.astype('int32')
images /= 255.
images = tf.constant(images, dtype=dtype)
labels = tf.constant(labels)
return images, labels, num_examples
def load_mnist_as_dataset(flatten_images=True):
"""Loads MNIST as a Dataset object.
Args:
flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into
[784]-shaped vectors.
Returns:
dataset, num_examples, where dataset is a Dataset object containing the
whole MNIST training dataset and num_examples is the number of examples
in the MNIST dataset (should be 60000).
"""
images, labels, num_examples = load_mnist_as_tensors(
flatten_images=flatten_images)
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
return dataset, num_examples
def load_mnist_as_iterator(num_epochs, batch_size,
use_fake_data=False,
flatten_images=True):
"""Loads MNIST dataset as an iterator Tensor.
Args:
num_epochs: int. Number of passes to make over the dataset.
batch_size: int. Number of examples per minibatch.
use_fake_data: bool. If True, generate a synthetic dataset rather than
reading MNIST in.
flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into
[784]-shaped vectors.
Returns:
examples: Tensor of shape [batch_size, 784] if 'flatten_images' is
True, else [batch_size, 28, 28, 1]. Each row is one example.
Values in [0, 1].
labels: Tensor of shape [batch_size]. Indices of integer corresponding to
each example. Values in {0...9}.
"""
if use_fake_data:
rng = np.random.RandomState(42)
num_examples = batch_size * 4
images = rng.rand(num_examples, 28 * 28)
if not flatten_images:
images = np.reshape(images, [num_examples, 28, 28, 1])
labels = rng.randint(10, size=num_examples)
dataset = tf.data.Dataset.from_tensor_slices((np.asarray(
images, dtype=np.float32), np.asarray(labels, dtype=np.int64)))
else:
dataset, num_examples = load_mnist_as_dataset(flatten_images=flatten_images)
dataset = (dataset.shuffle(num_examples).repeat(num_epochs)
.batch(batch_size).prefetch(5))
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
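if __name__ == '__main__':
  # Minimal smoke test (an illustrative sketch, not part of the original
  # module): uses the synthetic-data path so nothing is downloaded, and
  # assumes TF1-style graph execution to match the compat.v1 import above.
  tf.disable_eager_execution()
  examples, labels = load_mnist_as_iterator(
      num_epochs=1, batch_size=32, use_fake_data=True)
  with tf.Session() as sess:
    x, y = sess.run([examples, labels])
    print('batch shapes:', x.shape, y.shape)  # expected: (32, 784) (32,)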
|
metrics/kvret/evaluator.py | HKUNLP/UnifiedSKG | 191 | 12758298 | <reponame>HKUNLP/UnifiedSKG<gh_stars>100-1000
# encoding=utf8
from collections import OrderedDict
import json
import nltk
from datasets import load_metric
def load_entities(kvret_entity_file_path):
"""
@param kvret_entity_file_path: the path of kvret_entities.json
    @return: an OrderedDict mapping entity surface strings to their normalized
        forms (spaces replaced by underscores for most entity types)
"""
under_scored_entity_dict = OrderedDict()
with open(kvret_entity_file_path) as f:
entity = json.load(f)
for sub_class_name, sub_class_entity_list in entity.items():
if sub_class_name == 'poi':
for entity_item in sub_class_entity_list:
under_scored_entity_dict[str(entity_item['address'])] = (
str(entity_item['address']).replace(" ", "_"))
under_scored_entity_dict[str(entity_item['poi'])] = (str(entity_item['poi']).replace(" ", "_"))
under_scored_entity_dict[str(entity_item['type'])] = (str(entity_item['type']).replace(" ", "_"))
elif sub_class_name == "distance":
for entity_item in sub_class_entity_list:
under_scored_entity_dict[str(entity_item) + " miles"] = str(entity_item) + " miles"
elif sub_class_name == "temperature":
for entity_item in sub_class_entity_list:
under_scored_entity_dict[str(entity_item) + "f"] = str(entity_item) + "f"
else:
for entity_item in sub_class_entity_list:
under_scored_entity_dict[str(entity_item)] = (str(entity_item).replace(" ", "_"))
# add missing entities,
missed_entities = ["yoga", "tennis", "swimming", "football", " lab ", "doctor", "optometrist", "dentist", "1st",
"2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th",
"11th", "12th", "13th", "14th", "15th", "16th", "17th", "18th", "19th", "20th", "Jill",
"Jack"]
for missed_entity in missed_entities:
under_scored_entity_dict[str(missed_entity)] = (missed_entity)
# special handle of "HR"
del under_scored_entity_dict['HR']
under_scored_entity_dict[' HR '] = ' HR '
return under_scored_entity_dict
def postprocess_text(preds, responses, metric_name):
_preds = [pred.strip() for pred in preds]
_responses = [response.strip() for response in responses]
# rougeLSum expects newline after each sentence
if metric_name == "rouge":
_preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in _preds]
_responses = ["\n".join(nltk.sent_tokenize(response)) for response in _responses]
elif metric_name == "sacrebleu": # sacrebleu
_responses = [[response] for response in _responses]
elif metric_name == "bleu":
_preds = [pred.split(" ") for pred in _preds]
_responses = [[response.split(" ")] for response in _responses]
else:
pass
return _preds, _responses
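# Illustration (not executed): how predictions/references are shaped per metric.
#   postprocess_text(["hi there"], ["hi here"], "bleu")
#       -> ([["hi", "there"]], [[["hi", "here"]]])
#   postprocess_text(["hi there"], ["hi here"], "sacrebleu")
#       -> (["hi there"], [["hi here"]])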
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
summary = {}
assert len(golds) > 0
global_entities = load_entities(golds[0]["entities_file"])
metric_list = []
if section in ["train", "dev"]:
metric_list = ["bleu"]
elif section == "test":
metric_list = ["bleu", "metrics/kvret/response_entity_hit.py"]
for metric_name in metric_list:
metric = load_metric(metric_name)
if metric_name == "metrics/kvret/response_entity_hit.py":
gold_responses = [
{
"response": item["seq_out"],
"intents": [item["intent"]],
}
for item in golds
]
res = metric.compute(
**{
"predictions": preds,
"references": gold_responses,
"global_entities": global_entities,
}
)
summary.update(res)
else:
gold_responses = [item["seq_out"] for item in golds]
processed_preds, processed_golds = postprocess_text(
preds, gold_responses, metric_name
)
res = metric.compute(
predictions=processed_preds,
references=processed_golds,
)
summary[metric_name] = res[metric_name]
return summary |
examples/backends/plot_unmix_optim_torch.py | kguerda-idris/POT | 830 | 12758308 | # -*- coding: utf-8 -*-
r"""
=================================
Wasserstein unmixing with PyTorch
=================================
In this example we estimate mixing parameters from distributions that minimize
the Wasserstein distance. In other words we suppose that a target
distribution :math:`\mu^t` can be expressed as a weighted sum of source
distributions :math:`\mu^s_k` with the following model:
.. math::
\mu^t = \sum_{k=1}^K w_k\mu^s_k
where :math:`\mathbf{w}` is a vector of size :math:`K` and belongs in the
distribution simplex :math:`\Delta_K`.
In order to estimate this weight vector we propose to optimize the Wasserstein
distance between the model and the observed :math:`\mu^t` with respect to
the vector. This leads to the following optimization problem:
.. math::
\min_{\mathbf{w}\in\Delta_K} \quad W \left(\mu^t,\sum_{k=1}^K w_k\mu^s_k\right)
This minimization is done in this example with a simple projected gradient
descent in PyTorch. We use the automatic backend of POT that allows us to
compute the Wasserstein distance with :any:`ot.emd2` with
differentiable losses.
"""
# Author: <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 2
import numpy as np
import matplotlib.pylab as pl
import ot
import torch
##############################################################################
# Generate data
# -------------
#%% Data
nt = 100
nt1 = 10 #
ns1 = 50
ns = 2 * ns1
rng = np.random.RandomState(2)
xt = rng.randn(nt, 2) * 0.2
xt[:nt1, 0] += 1
xt[nt1:, 1] += 1
xs1 = rng.randn(ns1, 2) * 0.2
xs1[:, 0] += 1
xs2 = rng.randn(ns1, 2) * 0.2
xs2[:, 1] += 1
xs = np.concatenate((xs1, xs2))
# Sample reweighting matrix H
H = np.zeros((ns, 2))
H[:ns1, 0] = 1 / ns1
H[ns1:, 1] = 1 / ns1
# each columns sums to 1 and has weights only for samples form the
# corresponding source distribution
M = ot.dist(xs, xt)
##############################################################################
# Plot data
# ---------
#%% plot the distributions
pl.figure(1)
pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5)
pl.scatter(xs1[:, 0], xs1[:, 1], label='Source $\mu^s_1$', alpha=0.5)
pl.scatter(xs2[:, 0], xs2[:, 1], label='Source $\mu^s_2$', alpha=0.5)
pl.title('Sources and Target distributions')
pl.legend()
##############################################################################
# Optimization of the model wrt the Wasserstein distance
# ------------------------------------------------------
#%% Weights optimization with gradient descent
# convert numpy arrays to torch tensors
H2 = torch.tensor(H)
M2 = torch.tensor(M)
# weights for the source distributions
w = torch.tensor(ot.unif(2), requires_grad=True)
# uniform weights for target
b = torch.tensor(ot.unif(nt))
lr = 2e-3 # learning rate
niter = 500 # number of iterations
losses = [] # loss along the iterations
# loss for the minimal Wasserstein estimator
def get_loss(w):
a = torch.mv(H2, w) # distribution reweighting
return ot.emd2(a, b, M2) # squared Wasserstein 2
for i in range(niter):
loss = get_loss(w)
losses.append(float(loss))
loss.backward()
with torch.no_grad():
w -= lr * w.grad # gradient step
w[:] = ot.utils.proj_simplex(w) # projection on the simplex
w.grad.zero_()
##############################################################################
# Estimated weights and convergence of the objective
# ---------------------------------------------------
we = w.detach().numpy()
print('Estimated mixture:', we)
pl.figure(2)
pl.semilogy(losses)
pl.grid()
pl.title('Wasserstein distance')
pl.xlabel("Iterations")
##############################################################################
# Ploting the reweighted source distribution
# ------------------------------------------
pl.figure(3)
# compute source weights
ws = H.dot(we)
pl.scatter(xt[:, 0], xt[:, 1], label='Target $\mu^t$', alpha=0.5)
pl.scatter(xs[:, 0], xs[:, 1], color='C3', s=ws * 20 * ns, label='Weighted sources $\sum_{k} w_k\mu^s_k$', alpha=0.5)
pl.title('Target and reweighted source distributions')
pl.legend()
|
IntelligentUAVPathPlanningSimulationSystem/core/main.py | wangwei39120157028/IntelligentUAVPathPlanningSimulationSystem-Drone | 208 | 12758323 | # -*- coding:utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from appUI.MainWindow import main
if __name__ == "__main__":
#
main()
|
js2py/internals/prototypes/jsboolean.py | renesugar/Js2Py | 1,926 | 12758332 | from __future__ import unicode_literals
from ..conversions import *
from ..func_utils import *
class BooleanPrototype:
def toString(this, args):
if GetClass(this) != 'Boolean':
raise MakeError('TypeError',
'Boolean.prototype.toString is not generic')
if is_object(this):
this = this.value
return u'true' if this else u'false'
def valueOf(this, args):
if GetClass(this) != 'Boolean':
raise MakeError('TypeError',
'Boolean.prototype.valueOf is not generic')
if is_object(this):
this = this.value
return this
|
app/my_bokeh_app.py | adarsh0806/scipy2015-blaze-bokeh | 168 | 12758365 | # -*- coding: utf-8 -*-
import math
from collections import OrderedDict
import flask
import pandas as pd
import netCDF4
import numpy as np
from bokeh.embed import components
from bokeh.resources import INLINE
from bokeh.templates import RESOURCES
from bokeh.util.string import encode_utf8
from bokeh.models import DatetimeTickFormatter, ColumnDataSource, HoverTool, Plot, Range1d
from bokeh.palettes import RdBu11
from bokeh.models.glyphs import Text, Rect
from bokeh.plotting import figure, show, output_notebook, hplot, vplot
import utils.world_countries as wc
from utils.colormap import RGBAColorMapper
from viz2 import climate_map, timeseries, legend, title, get_slice
app = flask.Flask(__name__)
colormap = RGBAColorMapper(-6, 6, RdBu11)
@app.route("/")
def index():
# Create layout
c_map = climate_map()
ts = timeseries()
l = legend()
t = title()
map_legend = hplot(c_map, l)
layout = vplot(t, map_legend, ts)
plot_resources = RESOURCES.render(
js_raw=INLINE.js_raw,
css_raw=INLINE.css_raw,
js_files=INLINE.js_files,
css_files=INLINE.css_files,
)
script, div = components(layout, INLINE)
html = flask.render_template(
'embed.html',
plot_script=script,
plot_div=div,
plot_resources=plot_resources,
)
return encode_utf8(html)
if __name__ == "__main__":
app.run(debug=True) |
scripts/compute_possible_instructions.py | m-smith/babyai | 411 | 12758377 | <reponame>m-smith/babyai<filename>scripts/compute_possible_instructions.py
#!/usr/bin/env python3
"""
Compute the number of possible instructions in the BabyAI grammar.
"""
from gym_minigrid.minigrid import COLOR_NAMES
def count_Sent():
return (
count_Sent1() +
# Sent1, then Sent1
count_Sent1() * count_Sent1() +
# Sent1 after you Sent1
count_Sent1() * count_Sent1()
)
def count_Sent1():
return (
count_Clause() +
# Clause and Clause
count_Clause() * count_Clause()
)
def count_Clause():
return (
# go to
count_Descr() +
# pick up
count_DescrNotDoor() +
# open
count_DescrDoor() +
# put next
count_DescrNotDoor() * count_Descr()
)
def count_DescrDoor():
# (the|a) Color door Location
return 2 * count_Color() * count_LocSpec()
def count_DescrBall():
return count_DescrDoor()
def count_DescrBox():
return count_DescrDoor()
def count_DescrKey():
return count_DescrDoor()
def count_Descr():
return count_DescrDoor() + count_DescrBall() + count_DescrBox() + count_DescrKey()
def count_DescrNotDoor():
return count_DescrBall() + count_DescrBox() + count_DescrKey()
def count_Color():
# Empty string or color
return len([None] + COLOR_NAMES)
def count_LocSpec():
# Empty string or location
return len([None, 'left', 'right', 'front', 'behind'])
print('DescrKey: ', count_DescrKey())
print('Descr: ', count_Descr())
print('DescrNotDoor: ', count_DescrNotDoor())
print('Clause: ', count_Clause())
print('Sent1: ', count_Sent1())
print('Sent: ', count_Sent())
print('Sent: {:.3g}'.format(count_Sent()))
|
sdk/core/azure-common/tests/test_credentials.py | rsdoherty/azure-sdk-for-python | 2,728 | 12758450 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
try:
from unittest import mock
except ImportError:
import mock
import pytest
from azure.common.credentials import _CliCredentials
import azure.common.credentials
class MockCliCredentials:
def _token_retriever(self):
return "NOTUSED", "TOKEN", {'expiresIn': 42}
def signed_session(self, session=None):
return session
class MockCliProfile:
def __init__(self):
self.received_resource = None
def get_login_credentials(self, resource):
self.received_resource = resource
return MockCliCredentials(), "NOTUSED", "NOTUSED"
def test_cli_credentials_mgmt():
cli_profile = MockCliProfile()
cred = _CliCredentials(cli_profile, "http://resource.id")
# Mgmt scenario
session = cred.signed_session("session")
assert cli_profile.received_resource == "http://resource.id"
assert session == "session"
# Trying to mock azure-core not here
with mock.patch('azure.common.credentials._AccessToken', None):
# Should not crash
cred.signed_session("session")
def test_cli_credentials_accesstoken():
cli_profile = MockCliProfile()
cred = _CliCredentials(cli_profile, "http://resource.id")
# Track2 scenario
access_token = cred.get_token("http://resource.id/.default")
assert cli_profile.received_resource == "http://resource.id"
assert access_token.token == "TOKEN"
assert access_token.expires_on <= int(time.time() + 42)
access_token = cred.get_token("http://resource.newid")
assert cli_profile.received_resource == "http://resource.newid"
# Trying to mock azure-core not here
with mock.patch('azure.common.credentials._AccessToken', None):
with pytest.raises(ImportError):
cred.get_token("http://resource.yetid") |
equip/analysis/graph/io.py | neuroo/equip | 102 | 12758504 | <reponame>neuroo/equip
# -*- coding: utf-8 -*-
"""
equip.analysis.graph.io
~~~~~~~~~~~~~~~~~~~~~~~
Outputs the graph structures
:copyright: (c) 2014 by <NAME> (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
from .graphs import DiGraph, Tree
DOT_STYLE = """
rankdir=TD; ordering=out;
graph[fontsize=10 fontname="Verdana"];
color="#efefef";
node[shape=box style=filled fontsize=8 fontname="Verdana" fillcolor="#efefef"];
edge[fontsize=8 fontname="Verdana"];
"""
class DotConverter(object):
def __init__(self, graph):
self.g = graph
self.buffer = ''
self.node_ids = {}
@staticmethod
def process(graph):
converter = DotConverter(graph)
converter.run()
return converter.buffer
def run(self):
self.buffer += 'digraph G {'
self.buffer += DOT_STYLE
if isinstance(self.g, DiGraph):
for edge in self.g.edges:
self.add_edge(edge)
elif isinstance(self.g, Tree):
root = self.g.root
worklist = [root]
while worklist:
current = worklist.pop(0)
if current.has_children():
num_children = current.num_children()
i = 0
while i < num_children:
child = current.children[i]
if child is None:
i += 1
continue
self.add_tree_edge(current, child)
worklist.insert(0, child)
i += 1
else:
nid = self.get_node_id(current)
self.buffer += '}\n'
def add_edge(self, edge):
labels = ''
if edge.kind is not None:
data = '' if edge.data is None else str(edge.data)
labels = '[label="%s - %s"]' % (edge.kind, data)
nid1 = self.get_node_id(edge.source)
nid2 = self.get_node_id(edge.dest)
self.buffer += '%s -> %s %s;\n' % (nid1, nid2, labels)
def add_tree_edge(self, node1, node2):
nid1 = self.get_node_id(node1)
nid2 = self.get_node_id(node2)
self.buffer += '%s -> %s;\n' % (nid1, nid2)
def get_node_id(self, node):
if node not in self.node_ids:
self.node_ids[node] = 'node_%d' % node.gid
self.add_node(node, self.node_ids[node])
return self.node_ids[node]
def add_node(self, node, node_id):
label = ''
if node.data is not None:
node_kind = ('%s - ' % node.kind) if node.kind is not None else ''
label = '[label="Node%d - %s%s"]' % (node.gid, node_kind, node.data)
self.buffer += '%s %s;\n' % (node_id, label)
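# Example usage (illustrative sketch, not part of the original module):
#   dot_source = DotConverter.process(graph)  # `graph` is an equip DiGraph or Tree
#   with open('out.dot', 'w') as fp:
#     fp.write(dot_source)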
|
utils/helper.py | parksunwoo/daanet | 145 | 12758509 | <gh_stars>100-1000
import importlib
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
import traceback
from collections import defaultdict
from random import shuffle
import GPUtil
import tensorflow as tf
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from tensorflow.contrib.training import HParams
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from gpu_env import APP_NAME, DEVICE_ID, IGNORE_PATTERNS
millnames = ['', ' K', ' M', ' BL', ' TL']
regex_title_source = re.compile(r'^([^_\-—]*).*?[_\-—]\s?([^_\-—]+)[\s_\-—]?$')
def set_logger(model_id=None):
logger = logging.getLogger(APP_NAME)
logger.setLevel(logging.INFO)
if model_id:
formatter = logging.Formatter(
'%(levelname)-.1s:' + model_id + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(
'%(levelname)-.1s:[%(filename)s:%(lineno)d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def touch(fname: str, times=None, create_dirs: bool = False):
import os
if create_dirs:
base_dir = os.path.dirname(fname)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with open(fname, 'a'):
os.utime(fname, times)
def touch_dir(base_dir: str) -> None:
import os
if not os.path.exists(base_dir):
os.makedirs(base_dir)
def millify(n):
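    # e.g. (illustrative): millify(1200000) -> '1 M', millify(512) -> '512'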
n = float(n)
millidx = max(0, min(len(millnames) - 1,
int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))
return '{:.0f}{}'.format(n / 10 ** (3 * millidx), millnames[millidx])
def args2hparam(args, vocab):
params = vars(args)
params['vocab'] = vocab
p = HParams()
for k, v in params.items():
p.add_hparam(k, v)
return p
def runner(main, *done):
logger = logging.getLogger(APP_NAME)
try:
main()
except (tf.errors.OutOfRangeError, IndexError) as e:
logger.warning('Data has been exhausted! Done!')
finally:
[f() for f in done]
def parse_yaml(yaml_path, model_id):
from tensorflow.contrib.training import HParams
from ruamel.yaml import YAML
hparams = HParams()
hparams.add_hparam('model_id', model_id)
with open(yaml_path) as fp:
customized = YAML().load(fp)
for k, v in customized.items():
if k in hparams:
hparams.set_hparam(k, v)
else:
hparams.add_hparam(k, v)
return hparams
def parse_args(yaml_path, model_id, default_set, followup=None):
logger = logging.getLogger(APP_NAME)
hparams = HParams()
hparams.add_hparam('model_id', model_id)
with open('default.yaml') as fp:
configs = YAML().load(fp)
default_cfg = configs[default_set]
add_param_recur(hparams, default_cfg)
if yaml_path:
logger.info('loading parameters...')
with open(yaml_path) as fp:
customized = YAML().load(fp)
for k, v in customized.items():
if k in hparams and hparams.get(k) != v:
logger.info('%20s: %20s -> %20s' % (k, hparams.get(k), v))
hparams.set_hparam(k, v)
elif k not in hparams: # add new parameter
hparams.add_hparam(k, v)
logger.info('%30s %20s: %20s' % ("[add from %s]" % yaml_path, k, hparams.get(k)))
if followup:
# useful when changing args for prediction
logger.info('override args with follow-up args...')
for k, v in followup.items():
if k in hparams and hparams.get(k) != v:
logger.info('%20s: %20s -> %20s' % (k, hparams.get(k), v))
hparams.set_hparam(k, v)
elif k not in hparams:
logger.warning('%s is not a valid attribute! ignore!' % k)
if 'save_dir' not in hparams:
hparams.add_hparam('save_dir', os.path.join(hparams.get('model_dir'), hparams.get('model_id')))
if 'code_dir' not in hparams:
hparams.add_hparam('code_dir', os.path.join(hparams.get('save_dir'), 'code'))
hparams.set_hparam('summary_dir', os.path.join(hparams.get('save_dir'), 'summary'))
# reset logger model id
logger = set_logger(model_id='%s:%s' % (DEVICE_ID, hparams.get('model_id')))
try:
shutil.copytree('./', hparams.get('code_dir'), ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))
logger.info('current code base is copied to %s' % hparams.get('save_dir'))
except FileExistsError:
logger.info('code base exist, no need to copy!')
# if hparams.get('model_id') != model_id:
# logger.warning('model id is changed %s -> %s! '
# 'This happens when you train a pretrained model' % (
# hparams.get('model_id'), model_id))
# hparams.set_hparam('model_id', model_id)
if 'loss_csv_file' not in hparams:
hparams.add_hparam('loss_csv_file', os.path.join(hparams.get('save_dir'), 'loss.csv'))
if 'is_serving' not in hparams:
hparams.add_hparam('is_serving', False)
logger.info('current parameters')
for k, v in sorted(vars(hparams).items()):
if not k.startswith('_'):
logger.info('%20s = %-20s' % (k, v))
return hparams
def add_param_recur(root, p_tree):
for k, v in p_tree.items():
if isinstance(v, CommentedMap):
new_node = HParams()
add_param_recur(new_node, v)
root.add_hparam(k, new_node)
else:
root.add_hparam(k, v)
def fill_gpu_jobs(all_jobs, logger, job_parser,
wait_until_next=300, retry_delay=300, do_shuffle=False):
if do_shuffle:
shuffle(all_jobs)
all_procs = []
while all_jobs:
logger.info('number of jobs in the queue: %d' % len(all_jobs))
j = all_jobs.pop()
logger.info('will start the job: %s ...' % job_parser(j))
try:
GPUtil.getFirstAvailable()
# check if there is a free GPU!
process = subprocess.Popen(job_parser(j), shell=True)
all_procs.append((process, j))
time.sleep(wait_until_next)
except FileNotFoundError:
logger.warning('there is no gpu, running on cpu!')
process = subprocess.Popen(job_parser(j), shell=True)
all_procs.append((process, j))
except RuntimeError as e:
logger.error(str(e))
logger.warning('all gpus are busy! waiting for a free slot...')
# add job back
all_jobs.append(j)
time.sleep(retry_delay)
exit_codes = [(p.wait(), j) for p, j in all_procs]
return [v for p, v in exit_codes if p != 0]
def get_args_cli(args):
d = defaultdict(list)
if args:
for k, v in ((k.lstrip('-'), v) for k, v in (a.split('=') for a in args)):
d[k].append(v)
for k, v in d.items():
parsed_v = [s for s in (parse_arg(vv) for vv in v) if s is not None]
if len(parsed_v) > 1:
d[k] = parsed_v
if len(parsed_v) == 1:
d[k] = parsed_v[0]
return d
def parse_arg(v: str):
if v.startswith('[') and v.endswith(']'):
# function args must be immutable tuples not list
tmp = v.replace('[', '').replace(']', '').strip().split(',')
if len(tmp) > 0:
return [parse_arg(vv.strip()) for vv in tmp]
else:
return []
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
def get_scope_name():
return tf.get_variable_scope().name.split('/')[0]
def sparse_nll_loss(probs, labels, epsilon=1e-9, scope=None):
"""
negative log likelihood loss
"""
with tf.name_scope(scope, "log_loss"):
labels = tf.one_hot(labels, tf.shape(probs)[1], axis=1, dtype=tf.float32)
losses = - tf.reduce_sum(labels * tf.log(probs + epsilon), 1)
return losses
def normalize_distribution(p, eps=1e-9):
p += eps
norm = tf.reduce_sum(p, axis=1)
return tf.cast(p, tf.float32) / tf.reshape(norm, (-1, 1))
def kl_divergence(p, q, eps=1e-9):
p = normalize_distribution(p, eps)
q = normalize_distribution(q, eps)
return tf.reduce_sum(p * tf.log(p / q), axis=1)
def get_kl_loss(start_label, start_probs, bandwidth=1.0):
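    # Builds a soft target distribution over positions that decays as
    # exp(-(i - start_label)^2 / bandwidth) and returns its symmetric KL
    # divergence from the predicted start_probs.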
a = tf.reshape(tf.range(tf.shape(start_probs)[1]), (1, -1))
b = tf.reshape(start_label, (-1, 1))
start_true_probs = tf.exp(-tf.cast(tf.squared_difference(a, b), tf.float32) / bandwidth)
return sym_kl_divergence(start_true_probs, start_probs)
def sym_kl_divergence(p, q, eps=1e-9):
return (kl_divergence(p, q, eps) + kl_divergence(q, p, eps)) / 2.0
def get_conv1d(x, out_dim, window_len, name, act_fn):
return tf.layers.conv1d(x, out_dim, window_len, strides=1, padding='SAME', name=name, activation=act_fn)
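# Nearest-neighbour upsampling of `a` (batch x T_a x D_a) along the time axis so
# that its sequence length matches that of `b` (batch x T_b x ...).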
def upsampling_a2b(a, b, D_a):
return tf.squeeze(tf.image.resize_images(tf.expand_dims(a, axis=-1), [tf.shape(b)[1], D_a],
method=ResizeMethod.NEAREST_NEIGHBOR), axis=-1)
def dropout(args, keep_prob, is_train, mode="recurrent"):
if keep_prob < 1.0:
noise_shape = None
scale = 1.0
shape = tf.shape(args)
if mode == "embedding":
noise_shape = [shape[0], 1]
scale = keep_prob
if mode == "recurrent" and len(args.get_shape().as_list()) == 3:
noise_shape = [shape[0], 1, shape[-1]]
args = tf.cond(is_train, lambda: tf.nn.dropout(
args, keep_prob, noise_shape=noise_shape) * scale, lambda: args)
return args
def get_tmp_yaml(par, prefix=None):
import tempfile
with tempfile.NamedTemporaryFile('w', delete=False, prefix=prefix) as tmp:
YAML().dump(par, tmp)
return tmp.name
def build_model(args, reset_graph=True):
rccore = importlib.import_module(args.package_rccore)
if reset_graph:
tf.reset_default_graph()
return rccore.RCCore(args)
def get_last_output(output, sequence_length, name):
"""Get the last value of the returned output of an RNN.
http://disq.us/p/1gjkgdr
output: [batch x number of steps x ... ] Output of the dynamic lstm.
sequence_length: [batch] Length of each of the sequence.
"""
rng = tf.range(0, tf.shape(sequence_length)[0])
indexes = tf.stack([rng, sequence_length - 1], 1)
return tf.gather_nd(output, indexes, name)
def import_class(import_str):
mod_str, _sep, class_str = import_str.rpartition('.')
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, cur_dir)
__import__(mod_str)
sys.path.remove(cur_dir)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def delete_module(modname):
from sys import modules
del_keys = []
for mod_key, mod_value in modules.items():
if modname in mod_key:
del_keys.append(mod_key)
elif modname in str(mod_value):
del_keys.append(mod_key)
for key in del_keys:
del modules[key]
|
deepnet/sparse_code_layer.py | airingzhang/deepnet | 626 | 12758521 | from layer import *
class SparseCodeLayer(Layer):
def AllocateBatchsizeDependentMemory(self, batchsize):
super(SparseCodeLayer, self).AllocateBatchsizeDependentMemory(batchsize)
self.approximator = cm.empty(self.state.shape)
self.temp3 = cm.empty(self.state.shape)
self.grad = cm.empty(self.state.shape)
self.grad_scale = cm.CUDAMatrix(np.zeros((self.state.shape[0], 1)))
self.m_by_m = cm.empty((self.state.shape[0], self.state.shape[0]))
def ApplyActivation(self, state):
if self.activation == deepnet_pb2.Hyperparams.LOGISTIC:
cm.sigmoid(state)
elif self.activation == deepnet_pb2.Hyperparams.TANH:
cm.tanh(state)
elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR:
state.greater_than(0, target=self.temp)
state.mult(self.temp)
elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH:
cm.log_1_plus_exp(state)
elif self.activation == deepnet_pb2.Hyperparams.LINEAR:
pass
def ComputeDeriv(self, state):
"""Compute derivative w.r.t input given derivative w.r.t output."""
if self.activation == deepnet_pb2.Hyperparams.LOGISTIC:
self.deriv.apply_logistic_deriv(state)
elif self.activation == deepnet_pb2.Hyperparams.TANH:
self.deriv.apply_tanh_deriv(state)
if self.hyperparams.dropout:
self.deriv.mult(self.mask)
elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR:
self.deriv.apply_rectified_linear_deriv(state)
elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH:
self.deriv.apply_rectified_linear_smooth_deriv(state)
elif self.activation == deepnet_pb2.Hyperparams.LINEAR:
if self.hyperparams.dropout:
self.deriv.mult(self.mask)
elif self.activation == deepnet_pb2.Hyperparams.SOFTMAX:
raise Exception('Not implemented.')
else:
raise Exception('Unknown activation.')
|
pool_automation/roles/aws_manage/library/test_stateful_set.py | Rob-S/indy-node | 627 | 12758527 | <reponame>Rob-S/indy-node<gh_stars>100-1000
import random
import string
import json
import boto3
import pytest
from stateful_set import (
AWS_REGIONS, InstanceParams, find_ubuntu_ami,
AwsEC2Launcher, AwsEC2Terminator, find_instances,
valid_instances, get_tag, manage_instances
)
class EC2TestCtx(object):
def __init__(self, region, resource, client, prices=None):
self.region = region
self.resource = resource
self.client = client
self.prices = prices
############
# FIXTURES #
############
@pytest.fixture
def ec2(regions, ec2_all):
return [ec2_all[r]['rc'] for r in regions]
@pytest.fixture
def ec2cl(regions, ec2_all):
return [ec2_all[r]['cl'] for r in regions]
@pytest.fixture
def ec2_resources(request, regions, ec2):
def gen_params(group_suffix=None, key_name_suffix=None,
security_group_suffix=None):
def _random(N=7):
return ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(N))
return InstanceParams(
project='Indy-PA-dev',
add_tags={'Purpose': 'Test Pool Automation'},
namespace='test_stateful_set',
group="group_{}"
.format(group_suffix if group_suffix
else _random()),
key_name="test_stateful_set_key_{}"
.format(key_name_suffix if key_name_suffix
else _random()),
security_group="test_stateful_set_security_group_{}"
.format(security_group_suffix
if security_group_suffix
else _random()),
type_name='t2.micro',
# TODO docs
market_spot=(request.config.getoption("--market-type") == 'spot'),
spot_max_price=None,
# TODO docs
ebs_volume_size=9,
ebs_volume_type='gp2',
)
def manage_key_pair(ec2, present, params):
count = 0
for key in ec2.key_pairs.all():
if key.key_name != params.key_name:
continue
if present and count == 0:
count = 1
else:
key.delete()
if present and count == 0:
ec2.create_key_pair(KeyName=params.key_name)
def manage_security_group(ec2, present, params):
count = 0
for sgroup in ec2.security_groups.all():
if sgroup.group_name != params.security_group:
continue
if present and count == 0:
count = 1
else:
sgroup.delete()
if present and count == 0:
sg = ec2.create_security_group(
GroupName=params.security_group,
Description='Test security group')
sg.create_tags(Tags=[
{'Key': 'Name', 'Value': "{}-{}-{}"
.format(params.project,
params.namespace,
params.group)},
{'Key': 'Project', 'Value': params.project},
{'Key': 'Namespace', 'Value': params.namespace},
{'Key': 'Group', 'Value': params.group}])
params = gen_params(
group_suffix=request.node.name,
key_name_suffix=request.node.name,
security_group_suffix=request.node.name
)
for rc in ec2:
manage_key_pair(rc, True, params)
manage_security_group(rc, True, params)
yield params
terminator = AwsEC2Terminator()
for region, rc in zip(regions, ec2):
for inst in find_instances(
rc, params.project, params.namespace, params.group):
terminator.terminate(inst, region)
terminator.wait(False)
for rc in ec2:
manage_key_pair(rc, False, params)
manage_security_group(rc, False, params)
@pytest.fixture(scope="session")
def pricing_client():
# pricing API is available only through us-east-1 and ap-south-1
# https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-pelong.html
return boto3.client('pricing', region_name='us-east-1')
@pytest.fixture
def on_demand_prices(request, pricing_client, ec2_prices,
regions, ec2_resources):
marker = request.node.get_closest_marker('prices')
if not (marker and ('on-demand' in marker.kwargs.get('term', []))):
return
for region_code in regions:
res = ec2_prices[region_code]['on-demand'].get(ec2_resources.type_name)
if res is None:
# Search product filters
# https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_pricing_Filter.html
# https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-ppslong.html
filters = [
{'Field': k, 'Type': 'TERM_MATCH', 'Value': v} for k, v in
(('tenancy', 'shared'),
('capacitystatus', 'UnusedCapacityReservation'),
('location', AWS_REGIONS[region_code].location),
('operatingSystem', 'Linux'), # TODO might be parametrized
('instanceType', ec2_resources.type_name),
('preInstalledSw', 'NA'))
]
products = pricing_client.get_products(
ServiceCode='AmazonEC2', Filters=filters)
price_info = json.loads(products['PriceList'][0])
# https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/reading-an-offer.html
#
# "terms": {
# "OnDemand": {
# "<sku.offerTermCode>": {
# "offerTermCode":"The term code of the product",
# "sku":"The SKU of the product",
# ...
# "priceDimensions": {
# "<sku.offerTermCode.rateCode>": {
# "rateCode":"The rate code of the price",
# ...
# "pricePerUnit": {
# "currencyCode":"currencyRate",
# }
# }
# }
# }
# }
# }
offer = price_info['terms']['OnDemand'].popitem()[1]
price_tier = offer['priceDimensions'].popitem()[1]
res = float(price_tier['pricePerUnit']['USD'])
ec2_prices[region_code]['on-demand'][ec2_resources.type_name] = res
@pytest.fixture
def ec2ctxs(regions, ec2, ec2cl, on_demand_prices, ec2_prices):
assert len(set([len(l) for l in (regions, ec2, ec2cl)])) == 1
return [EC2TestCtx(r, rc, cl, ec2_prices[r]) for r, rc, cl
in zip(regions, ec2, ec2cl)]
@pytest.fixture
def ec2ctx(ec2ctxs):
assert len(ec2ctxs) == 1
return ec2ctxs[0]
#########
# TESTS #
#########
def test_find_ubuntu_image(ec2ctx):
image_id = find_ubuntu_ami(ec2ctx.resource)
assert image_id is not None
image = ec2ctx.resource.Image(image_id)
assert image.owner_id == '099720109477' # Canonical
assert image.state == 'available'
assert image.architecture == 'x86_64'
assert 'Canonical' in image.description
assert 'Ubuntu' in image.description
assert '16.04' in image.description
assert 'UNSUPPORTED' not in image.description
# TODO split test_AwsEC2Launcher tests into multiple more focused ones
def check_instance_params(inst, params, ec2cl=None, price=None):
# https://stackoverflow.com/questions/5595425/what-is-the-best-way-to-compare-floats-for-almost-equality-in-python
# https://www.python.org/dev/peps/pep-0485/#proposed-implementation
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def check_tags(obj):
assert {'Key': 'Project', 'Value': params.project} in obj.tags
assert {'Key': 'Namespace', 'Value': params.namespace} in obj.tags
assert {'Key': 'Group', 'Value': params.group} in obj.tags
for tag_key, tag_value in params.add_tags.iteritems():
assert tag_value == get_tag(obj, tag_key)
# general
assert inst.instance_type == params.type_name
assert inst.state['Name'] == 'running'
# tags
check_tags(inst)
# linked resources
assert inst.key_name == params.key_name
assert len(inst.security_groups) == 1
assert inst.security_groups[0]['GroupName'] == params.security_group
# ebs options
volumes = list(inst.volumes.all())
assert len(volumes) == 1
assert volumes[0].size == params.ebs_volume_size
assert volumes[0].volume_type == params.ebs_volume_type
check_tags(volumes[0])
# market options
if params.market_spot:
assert inst.instance_lifecycle == 'spot'
assert inst.spot_instance_request_id is not None
spot_params = ec2cl.describe_spot_instance_requests(
SpotInstanceRequestIds=[inst.spot_instance_request_id])
assert isclose(
float(spot_params['SpotInstanceRequests'][0]['SpotPrice']),
price
)
@pytest.mark.regions([['us-east-2', 'eu-west-1']])
def test_AwsEC2Launcher_wait(ec2ctxs, ec2_resources):
launcher = AwsEC2Launcher()
instances = []
params = ec2_resources._replace(market_spot=False)
for ctx in ec2ctxs:
_instances = launcher.launch(
params, 1, region=ctx.region, ec2=ctx.resource)
assert len(_instances) == 1
instances += _instances
assert len(launcher.awaited) > 0
launcher.wait()
assert len(launcher.awaited) == 0
for inst in instances:
check_instance_params(inst, params)
def idfn_test_AwsEC2Launcher(max_price):
if max_price is None:
return 'max_price_default'
else:
return "max_price_{}".format(max_price)
@pytest.mark.prices(term="on-demand")
@pytest.mark.regions([['us-east-2'], ['eu-west-1']])
@pytest.mark.parametrize(
'max_price_factor', [None, 0.7], ids=idfn_test_AwsEC2Launcher)
def test_AwsEC2Launcher_spot(ec2ctx, ec2_resources, max_price_factor):
launcher = AwsEC2Launcher()
default_price = ec2ctx.prices['on-demand'][ec2_resources.type_name]
price = default_price * (1 if max_price_factor is None else
max_price_factor)
params = ec2_resources._replace(
market_spot=True,
spot_max_price=(None if max_price_factor is None else
"{}".format(price))
)
instances = launcher.launch(
params, 1, region=ec2ctx.region, ec2=ec2ctx.resource)
launcher.wait()
for inst in instances:
check_instance_params(inst, params, ec2ctx.client, price)
@pytest.mark.regions([['us-east-2', 'eu-west-1']])
def test_AwsEC2Terminator_wait(ec2ctxs, ec2_resources):
launcher = AwsEC2Launcher()
terminator = AwsEC2Terminator()
instances = []
params = ec2_resources._replace(market_spot=False)
for ctx in ec2ctxs:
_instances = launcher.launch(
params, 1, region=ctx.region, ec2=ctx.resource)
assert len(_instances) == 1
instances += _instances
launcher.wait()
for instance in instances:
terminator.terminate(instance)
assert len(terminator.awaited) > 0
terminator.wait()
assert len(terminator.awaited) == 0
for instance in instances:
assert instance.state['Name'] == 'terminated'
@pytest.mark.regions([['us-east-2'], ['eu-west-1']])
def test_AwsEC2Terminator_spot(ec2ctx, ec2_resources):
launcher = AwsEC2Launcher()
terminator = AwsEC2Terminator()
params = ec2_resources._replace(market_spot=True, spot_max_price=None)
instances = launcher.launch(
params, 1, region=ec2ctx.region, ec2=ec2ctx.resource)
launcher.wait()
for instance in instances:
terminator.terminate(instance)
for instance in instances:
assert instance.spot_instance_request_id is not None
spot_params = ec2ctx.client.describe_spot_instance_requests(
SpotInstanceRequestIds=[instance.spot_instance_request_id])
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#get-spot-instance-bid-status
assert (spot_params['SpotInstanceRequests'][0]['State'] in
('closed', 'cancelled'))
assert (spot_params['SpotInstanceRequests'][0]['Status']['Code'] in (
'instance-terminated-by-user',
'request-canceled-and-instance-running'
))
terminator.wait()
@pytest.mark.regions([['us-east-1']])
def test_find_instances(ec2ctx, ec2_resources):
launcher = AwsEC2Launcher()
terminator = AwsEC2Terminator()
params1 = ec2_resources._replace(
group="{}_{}".format(ec2_resources.group, 'aaa'))
params2 = ec2_resources._replace(
group="{}_{}".format(ec2_resources.group, 'bbb'))
for group in (params1.group, params2.group):
for inst in find_instances(
ec2ctx.resource, ec2_resources.project,
ec2_resources.namespace, group):
terminator.terminate(inst, ec2ctx.region)
terminator.wait(False)
launcher.launch(params1, 2, ec2=ec2ctx.resource)
launcher.launch(params2, 3, ec2=ec2ctx.resource)
aaa = find_instances(
ec2ctx.resource, params1.project, params1.namespace, params1.group)
bbb = find_instances(
ec2ctx.resource, params2.project, params2.namespace, params2.group)
aaa_and_bbb = [i for i in find_instances(
ec2ctx.resource, ec2_resources.project, ec2_resources.namespace)
if get_tag(i, 'Group') in (params1.group, params2.group)]
assert len(aaa) == 2
assert len(bbb) == 3
assert len(aaa_and_bbb) == 5
assert set(aaa).union(bbb) == set(aaa_and_bbb)
for inst in aaa_and_bbb:
terminator.terminate(inst, ec2ctx.region)
terminator.wait(False)
def test_valid_instances():
regions = ['us', 'eu']
instances = valid_instances(regions, 0)
assert instances['us'] == []
assert instances['eu'] == []
instances = valid_instances(regions, 1)
assert instances['us'] == ['1']
assert instances['eu'] == []
instances = valid_instances(regions, 2)
assert instances['us'] == ['1']
assert instances['eu'] == ['2']
instances = valid_instances(regions, 3)
assert instances['us'] == ['1', '3']
assert instances['eu'] == ['2']
instances = valid_instances(regions, 4)
assert instances['us'] == ['1', '3']
assert instances['eu'] == ['2', '4']
@pytest.mark.regions(
[['us-east-2', 'ca-central-1', 'eu-west-1']], ids=['3regions'])
def test_manage_instances(ec2ctxs, ec2_resources):
regions = [ctx.region for ctx in ec2ctxs]
def check_hosts(hosts):
assert len(set(host.tag_id for host in hosts)) == len(hosts)
assert len(set(host.public_ip for host in hosts)) == len(hosts)
def check_tags(instances):
for inst_group in instances:
for inst in inst_group:
inst_tag_id = get_tag(inst, 'ID')
assert inst_tag_id is not None
inst_tag_name = get_tag(inst, 'Name')
assert inst_tag_name == "{}-{}-{}-{}".format(
ec2_resources.project,
ec2_resources.namespace,
ec2_resources.group,
inst_tag_id.zfill(3)).lower()
res = manage_instances(regions, ec2_resources, 4)
instances = [find_instances(ctx.resource, ec2_resources.project,
ec2_resources.namespace, ec2_resources.group)
for ctx in ec2ctxs]
assert res.changed
assert len(res.active) == 4
assert len(res.terminated) == 0
check_hosts(res.active + res.terminated)
check_tags(instances)
assert len(instances[0]) == 2
assert len(instances[1]) == 1
assert len(instances[2]) == 1
assert set([get_tag(instances[0][0], 'ID'),
get_tag(instances[0][1], 'ID')]) == set(['1', '4'])
assert get_tag(instances[1][0], 'ID') == '2'
assert get_tag(instances[2][0], 'ID') == '3'
res = manage_instances(regions, ec2_resources, 4)
instances = [find_instances(ctx.resource, ec2_resources.project,
ec2_resources.namespace, ec2_resources.group)
for ctx in ec2ctxs]
assert not res.changed
assert len(res.active) == 4
assert len(res.terminated) == 0
check_hosts(res.active + res.terminated)
check_tags(instances)
assert len(instances[0]) == 2
assert len(instances[1]) == 1
assert len(instances[2]) == 1
assert set([get_tag(instances[0][0], 'ID'),
get_tag(instances[0][1], 'ID')]) == set(['1', '4'])
assert get_tag(instances[1][0], 'ID') == '2'
assert get_tag(instances[2][0], 'ID') == '3'
res = manage_instances(regions, ec2_resources, 2)
instances = [find_instances(ctx.resource, ec2_resources.project,
ec2_resources.namespace, ec2_resources.group)
for ctx in ec2ctxs]
assert res.changed
assert len(res.active) == 2
assert len(res.terminated) == 2
check_hosts(res.active + res.terminated)
check_tags(instances)
assert len(instances[0]) == 1
assert len(instances[1]) == 1
assert len(instances[2]) == 0
assert get_tag(instances[0][0], 'ID') == '1'
assert get_tag(instances[1][0], 'ID') == '2'
res = manage_instances(regions, ec2_resources, 0)
instances = [find_instances(ctx.resource, ec2_resources.project,
ec2_resources.namespace, ec2_resources.group)
for ctx in ec2ctxs]
assert res.changed
assert len(res.active) == 0
assert len(res.terminated) == 2
check_hosts(res.active + res.terminated)
check_tags(instances)
assert len(instances[0]) == 0
assert len(instances[1]) == 0
assert len(instances[2]) == 0
res = manage_instances(regions, ec2_resources, 0)
instances = [find_instances(ctx.resource, ec2_resources.project,
ec2_resources.namespace, ec2_resources.group)
for ctx in ec2ctxs]
assert not res.changed
assert len(res.active) == 0
assert len(res.terminated) == 0
check_hosts(res.active + res.terminated)
check_tags(instances)
assert len(instances[0]) == 0
assert len(instances[1]) == 0
assert len(instances[2]) == 0
|
.modules/.sqlmap/lib/takeover/abstraction.py | termux-one/EasY_HaCk | 1,103 | 12758531 | <reponame>termux-one/EasY_HaCk
#!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import sys
from extra.safe2bin.safe2bin import safechardecode
from lib.core.common import dataToStdout
from lib.core.common import Backend
from lib.core.common import getSQLSnippet
from lib.core.common import getUnicode
from lib.core.common import isStackingAvailable
from lib.core.common import readInput
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import AUTOCOMPLETE_TYPE
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.core.shell import autoCompletion
from lib.request import inject
from lib.takeover.udf import UDF
from lib.takeover.web import Web
from lib.takeover.xp_cmdshell import XP_cmdshell
class Abstraction(Web, UDF, XP_cmdshell):
"""
This class defines an abstraction layer for OS takeover functionalities
to UDF / XP_cmdshell objects
"""
def __init__(self):
self.envInitialized = False
self.alwaysRetrieveCmdOutput = False
UDF.__init__(self)
Web.__init__(self)
XP_cmdshell.__init__(self)
def execCmd(self, cmd, silent=False):
if self.webBackdoorUrl and not isStackingAvailable():
self.webBackdoorRunCmd(cmd)
elif Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
self.udfExecCmd(cmd, silent=silent)
elif Backend.isDbms(DBMS.MSSQL):
self.xpCmdshellExecCmd(cmd, silent=silent)
else:
errMsg = "Feature not yet implemented for the back-end DBMS"
raise SqlmapUnsupportedFeatureException(errMsg)
def evalCmd(self, cmd, first=None, last=None):
retVal = None
if self.webBackdoorUrl and not isStackingAvailable():
retVal = self.webBackdoorRunCmd(cmd)
elif Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
retVal = self.udfEvalCmd(cmd, first, last)
elif Backend.isDbms(DBMS.MSSQL):
retVal = self.xpCmdshellEvalCmd(cmd, first, last)
else:
errMsg = "Feature not yet implemented for the back-end DBMS"
raise SqlmapUnsupportedFeatureException(errMsg)
return safechardecode(retVal)
def runCmd(self, cmd):
choice = None
if not self.alwaysRetrieveCmdOutput:
message = "do you want to retrieve the command standard "
message += "output? [Y/n/a] "
choice = readInput(message, default='Y').upper()
if choice == 'A':
self.alwaysRetrieveCmdOutput = True
if choice == 'Y' or self.alwaysRetrieveCmdOutput:
output = self.evalCmd(cmd)
if output:
conf.dumper.string("command standard output", output)
else:
dataToStdout("No output\n")
else:
self.execCmd(cmd)
def shell(self):
if self.webBackdoorUrl and not isStackingAvailable():
infoMsg = "calling OS shell. To quit type "
infoMsg += "'x' or 'q' and press ENTER"
logger.info(infoMsg)
else:
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
infoMsg = "going to use injected sys_eval and sys_exec "
infoMsg += "user-defined functions for operating system "
infoMsg += "command execution"
logger.info(infoMsg)
elif Backend.isDbms(DBMS.MSSQL):
infoMsg = "going to use xp_cmdshell extended procedure for "
infoMsg += "operating system command execution"
logger.info(infoMsg)
else:
errMsg = "feature not yet implemented for the back-end DBMS"
raise SqlmapUnsupportedFeatureException(errMsg)
infoMsg = "calling %s OS shell. To quit type " % (Backend.getOs() or "Windows")
infoMsg += "'x' or 'q' and press ENTER"
logger.info(infoMsg)
autoCompletion(AUTOCOMPLETE_TYPE.OS, OS.WINDOWS if Backend.isOs(OS.WINDOWS) else OS.LINUX)
while True:
command = None
try:
command = raw_input("os-shell> ")
command = getUnicode(command, encoding=sys.stdin.encoding)
except KeyboardInterrupt:
print
errMsg = "user aborted"
logger.error(errMsg)
except EOFError:
print
errMsg = "exit"
logger.error(errMsg)
break
if not command:
continue
if command.lower() in ("x", "q", "exit", "quit"):
break
self.runCmd(command)
def _initRunAs(self):
if not conf.dbmsCred:
return
if not conf.direct and not isStackingAvailable():
errMsg = "stacked queries are not supported hence sqlmap cannot "
errMsg += "execute statements as another user. The execution "
errMsg += "will continue and the DBMS credentials provided "
errMsg += "will simply be ignored"
logger.error(errMsg)
return
if Backend.isDbms(DBMS.MSSQL):
msg = "on Microsoft SQL Server 2005 and 2008, OPENROWSET function "
msg += "is disabled by default. This function is needed to execute "
msg += "statements as another DBMS user since you provided the "
msg += "option '--dbms-creds'. If you are DBA, you can enable it. "
msg += "Do you want to enable it? [Y/n] "
if readInput(msg, default='Y', boolean=True):
expression = getSQLSnippet(DBMS.MSSQL, "configure_openrowset", ENABLE="1")
inject.goStacked(expression)
# TODO: add support for PostgreSQL
# elif Backend.isDbms(DBMS.PGSQL):
# expression = getSQLSnippet(DBMS.PGSQL, "configure_dblink", ENABLE="1")
# inject.goStacked(expression)
def initEnv(self, mandatory=True, detailed=False, web=False, forceInit=False):
self._initRunAs()
if self.envInitialized and not forceInit:
return
if web:
self.webInit()
else:
self.checkDbmsOs(detailed)
if mandatory and not self.isDba():
warnMsg = "functionality requested probably does not work because "
warnMsg += "the current session user is not a database administrator"
if not conf.dbmsCred and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.PGSQL):
warnMsg += ". You can try to use option '--dbms-cred' "
warnMsg += "to execute statements as a DBA user if you "
warnMsg += "were able to extract and crack a DBA "
warnMsg += "password by any mean"
logger.warn(warnMsg)
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
success = self.udfInjectSys()
if success is not True:
msg = "unable to mount the operating system takeover"
raise SqlmapFilePathException(msg)
elif Backend.isDbms(DBMS.MSSQL):
if mandatory:
self.xpCmdshellInit()
else:
errMsg = "feature not yet implemented for the back-end DBMS"
raise SqlmapUnsupportedFeatureException(errMsg)
self.envInitialized = True
|
observations/r/bmw.py | hajime9652/observations | 199 | 12758584 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bmw(path):
"""Daily Log Returns on BMW Share Price
These data are the daily log returns on BMW share price from Tuesday 2nd
January 1973 until Tuesday 23rd July 1996. The data are contained in a
numeric vector. The dates of each observation are contained in a
`times` attribute, which is an object of class `"POSIXct"` (see
`DateTimeClasses`). Note that these data form an irregular time series
because no trading takes place at the weekend.
  A numeric vector containing 6146 observations, with a `times` attribute
  recording the date of each observation.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bmw.csv`.
Returns:
Tuple of np.ndarray `x_train` with 6146 rows and 1 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'bmw.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/evir/bmw.csv'
maybe_download_and_extract(path, url,
save_file_name='bmw.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
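# Example usage (illustrative sketch, not part of the original module):
#   x_train, metadata = bmw('~/data')
#   print(x_train.shape)  # expected: (6146, 1)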
|
general/add-watermark-pdf/pdf_watermarker.py | caesarcc/python-code-tutorials | 1,059 | 12758586 | from PyPDF4 import PdfFileReader, PdfFileWriter
from PyPDF4.pdf import ContentStream
from PyPDF4.generic import TextStringObject, NameObject
from PyPDF4.utils import b_
import os
import argparse
from io import BytesIO
from typing import Tuple
# Import the reportlab library
from reportlab.pdfgen import canvas
# The size of the page supposedly A4
from reportlab.lib.pagesizes import A4
# The color of the watermark
from reportlab.lib import colors
PAGESIZE = A4
FONTNAME = 'Helvetica-Bold'
FONTSIZE = 40
# using colors module
# COLOR = colors.lightgrey
# or simply RGB
# COLOR = (190, 190, 190)
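# (values in an RGB tuple are treated as 0-255 and scaled to reportlab's 0-1
#  range inside create_watermark)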
COLOR = colors.red
# The position attributes of the watermark
X = 250
Y = 10
# The rotation angle in order to display the watermark diagonally if needed
ROTATION_ANGLE = 45
def get_info(input_file: str):
"""
Extracting the file info
"""
# If PDF is encrypted the file metadata cannot be extracted
with open(input_file, 'rb') as pdf_file:
pdf_reader = PdfFileReader(pdf_file, strict=False)
output = {
"File": input_file, "Encrypted": ("True" if pdf_reader.isEncrypted else "False")
}
if not pdf_reader.isEncrypted:
info = pdf_reader.getDocumentInfo()
num_pages = pdf_reader.getNumPages()
output["Author"] = info.author
output["Creator"] = info.creator
output["Producer"] = info.producer
output["Subject"] = info.subject
output["Title"] = info.title
output["Number of pages"] = num_pages
# To Display collected metadata
print("## File Information ##################################################")
print("\n".join("{}:{}".format(i, j) for i, j in output.items()))
print("######################################################################")
return True, output
def get_output_file(input_file: str, output_file: str):
"""
Check whether a temporary output file is needed or not
"""
input_path = os.path.dirname(input_file)
input_filename = os.path.basename(input_file)
# If output file is empty -> generate a temporary output file
# If output file is equal to input_file -> generate a temporary output file
if not output_file or input_file == output_file:
tmp_file = os.path.join(input_path, 'tmp_' + input_filename)
return True, tmp_file
return False, output_file
def create_watermark(wm_text: str):
"""
Creates a watermark template.
"""
if wm_text:
# Generate the output to a memory buffer
output_buffer = BytesIO()
# Default Page Size = A4
c = canvas.Canvas(output_buffer, pagesize=PAGESIZE)
# you can also add image instead of text
# c.drawImage("logo.png", X, Y, 160, 160)
# Set the size and type of the font
c.setFont(FONTNAME, FONTSIZE)
# Set the color
if isinstance(COLOR, tuple):
color = (c/255 for c in COLOR)
c.setFillColorRGB(*color)
else:
c.setFillColor(COLOR)
# Rotate according to the configured parameter
c.rotate(ROTATION_ANGLE)
# Position according to the configured parameter
c.drawString(X, Y, wm_text)
c.save()
return True, output_buffer
return False, None
def save_watermark(wm_buffer, output_file):
"""
Saves the generated watermark template to disk
"""
with open(output_file, mode='wb') as f:
f.write(wm_buffer.getbuffer())
f.close()
return True
def watermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
"""
Adds watermark to a pdf file.
"""
result, wm_buffer = create_watermark(wm_text)
if result:
wm_reader = PdfFileReader(wm_buffer)
pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
pdf_writer = PdfFileWriter()
try:
for page in range(pdf_reader.getNumPages()):
# If required to watermark specific pages not all the document pages
if pages:
if str(page) not in pages:
continue
page = pdf_reader.getPage(page)
page.mergePage(wm_reader.getPage(0))
pdf_writer.addPage(page)
except Exception as e:
print("Exception = ", e)
return False, None, None
return True, pdf_reader, pdf_writer
def unwatermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
"""
Removes watermark from the pdf file.
"""
pdf_reader = PdfFileReader(open(input_file, 'rb'), strict=False)
pdf_writer = PdfFileWriter()
for page in range(pdf_reader.getNumPages()):
# If required for specific pages
if pages:
if str(page) not in pages:
continue
page = pdf_reader.getPage(page)
# Get the page content
content_object = page["/Contents"].getObject()
content = ContentStream(content_object, pdf_reader)
# Loop through all the elements page elements
for operands, operator in content.operations:
# Checks the TJ operator and replaces the corresponding string operand (Watermark text) with ''
if operator == b_("Tj"):
text = operands[0]
if isinstance(text, str) and text.startswith(wm_text):
operands[0] = TextStringObject('')
page.__setitem__(NameObject('/Contents'), content)
pdf_writer.addPage(page)
return True, pdf_reader, pdf_writer
def watermark_unwatermark_file(**kwargs):
input_file = kwargs.get('input_file')
wm_text = kwargs.get('wm_text')
# watermark -> Watermark
# unwatermark -> Unwatermark
action = kwargs.get('action')
# HDD -> Temporary files are saved on the Hard Disk Drive and then deleted
# RAM -> Temporary files are saved in memory and then deleted.
mode = kwargs.get('mode')
pages = kwargs.get('pages')
temporary, output_file = get_output_file(
input_file, kwargs.get('output_file'))
if action == "watermark":
result, pdf_reader, pdf_writer = watermark_pdf(
input_file=input_file, wm_text=wm_text, pages=pages)
elif action == "unwatermark":
result, pdf_reader, pdf_writer = unwatermark_pdf(
input_file=input_file, wm_text=wm_text, pages=pages)
# Completed successfully
if result:
# Generate to memory
if mode == "RAM":
output_buffer = BytesIO()
pdf_writer.write(output_buffer)
pdf_reader.stream.close()
# No need to create a temporary file in RAM Mode
if temporary:
output_file = input_file
with open(output_file, mode='wb') as f:
f.write(output_buffer.getbuffer())
f.close()
elif mode == "HDD":
# Generate to a new file on the hard disk
with open(output_file, 'wb') as pdf_output_file:
pdf_writer.write(pdf_output_file)
pdf_output_file.close()
pdf_reader.stream.close()
if temporary:
if os.path.isfile(input_file):
os.replace(output_file, input_file)
output_file = input_file
def watermark_unwatermark_folder(**kwargs):
"""
Watermarks all PDF Files within a specified path
Unwatermarks all PDF Files within a specified path
"""
input_folder = kwargs.get('input_folder')
wm_text = kwargs.get('wm_text')
# Run in recursive mode
recursive = kwargs.get('recursive')
# watermark -> Watermark
# unwatermark -> Unwatermark
action = kwargs.get('action')
# HDD -> Temporary files are saved on the Hard Disk Drive and then deleted
# RAM -> Temporary files are saved in memory and then deleted.
mode = kwargs.get('mode')
pages = kwargs.get('pages')
# Loop though the files within the input folder.
for foldername, dirs, filenames in os.walk(input_folder):
for filename in filenames:
# Check if pdf file
if not filename.endswith('.pdf'):
continue
# PDF File found
inp_pdf_file = os.path.join(foldername, filename)
print("Processing file:", inp_pdf_file)
watermark_unwatermark_file(input_file=inp_pdf_file, output_file=None,
wm_text=wm_text, action=action, mode=mode, pages=pages)
if not recursive:
break
def is_valid_path(path):
"""
Validates the path inputted and checks whether it is a file path or a folder path
"""
if not path:
raise ValueError(f"Invalid Path")
if os.path.isfile(path):
return path
elif os.path.isdir(path):
return path
else:
raise ValueError(f"Invalid Path {path}")
def parse_args():
"""
Get user command line parameters
"""
parser = argparse.ArgumentParser(description="Available Options")
parser.add_argument('-i', '--input_path', dest='input_path', type=is_valid_path,
required=True, help="Enter the path of the file or the folder to process")
parser.add_argument('-a', '--action', dest='action', choices=[
'watermark', 'unwatermark'], type=str, default='watermark',
help="Choose whether to watermark or to unwatermark")
parser.add_argument('-m', '--mode', dest='mode', choices=['RAM', 'HDD'], type=str,
default='RAM', help="Choose whether to process on the hard disk drive or in memory")
parser.add_argument('-w', '--watermark_text', dest='watermark_text',
type=str, required=True, help="Enter a valid watermark text")
parser.add_argument('-p', '--pages', dest='pages', type=tuple,
help="Enter the pages to consider e.g.: [2,4]")
path = parser.parse_known_args()[0].input_path
if os.path.isfile(path):
parser.add_argument('-o', '--output_file', dest='output_file',
type=str, help="Enter a valid output file")
if os.path.isdir(path):
parser.add_argument('-r', '--recursive', dest='recursive', default=False, type=lambda x: (
str(x).lower() in ['true', '1', 'yes']), help="Process Recursively or Non-Recursively")
    # To Parse The Command Line Arguments
args = vars(parser.parse_args())
# To Display The Command Line Arguments
print("## Command Arguments #################################################")
print("\n".join("{}:{}".format(i, j) for i, j in args.items()))
print("######################################################################")
return args
if __name__ == '__main__':
# Parsing command line arguments entered by user
args = parse_args()
# If File Path
if os.path.isfile(args['input_path']):
# Extracting File Info
get_info(input_file=args['input_path'])
# Encrypting or Decrypting a File
watermark_unwatermark_file(
input_file=args['input_path'], wm_text=args['watermark_text'], action=args[
'action'], mode=args['mode'], output_file=args['output_file'], pages=args['pages']
)
# If Folder Path
elif os.path.isdir(args['input_path']):
# Encrypting or Decrypting a Folder
watermark_unwatermark_folder(
input_folder=args['input_path'], wm_text=args['watermark_text'],
action=args['action'], mode=args['mode'], recursive=args['recursive'], pages=args['pages']
)
|
data/base_dataset.py | ArlenCHEN/SNE-RoadSeg | 213 | 12758589 | import torch.utils.data as data
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
pass
def __len__(self):
return 0
|
jorldy/core/env/gym_env.py | zenoengine/JORLDY | 300 | 12758616 | <filename>jorldy/core/env/gym_env.py
import gym
import numpy as np
from .base import BaseEnv
class _Gym(BaseEnv):
"""Gym environment.
Args:
name (str): name of environment in Gym.
        render (bool): parameter that determines whether to render.
        custom_action (bool): parameter that determines whether to use a custom action space.
"""
def __init__(
self,
name,
render=False,
custom_action=False,
**kwargs,
):
self.env = gym.make(name)
self.state_size = self.env.observation_space.shape[0]
if not custom_action:
self.action_size = (
self.env.action_space.shape[0]
if self.action_type == "continuous"
else self.env.action_space.n
)
self.render = render
def reset(self):
self.score = 0
state = self.env.reset()
state = np.expand_dims(state, 0) # for (1, state_size)
return state
def step(self, action):
if self.render:
self.env.render()
if self.action_type == "continuous":
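            # rescale the agent's output from [-1, 1] to the env's [low, high] action range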
action = ((action + 1.0) / 2.0) * (
self.env.action_space.high - self.env.action_space.low
) + self.env.action_space.low
action = np.reshape(action, self.env.action_space.shape)
else:
action = action.item()
next_state, reward, done, info = self.env.step(action)
self.score += reward
next_state, reward, done = map(
lambda x: np.expand_dims(x, 0), [next_state, [reward], [done]]
) # for (1, ?)
return (next_state, reward, done)
def close(self):
self.env.close()
class Cartpole(_Gym):
def __init__(self, action_type="discrete", **kwargs):
self.action_type = action_type
if action_type == "continuous":
super(Cartpole, self).__init__("CartPole-v1", custom_action=True, **kwargs)
self.action_size = 1
else:
super(Cartpole, self).__init__("CartPole-v1", **kwargs)
def step(self, action):
if self.render:
self.env.render()
action = action.item()
if self.action_type == "continuous":
action = 0 if action < 0 else 1
next_state, reward, done, info = self.env.step(action)
self.score += reward
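        # reward shaping: small constant reward per surviving step, -1 on termination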
reward = -1 if done else 0.1
next_state, reward, done = map(
lambda x: np.expand_dims(x, 0), [next_state, [reward], [done]]
) # for (1, ?)
return (next_state, reward, done)
class Pendulum(_Gym):
def __init__(self, **kwargs):
self.action_type = "continuous"
super(Pendulum, self).__init__("Pendulum-v1", **kwargs)
class MountainCar(_Gym):
def __init__(self, **kwargs):
self.action_type = "discrete"
super(MountainCar, self).__init__("MountainCar-v0", **kwargs)
|
stores/apps/users/admin.py | diassor/CollectorCity-Market-Place | 135 | 12758631 | from models import *
from django.contrib import admin
admin.site.register(Profile)
admin.site.register(EmailVerify)
|
ide/tasks/gist.py | Ramonrlb/cloudpebble | 147 | 12758646 | <filename>ide/tasks/gist.py
import json
import github
from celery import task
from django.db import transaction
from django.conf import settings
from ide.models.user import User
from ide.models.project import Project
from ide.utils.sdk import load_manifest_dict
from ide.models.files import SourceFile, ResourceFile, ResourceIdentifier, ResourceVariant
from ide.utils.project import APPINFO_MANIFEST, PACKAGE_MANIFEST
from ide.utils import generate_half_uuid
from utils.td_helper import send_td_event
from collections import defaultdict
import urllib2
@task(acks_late=True)
def import_gist(user_id, gist_id):
user = User.objects.get(pk=user_id)
g = github.Github()
try:
gist = g.get_gist(gist_id)
except github.UnknownObjectException:
send_td_event('cloudpebble_gist_not_found', data={'data': {'gist_id': gist_id}}, user=user)
raise Exception("Couldn't find gist to import.")
files = gist.files
default_name = gist.description or 'Sample project'
default_settings = {
'name': default_name,
'app_short_name': default_name,
'app_long_name': default_name,
'app_company_name': user.username,
'app_version_label': '1.0',
'app_is_watchface': False,
'app_is_hidden': False,
'app_is_shown_on_communication': False,
'app_capabilities': '[]',
'app_keys': '{}',
'project_type': 'native',
'app_modern_multi_js': False,
'sdk_version': '2'
}
if len(files) == 1 or ((APPINFO_MANIFEST in files or PACKAGE_MANIFEST in files) and len(files) == 2):
if 'simply.js' in files:
default_settings['project_type'] = 'simplyjs'
elif 'app.js' in files:
default_settings['project_type'] = 'pebblejs'
elif 'index.js' in files:
default_settings['project_type'] = 'rocky'
# If all files are .js or .json and there is an index.js, assume it's a rocky project.
if all(x.endswith(('.js', '.json')) for x in gist.files) and 'index.js' in files:
default_settings['project_type'] = 'rocky'
default_settings['sdk_version'] = '3'
default_settings['app_modern_multi_js'] = True
media = []
# Using defaultdict we can load project settings from a manifest dict which
    # has values that default to None. This way, we can delegate any keys that are
    # missing from the manifest to the defaults defined above.
if PACKAGE_MANIFEST in files:
content = json.loads(files[PACKAGE_MANIFEST].content)
package = defaultdict(lambda: None)
package.update(content)
package['pebble'] = defaultdict(lambda: None)
package['pebble'].update(content.get('pebble', {}))
manifest_settings, media, dependencies = load_manifest_dict(package, PACKAGE_MANIFEST, default_project_type=None)
default_settings['app_keys'] = '[]'
default_settings['sdk_version'] = '3'
default_settings['app_modern_multi_js'] = True
elif APPINFO_MANIFEST in files:
content = json.loads(files[APPINFO_MANIFEST].content)
package = defaultdict(lambda: None)
package.update(content)
manifest_settings, media, dependencies = load_manifest_dict(package, APPINFO_MANIFEST, default_project_type=None)
else:
manifest_settings = {}
dependencies = {}
fixed_settings = {
'owner': user,
'app_uuid': generate_half_uuid()
}
project_settings = {}
project_settings.update(default_settings)
project_settings.update({k: v for k, v in manifest_settings.iteritems() if v is not None})
project_settings.update(fixed_settings)
with transaction.atomic():
project = Project.objects.create(**project_settings)
project.set_dependencies(dependencies)
project_type = project.project_type
if project_type == 'package':
raise Exception("Gist imports are not yet support for packages.")
if project_type != 'simplyjs':
for filename in gist.files:
target = 'app'
if not filename.endswith(('.c', '.h', '.js', '.json')):
continue
if filename in ('appinfo.json', 'package.json'):
continue
if project_type == 'native':
if filename.endswith(('.js', '.json')):
target = 'pkjs'
elif project_type == 'rocky':
if filename == 'app.js':
target = 'pkjs'
source_file = SourceFile.objects.create(project=project, file_name=filename, target=target)
source_file.save_text(gist.files[filename].content)
resources = {}
for resource in media:
kind = resource['type']
def_name = resource['name']
filename = resource['file']
regex = resource.get('characterRegex', None)
tracking = resource.get('trackingAdjust', None)
memory_format = resource.get('memoryFormat', None)
storage_format = resource.get('storageFormat', None)
space_optimisation = resource.get('spaceOptimization', None)
is_menu_icon = resource.get('menuIcon', False)
compatibility = resource.get('compatibility', None)
if filename not in gist.files:
continue
if filename not in resources:
resources[filename] = ResourceFile.objects.create(project=project, file_name=filename, kind=kind,
is_menu_icon=is_menu_icon)
# We already have this as a unicode string in .content, but it shouldn't have become unicode
# in the first place.
default_variant = ResourceVariant.objects.create(resource_file=resources[filename], tags=ResourceVariant.TAGS_DEFAULT)
default_variant.save_file(urllib2.urlopen(gist.files[filename].raw_url))
ResourceIdentifier.objects.create(
resource_file=resources[filename],
resource_id=def_name,
character_regex=regex,
tracking=tracking,
compatibility=compatibility,
memory_format=memory_format,
storage_format=storage_format,
space_optimisation=space_optimisation
)
else:
source_file = SourceFile.objects.create(project=project, file_name='app.js')
source_file.save_text(gist.files['simply.js'].content)
send_td_event('cloudpebble_gist_import', data={'data': {'gist_id': gist_id}}, project=project)
return project.id
|
lib/utils/adb.py | vividmuse/frida-skeleton | 520 | 12758701 | <reponame>vividmuse/frida-skeleton
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.utils.shell import Shell
class Adb(Shell):
def __init__(self, serial):
super().__init__()
self.serial = serial
# if we are root shell
self.is_root = False
self.check_root()
@classmethod
def start_server(cls):
return Shell().exec('adb start-server', supress_error=True)
@classmethod
def devices(cls):
return Shell().exec('adb devices', quiet=True)
def check_root(self):
if self.unsafe_shell('whoami').out == 'root':
self.is_root = True
def root(self):
self.exec('adb -s "{}" root'.format(self.serial))
self.check_root()
def unsafe_shell(self, command, root=False, quiet=False):
return self.exec(r'''adb -s "{}" shell "{}{}"'''.format(
self.serial, 'su - -c ' if root and not self.is_root else '', command), quiet)
def push(self, src, dst):
return self.exec('adb -s "{}" push "{}" "{}"'.format(self.serial, src, dst))
def reverse(self, port):
return self.exec('adb -s "{0}" reverse tcp:{1} tcp:{1}'.format(self.serial, port))
def clear_reverse(self, remote_port):
return self.exec('adb -s "{}" reverse --remove tcp:{}'.format(self.serial, remote_port))
def forward(self, local_port, remote_port):
return self.exec('adb -s "{}" forward tcp:{} tcp:{}'.format(self.serial, local_port, remote_port))
def clear_forward(self, local_port):
return self.exec('adb -s "{}" forward --remove tcp:{}'.format(self.serial, local_port))
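# Example usage (illustrative sketch, not part of the original module):
#   Adb.start_server()
#   adb = Adb('emulator-5554')
#   print(adb.unsafe_shell('getprop ro.product.model').out)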
|
src/platform/weblogic/deployers/webs_deploy.py | 0x27/clusterd | 539 | 12758712 | <filename>src/platform/weblogic/deployers/webs_deploy.py
from src.platform.weblogic.interfaces import WINTERFACES
import src.platform.weblogic.deployers.web_deploy as web_deploy
versions = ["10", "11", "12"]
title = WINTERFACES.WLS
def deploy(fingerengine, fingerprint):
return web_deploy.deploy(fingerengine, fingerprint)
|
chainer_/datasets/cifar10_cls_dataset.py | naviocean/imgclsmob | 2,649 | 12758715 | <filename>chainer_/datasets/cifar10_cls_dataset.py<gh_stars>1000+
"""
CIFAR-10 classification dataset.
"""
import os
import numpy as np
from chainer.dataset import DatasetMixin
from chainer.datasets.cifar import get_cifar10
from chainercv.transforms import random_crop
from chainercv.transforms import random_flip
from .dataset_metainfo import DatasetMetaInfo
class CIFAR10(DatasetMixin):
"""
CIFAR-10 image classification dataset.
Parameters:
----------
root : str, default '~/.chainer/datasets/cifar10'
Path to temp folder for storing data.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
root=os.path.join("~", ".chainer", "datasets", "cifar10"),
mode="train",
transform=None):
assert (root is not None)
self.transform = transform
train_ds, test_ds = get_cifar10()
self.base = train_ds if mode == "train" else test_ds
def __len__(self):
return len(self.base)
def get_example(self, i):
image, label = self.base[i]
image = self.transform(image)
return image, label
class CIFAR10MetaInfo(DatasetMetaInfo):
def __init__(self):
super(CIFAR10MetaInfo, self).__init__()
self.label = "CIFAR10"
self.short_label = "cifar"
self.root_dir_name = "cifar10"
self.dataset_class = CIFAR10
self.num_training_samples = 50000
self.in_channels = 3
self.num_classes = 10
self.input_image_size = (32, 32)
self.train_metric_capts = ["Train.Err"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err"}]
self.val_metric_capts = ["Val.Err"]
self.val_metric_names = ["Top1Error"]
self.val_metric_extra_kwargs = [{"name": "err"}]
self.saver_acc_ind = 0
self.train_transform = CIFARTrainTransform
self.val_transform = CIFARValTransform
self.test_transform = CIFARValTransform
self.ml_type = "imgcls"
class CIFARTrainTransform(object):
"""
CIFAR-10 training transform.
"""
def __init__(self,
ds_metainfo,
mean_rgb=(0.4914, 0.4822, 0.4465),
std_rgb=(0.2023, 0.1994, 0.2010)):
assert (ds_metainfo is not None)
self.mean = np.array(mean_rgb, np.float32)[:, np.newaxis, np.newaxis]
        self.std = np.array(std_rgb, np.float32)[:, np.newaxis, np.newaxis]
        # Crop size consumed by random_crop in __call__; assumed to come from the dataset meta info.
        self.resize_value = ds_metainfo.input_image_size
def __call__(self, img):
img = random_crop(img=img, size=self.resize_value)
img = random_flip(img=img, x_random=True)
img -= self.mean
img /= self.std
return img
class CIFARValTransform(object):
"""
CIFAR-10 validation transform.
"""
def __init__(self,
ds_metainfo,
mean_rgb=(0.4914, 0.4822, 0.4465),
std_rgb=(0.2023, 0.1994, 0.2010)):
assert (ds_metainfo is not None)
self.mean = np.array(mean_rgb, np.float32)[:, np.newaxis, np.newaxis]
self.std = np.array(std_rgb, np.float32)[:, np.newaxis, np.newaxis]
def __call__(self, img):
img -= self.mean
img /= self.std
return img
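# Usage sketch (illustrative):
#
#   ds_metainfo = CIFAR10MetaInfo()
#   train_ds = CIFAR10(mode="train", transform=CIFARTrainTransform(ds_metainfo))
#   img, label = train_ds.get_example(0)   # CHW float32 image: cropped, flipped and normalized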
|
Anaconda-files/Program_19d.py | arvidl/dynamical-systems-with-applications-using-python | 106 | 12758720 | # Program 19d: Generalized synchronization.
# See Figure 19.8(a).
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Constants
mu = 5.7
sigma = 16
b = 4
r = 45.92
g = 8 # When g=4, there is no synchronization.
tmax = 100
t = np.arange(0.0, tmax, 0.1)
def rossler_lorenz_odes(X,t):
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X
dx1 = -(x2 + x3)
dx2 = x1 + 0.2*x2
dx3 = 0.2 + x3 * (x1 - mu)
dy1 = sigma * (y2 - y1) - g * (y1 - x1)
dy2 = -y1 * y3 + r*y1 - y2
dy3 = y1 * y2 - b*y3
dz1 = sigma * (z2 - z1) - g * (z1 - x1)
dz2 = -z1*z3 + r*z1 - z2
dz3 = z1*z2 - b*z3
return (dx1, dx2, dx3, dy1, dy2, dy3, dz1, dz2, dz3)
y0 = [2, -10, 44, 30, 10, 20, 31, 11, 22]
X = odeint(rossler_lorenz_odes, y0, t, rtol=1e-6)
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X.T # unpack columns
plt.figure(1)
# Delete first 500 iterates.
plt.plot(y2[500:len(y2)], z2[500:len(z2)])
plt.xlabel(r'$y_2$', fontsize=15)
plt.ylabel(r'$z_2$', fontsize=15)
plt.show()
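# Optional check (sketch): with the auxiliary-system approach, generalized
# synchronization means the two identically driven Lorenz responses converge,
# so the error below should decay towards zero for g=8 and stay O(1) for g=4.
#
#   err = np.sqrt((y1 - z1)**2 + (y2 - z2)**2 + (y3 - z3)**2)
#   plt.figure(2)
#   plt.semilogy(t[500:], err[500:])
#   plt.show()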
|
model-optimizer/extensions/front/ATenToEmbeddingBag.py | monroid/openvino | 2,406 | 12758723 | <gh_stars>1000+
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.ops.embedding_bag import EmbeddingBagOffsetsSum, EmbeddingBagPackedSum
from extensions.ops.rank import Rank
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementPattern
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, rename_node
from mo.ops.broadcast import Broadcast
from mo.ops.concat import Concat
from mo.ops.shape import Shape
from mo.ops.unsqueeze import Unsqueeze
from mo.utils.shape import node_to_get_shape_value_of_indices, get_canonical_axis_index_node, \
get_shape_values_by_indices_node
class AtenToEmbeddingBag(FrontReplacementPattern):
"""
Converts the ATen layer to EmbeddingBag layer.
"""
enabled = True
def find_and_replace_pattern(self, graph: Graph):
for node in graph.get_op_nodes(op='ATen', operator='embedding_bag'):
assert node.soft_get('mode') == 0, 'ATen::embedding_bag has unsupported mode, only "sum" ' \
'mode is supported for node {}.'.format(node.id)
node_name = node.soft_get('name', node.id)
rename_node(node, node_name + '/TBR')
is_packed = False
if len(node.in_ports()) < 3 or node.in_port(2).disconnected():
is_packed = True
embedding_bag = EmbeddingBagPackedSum(graph, {'name': node_name}).create_node()
else:
embedding_bag = EmbeddingBagOffsetsSum(graph, {'name': node_name}).create_node()
node.in_port(2).get_connection().set_destination(embedding_bag.in_port(2))
rename_node(embedding_bag, node_name)
node.in_port(0).get_connection().set_destination(embedding_bag.in_port(0))
node.in_port(1).get_connection().set_destination(embedding_bag.in_port(1))
node.out_port(0).get_connection().set_source(embedding_bag.out_port(0))
if len(node.in_ports()) == 4 and not node.in_port(3).disconnected():
if is_packed:
node.in_port(3).get_connection().set_destination(embedding_bag.in_port(2))
else:
# connect per_sample_weights
node.in_port(3).get_connection().set_destination(embedding_bag.in_port(4))
weights_shape_node = Shape(graph, {'name': node_name + '/WeightsShape'}).create_node()
weights_rank_node = Rank(graph, {'name': node_name + '/WeightsRank'}).create_node()
last_dim_node = get_canonical_axis_index_node(weights_rank_node, -1)
weights_last_dim = get_shape_values_by_indices_node(weights_shape_node, last_dim_node)
weights_first_dim = node_to_get_shape_value_of_indices(weights_shape_node, [0])
zero_col_node = create_op_with_const_inputs(graph, Broadcast, {0: int64_array([0])},
{'name': node_name + '/Broadcast'})
zero_col_node.in_port(1).connect(weights_last_dim.out_port(0))
default_embeddings_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(0)},
{'name': node_name + '/Unsqueeze'})
default_embeddings_node.in_port(0).connect(zero_col_node.out_port(0))
# expand embedding table with zeros
weights_concat = Concat(graph, {'axis': 0, 'in_ports_count': 2,
'name': node_name + '/Concat'}).create_node()
embedding_bag.in_port(0).get_connection().set_destination(weights_concat.in_port(0))
weights_concat.in_port(0).get_connection().add_destination(weights_shape_node.in_port(0))
weights_concat.in_port(0).get_connection().add_destination(weights_rank_node.in_port(0))
weights_concat.in_port(1).connect(default_embeddings_node.out_port(0))
weights_concat.out_port(0).connect(embedding_bag.in_port(0))
# point default index to expanded part of embedding table
weights_first_dim.out_port(0).connect(embedding_bag.in_port(3))
|
fn/stream.py | bmintz/fn.py | 2,260 | 12758737 | from sys import version_info
if version_info[0] == 2:
from sys import maxint
else:
from sys import maxsize as maxint
from itertools import chain
from .iters import map, range
class Stream(object):
__slots__ = ("_last", "_collection", "_origin")
class _StreamIterator(object):
__slots__ = ("_stream", "_position")
def __init__(self, stream):
self._stream = stream
self._position = -1 # not started yet
def __next__(self):
# check if elements are available for next position
# return next element or raise StopIteration
self._position += 1
if (len(self._stream._collection) > self._position or
self._stream._fill_to(self._position)):
return self._stream._collection[self._position]
raise StopIteration()
if version_info[0] == 2:
next = __next__
def __init__(self, *origin):
self._collection = []
self._last = -1 # not started yet
self._origin = iter(origin) if origin else []
def __lshift__(self, rvalue):
iterator = rvalue() if callable(rvalue) else rvalue
self._origin = chain(self._origin, iterator)
return self
def cursor(self):
"""Return position of next evaluated element"""
return self._last + 1
def _fill_to(self, index):
if self._last >= index:
return True
while self._last < index:
try:
n = next(self._origin)
except StopIteration:
return False
self._last += 1
self._collection.append(n)
return True
def __iter__(self):
return self._StreamIterator(self)
def __getitem__(self, index):
if isinstance(index, int):
# todo: i'm not sure what to do with negative indices
if index < 0: raise TypeError("Invalid argument type")
self._fill_to(index)
elif isinstance(index, slice):
low, high, step = index.indices(maxint)
if step == 0: raise ValueError("Step must not be 0")
return self.__class__() << map(self.__getitem__, range(low, high, step or 1))
else:
raise TypeError("Invalid argument type")
return self._collection.__getitem__(index)
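# Usage sketch (illustrative; a lazily evaluated Fibonacci stream built with the
# << operator, assuming fn.iters also provides take and drop; map here is the lazy
# map already imported at the top of this module):
#
#   from operator import add
#   from .iters import take, drop
#
#   f = Stream()
#   fib = f << [0, 1] << map(add, f, drop(1, f))
#   assert list(take(10, fib)) == [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]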
|
tests/classification/dataset_readers/boolq.py | shunk031/allennlp-models | 402 | 12758748 | <reponame>shunk031/allennlp-models
# -*- coding: utf-8 -*-
from allennlp.common.util import ensure_list
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp_models.classification import BoolQDatasetReader
from tests import FIXTURES_ROOT
class TestBoolqReader:
boolq_path = FIXTURES_ROOT / "classification" / "boolq.jsonl"
def test_boolq_dataset_reader_default_setting(self):
reader = BoolQDatasetReader()
instances = reader.read(self.boolq_path)
instances = ensure_list(instances)
assert len(instances) == 5
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"Persian",
"language",
"--",
"Persian",
"(/ˈpɜːrʒən,",
]
assert fields["label"].label == 1
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"Epsom",
"railway",
"station",
"--",
"Epsom",
]
assert fields["label"].label == 0
def test_boolq_dataset_reader_roberta_setting(self):
reader = BoolQDatasetReader(
tokenizer=PretrainedTransformerTokenizer("roberta-base", add_special_tokens=False),
token_indexers={"tokens": PretrainedTransformerIndexer("roberta-base")},
)
instances = reader.read(self.boolq_path)
instances = ensure_list(instances)
assert len(instances) == 5
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"<s>",
"Pers",
"ian",
"Ġlanguage",
"Ġ--",
]
assert [t.text for t in fields["tokens"].tokens][-5:] == [
"Ġspeak",
"Ġthe",
"Ġsame",
"Ġlanguage",
"</s>",
]
assert fields["label"].label == 1
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens][:5] == [
"<s>",
"E",
"ps",
"om",
"Ġrailway",
]
assert [t.text for t in fields["tokens"].tokens][-5:] == [
"Ġe",
"ps",
"om",
"Ġstation",
"</s>",
]
assert fields["label"].label == 0
|
benchmarks/django_simple/app.py | p7g/dd-trace-py | 308 | 12758757 | <gh_stars>100-1000
import os
import django
from django.db import connection
from django.template import Context
from django.template import Template
from django.urls import path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = False
ROOT_URLCONF = __name__
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
BASE_DIR,
],
}
]
SECRET_KEY = ("SECRET",)
MIDDLEWARE = ["app.empty_middleware", "app.empty_middleware"]
ALLOWED_HOSTS = ["*"]
SETTINGS = dict((key, val) for key, val in locals().items() if key.isupper())
def empty_middleware(get_response):
def middleware(request):
response = get_response(request)
return response
return middleware
def index(request):
# render a large table template
template = Template(
(
"<table>\n"
"{% for row in table %}\n"
"<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>\n"
"{% endfor %}\n"
"</table>"
)
)
table = [range(10) for _ in range(100)]
context = Context({"table": table})
template.render(context)
# query db for random data
for _ in range(10):
with connection.cursor() as cursor:
cursor.execute(
"""with recursive
cnt( id, x) as (
values(1 , random()) union all
select id+1,random() from cnt where id<100)
select * from cnt"""
)
cursor.fetchall()
index = Template(
"""
<html lang="en">
<head>
<meta charset="utf-8">
<title>Django Simple</title>
</head>
<body>
<p>Hello {{name|default:"friend"}}!</p>
</body>
</html>
"""
)
return django.http.HttpResponse(index.render(Context({})))
urlpatterns = [path("", index)]
if __name__ == "__main__":
from django.core import management
management.execute_from_command_line()
|
data/transcoder_evaluation_gfg/python/CHECK_WHETHER_TRIANGLE_VALID_NOT_SIDES_GIVEN.py | mxl1n/CodeGen | 241 | 12758775 | <reponame>mxl1n/CodeGen<gh_stars>100-1000
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( a , b , c ) :
if ( a + b <= c ) or ( a + c <= b ) or ( b + c <= a ) :
return False
else :
return True
#TOFILL
if __name__ == '__main__':
param = [
(29,19,52,),
(83,34,49,),
(48,14,65,),
(59,12,94,),
(56,39,22,),
(68,85,9,),
(63,36,41,),
(95,34,37,),
(2,90,27,),
(11,16,1,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
examples/monthly_budget_mover_example.py | Ressmann/starthinker | 138 | 12758799 | <filename>examples/monthly_budget_mover_example.py
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.dataset.run import dataset
from starthinker.task.dbm.run import dbm
from starthinker.task.monthly_budget_mover.run import monthly_budget_mover
def recipe_monthly_budget_mover(config, recipe_timezone, recipe_name, auth_write, auth_read, partner_id, budget_categories, filter_ids, excluded_ios, version, is_colab, dataset):
"""Apply the previous month's budget/spend delta to the current month. Aggregate
up the budget and spend from the previous month of each category declared
then apply the delta of the spend and budget equally to each Line Item
under that Category.
Args:
recipe_timezone (timezone) - Timezone for report dates.
recipe_name (string) - Table to write to.
auth_write (authentication) - Credentials used for writing data.
auth_read (authentication) - Credentials used for reading data.
partner_id (integer) - The sdf file types.
budget_categories (json) - A dictionary to show which IO Ids go under which Category. {"CATEGORY1":[12345,12345,12345], "CATEGORY2":[12345,12345]}
filter_ids (integer_list) - Comma separated list of filter ids for the request.
       excluded_ios (integer_list) - A comma separated list of Insertion Order Ids that should be excluded from the budget calculations
version (choice) - The sdf version to be returned.
is_colab (boolean) - Are you running this in Colab? (This will store the files in Colab instead of Bigquery)
dataset (string) - Dataset that you would like your output tables to be produced in.
"""
dataset(config, {
    'description':'Create a dataset where data will be combined and transformed for upload.',
'auth':auth_write,
'dataset':dataset
})
dbm(config, {
'auth':auth_read,
'report':{
'timeout':90,
'filters':{
'FILTER_ADVERTISER':{
'values':filter_ids
}
},
'body':{
'timezoneCode':recipe_timezone,
'metadata':{
'title':recipe_name,
'dataRange':'PREVIOUS_MONTH',
'format':'CSV'
},
'params':{
'type':'TYPE_GENERAL',
'groupBys':[
'FILTER_ADVERTISER_CURRENCY',
'FILTER_INSERTION_ORDER'
],
'metrics':[
'METRIC_REVENUE_ADVERTISER'
]
}
}
},
'delete':False
})
monthly_budget_mover(config, {
'auth':'user',
'is_colab':is_colab,
'report_name':recipe_name,
'budget_categories':budget_categories,
'excluded_ios':excluded_ios,
'sdf':{
'auth':'user',
'version':version,
'partner_id':partner_id,
'file_types':'INSERTION_ORDER',
'filter_type':'FILTER_TYPE_ADVERTISER_ID',
'read':{
'filter_ids':{
'single_cell':True,
'values':filter_ids
}
},
'time_partitioned_table':False,
'create_single_day_table':False,
'dataset':dataset,
'table_suffix':''
},
'out_old_sdf':{
'bigquery':{
'dataset':dataset,
'table':recipe_name,
'schema':[
],
'skip_rows':0,
'disposition':'WRITE_TRUNCATE'
},
'file':'/content/old_sdf.csv'
},
'out_new_sdf':{
'bigquery':{
'dataset':dataset,
'table':recipe_name,
'schema':[
],
'skip_rows':0,
'disposition':'WRITE_TRUNCATE'
},
'file':'/content/new_sdf.csv'
},
'out_changes':{
'bigquery':{
'dataset':dataset,
'table':recipe_name,
'schema':[
],
'skip_rows':0,
'disposition':'WRITE_TRUNCATE'
},
'file':'/content/log.csv'
}
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Apply the previous month's budget/spend delta to the current month. Aggregate up the budget and spend from the previous month of each category declared then apply the delta of the spend and budget equally to each Line Item under that Category.
       1. No changes can be made in DV360 from the start to the end of this process
2. Make sure there is budget information for the current and previous month's IOs in DV360
3. Make sure the provided spend report has spend data for every IO in the previous month
4. Spend report must contain 'Revenue (Adv Currency)' and 'Insertion Order ID'
5. There are no duplicate IO Ids in the categories outlined below
6. This process must be ran during the month of the budget it is updating
7. If you receive a 502 error then you must separate your jobs into two, because there is too much information being pulled in the sdf
8. Manually run this job
9. Once the job has completed go to the table for the new sdf and export to a csv
10. Take the new sdf and upload it into DV360
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-recipe_timezone", help="Timezone for report dates.", default='America/Los_Angeles')
parser.add_argument("-recipe_name", help="Table to write to.", default=None)
parser.add_argument("-auth_write", help="Credentials used for writing data.", default='service')
parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
parser.add_argument("-partner_id", help="The sdf file types.", default=None)
parser.add_argument("-budget_categories", help="A dictionary to show which IO Ids go under which Category. {"CATEGORY1":[12345,12345,12345], "CATEGORY2":[12345,12345]}", default='{}')
parser.add_argument("-filter_ids", help="Comma separated list of filter ids for the request.", default=[])
parser.add_argument("-excluded_ios", help="A comma separated list of Inserion Order Ids that should be exluded from the budget calculations", default=None)
parser.add_argument("-version", help="The sdf version to be returned.", default='5')
parser.add_argument("-is_colab", help="Are you running this in Colab? (This will store the files in Colab instead of Bigquery)", default=True)
parser.add_argument("-dataset", help="Dataset that you would like your output tables to be produced in.", default='')
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_monthly_budget_mover(config, args.recipe_timezone, args.recipe_name, args.auth_write, args.auth_read, args.partner_id, args.budget_categories, args.filter_ids, args.excluded_ios, args.version, args.is_colab, args.dataset)
|
tests/test_target.py | iksteen/dpf | 133 | 12758802 | import mock
import pytest
import pwny
def test_default_arch_x86():
with mock.patch('platform.machine') as platform_mock:
platform_mock.return_value = 'i386'
assert pwny.Target().arch is pwny.Target.Arch.x86
def test_default_arch_x86_64():
with mock.patch('platform.machine') as platform_mock:
platform_mock.return_value = 'x86_64'
assert pwny.Target().arch is pwny.Target.Arch.x86
def test_default_arch_unknown():
with mock.patch('platform.machine') as platform_mock:
platform_mock.return_value = 'unknown'
assert pwny.Target().arch is pwny.Target.Arch.unknown
def test_default_arch_32bit():
with mock.patch('platform.architecture') as platform_mock:
platform_mock.return_value = ('32bit',)
assert pwny.Target().bits is pwny.Target.Bits.bits_32
def test_default_arch_64bit():
with mock.patch('platform.architecture') as platform_mock:
platform_mock.return_value = ('64bit',)
assert pwny.Target().bits is pwny.Target.Bits.bits_64
def test_set_arch():
with mock.patch('platform.architecture') as platform_mock:
platform_mock.return_value = ('64bit',)
target = pwny.Target(arch=pwny.Target.Arch.x86)
assert target.arch is pwny.Target.Arch.x86
def test_default_endian():
assert pwny.Target().endian is pwny.Target.Endian.little
def test_set_endian():
target = pwny.Target(arch=pwny.Target.Arch.unknown, endian=pwny.Target.Endian.big)
assert target.endian is pwny.Target.Endian.big
def test_default_bits_x86():
target = pwny.Target(arch=pwny.Target.Arch.x86)
assert target.bits == 32
@pytest.mark.xfail(raises=NotImplementedError)
def test_default_bits_unsupported():
target = pwny.Target(arch=pwny.Target.Arch.unknown)
_ = target.bits
def test_set__bits():
target = pwny.Target(arch=pwny.Target.Arch.x86, bits=64)
assert target.bits == 64
@pytest.mark.xfail(raises=ValueError)
def test_set_invalid_bits():
pwny.Target(bits=33)
def test_target_assume():
target = pwny.Target()
target.assume(pwny.Target(arch=pwny.Target.Arch.arm, endian=pwny.Target.Endian.little, bits=64, mode=2))
assert target.arch is pwny.Target.Arch.arm and \
target.endian == pwny.Target.Endian.little and \
target.bits == 64 and \
target.mode == 2
|
src/whylogs/features/__init__.py | cswarth/whylogs | 603 | 12758803 | <filename>src/whylogs/features/__init__.py
_IMAGE_FEATURES = ["Hue", "Brightness", "Saturation"]
|
src/encoded/tests/fixtures/schemas/award.py | procha2/encoded | 102 | 12758813 | import pytest
@pytest.fixture
def ENCODE3_award(testapp):
item = {
'name': 'ABC1234',
'rfa': 'ENCODE3',
'project': 'ENCODE',
'title': 'A Generic ENCODE3 Award'
}
return testapp.post_json('/award', item, status=201).json['@graph'][0]
@pytest.fixture
def award_a():
return{
'name': 'ENCODE2',
}
@pytest.fixture
def award_1(award_a):
item = award_a.copy()
item.update({
'schema_version': '1',
'rfa': "ENCODE2"
})
return item
@pytest.fixture
def award_2(award_1):
item = award_1.copy()
item.update({
'schema_version': '3',
'viewing_group': 'ENCODE',
})
return item
@pytest.fixture
def award_5(award_2):
item = award_2.copy()
item.update({
'schema_version': '6',
'viewing_group': 'ENCODE',
})
return item
@pytest.fixture
def award(testapp):
item = {
'name': 'encode3-award',
'rfa': 'ENCODE3',
'project': 'ENCODE',
'title': 'A Generic ENCODE3 Award',
'viewing_group': 'ENCODE3',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def award_modERN(testapp):
item = {
'name': 'modERN-award',
'rfa': 'modERN',
'project': 'modERN',
'title': 'A Generic modERN Award',
'viewing_group': 'ENCODE3',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def remc_award(testapp):
item = {
'name': 'remc-award',
'rfa': 'GGR',
'project': 'GGR',
'title': 'A Generic REMC Award',
'viewing_group': 'REMC',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def encode2_award(testapp):
item = {
# upgrade/shared.py ENCODE2_AWARDS
'uuid': '1a4d6443-8e29-4b4a-99dd-f93e72d42418',
'name': 'encode2-award',
'rfa': 'ENCODE2',
'project': 'ENCODE',
'title': 'A Generic ENCODE2 Award',
'viewing_group': 'ENCODE3',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def encode4_award(testapp):
item = {
'name': 'encode4-award',
'rfa': 'ENCODE4',
'project': 'ENCODE',
'title': 'A Generic ENCODE4 Award',
'viewing_group': 'ENCODE4',
'component': 'mapping',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def award_encode4(testapp):
item = {
'name': 'encode4-award',
'rfa': 'ENCODE4',
'project': 'ENCODE',
'title': 'A Generic ENCODE4 Award',
'viewing_group': 'ENCODE4',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def roadmap_award(testapp):
item = {
'name': 'roadmap-award',
'rfa': 'Roadmap',
'project': 'Roadmap',
'title': 'A Generic Roadmap Award',
'viewing_group': 'REMC',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def award_8(award_1):
item = award_1.copy()
item.update({
'schema_version': '8',
'viewing_group': 'ENCODE',
})
return item
|
digits/model/tasks/test_caffe_train.py | PhysicsTeacher13/Digits-NVIDIA | 111 | 12758827 | # Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits import test_utils
def test_caffe_imports():
test_utils.skipIfNotFramework('caffe')
import numpy # noqa
import google.protobuf # noqa
|
server/server.py | cattlepi/cattlepi | 257 | 12758837 | <reponame>cattlepi/cattlepi<filename>server/server.py<gh_stars>100-1000
import falcon
import json
import os
import hashlib
class ServerUtils(object):
@staticmethod
def get_file_location(filename):
dirname = os.path.dirname(__file__)
relpath = os.path.join(dirname, '../builder/latest/output', filename)
return os.path.abspath(relpath)
@staticmethod
def get_file_dir(filename):
return os.path.dirname(ServerUtils.get_file_location(filename))
@staticmethod
def get_my_rsa_key():
path_to_key = os.path.join(os.environ['HOME'], '.ssh/id_rsa.pub')
return open(path_to_key).read().strip()
class DeviceConfigResource(object):
def md5(self, fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_filedescriptor(self, filename):
return {
'url': "http://%s/images/global/%s" % (os.environ['CATTLEPI_LOCALAPI'], filename),
'md5sum': self.md5(ServerUtils.get_file_location(filename))
}
def on_get(self, req, resp, deviceid):
resp.status = falcon.HTTP_200
body = {
'initfs': self.get_filedescriptor('initramfs.tgz'),
'rootfs': self.get_filedescriptor('rootfs.sqsh'),
'bootcode': '',
'usercode': '',
'config': {
'ssh': {
'pi': {
'authorized_keys': [ ServerUtils.get_my_rsa_key() ]
}
}
}
}
resp.body = json.dumps(body)
class TrackAllResource(object):
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = "Ok: dummy response"
class TrackResource(object):
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = "Ok: dummy response"
app = falcon.API()
app.add_route('/boot/{deviceid}/config', DeviceConfigResource())
app.add_route('/track', TrackAllResource())
app.add_route('/track/{deviceid}', TrackResource())
app.add_static_route('/images/global', ServerUtils.get_file_dir('initramfs.tgz')) |
src/0064.minimum-path-sum/minimum-path-sum.py | lyphui/Just-Code | 782 | 12758838 | <filename>src/0064.minimum-path-sum/minimum-path-sum.py
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
dp = grid[0][:]
for i in range(1, n):
dp[i] += dp[i-1]
for i in range(1, m):
for j in range(n):
if j > 0:
dp[j] = grid[i][j] + min(dp[j], dp[j-1])
else:
dp[j] = grid[i][j] + dp[j]
return dp[-1] |
scripts/standalone_blob_server.py | nishp77/lbry-sdk | 4,996 | 12758848 | <filename>scripts/standalone_blob_server.py<gh_stars>1000+
import sys
import os
import asyncio
from lbry.blob.blob_manager import BlobManager
from lbry.blob_exchange.server import BlobServer
from lbry.schema.address import decode_address
from lbry.extras.daemon.storage import SQLiteStorage
async def main(address: str):
try:
decode_address(address)
except:
print(f"'{address}' is not a valid lbrycrd address")
return 1
loop = asyncio.get_running_loop()
storage = SQLiteStorage(os.path.expanduser("~/.lbrynet/lbrynet.sqlite"))
await storage.open()
blob_manager = BlobManager(loop, os.path.expanduser("~/.lbrynet/blobfiles"), storage)
await blob_manager.setup()
server = await loop.create_server(
lambda: BlobServer(loop, blob_manager, address),
'0.0.0.0', 4444)
try:
async with server:
await server.serve_forever()
finally:
await storage.close()
if __name__ == "__main__":
asyncio.run(main(sys.argv[1]))
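# Usage sketch (illustrative): run with a valid lbrycrd address as the only argument,
# e.g. `python standalone_blob_server.py <address>`; the address is validated first and
# the blob server then listens on 0.0.0.0:4444 using the default lbrynet blob directory.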
|
orbitdeterminator/doppler/utils/utils.py | DewanshiDewan/orbitdeterminator | 158 | 12758851 | <reponame>DewanshiDewan/orbitdeterminator<filename>orbitdeterminator/doppler/utils/utils.py
import numpy as np
from scipy.integrate import odeint # Orbit propagation
from scipy.optimize import fsolve # For solving TDoA
from sgp4.api import Satrec
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, ITRS, ICRS, TEME, CartesianDifferential, CartesianRepresentation
from orbitdeterminator.doppler.utils.constants import *
def range_range_rate(x_sat:np.ndarray, x_obs:np.ndarray):
""" Get range and slant range rate (radial relative velocity component).
Vectorized.
Args:
x_sat (np.ndarray): satellite location (pos, vel).
x_obs (np.ndarray): observer location (pos, vel).
Returns:
r (np.ndarray): range.
rr (np.ndarray): range rate (slant range rate).
"""
if len(x_obs.shape) == 2: # Single observer (6, n)
einsum_format = 'ij,ij->j'
d = x_sat - x_obs # Difference
elif len(x_obs.shape) == 3: # Multiple observers (6,n,n_obs)
einsum_format = 'ijk,ijk->jk'
d = np.repeat(np.expand_dims(x_sat, axis = 2), x_obs.shape[2], axis = 2) - x_obs # Difference
#d = x_sat - x_obs # Difference
r = np.linalg.norm(d[0:3,], axis=0) # Range
l = d[0:3,] / np.linalg.norm(d[0:3,], axis=0) # Range unit vectors
rr = np.einsum(einsum_format, d[3:6,], l) # Radial range rate
return r.T, rr.T
def doppler_shift(x_sat:np.ndarray, x_obs:np.ndarray, f_ref:float, c:float):
""" Get Doppler shift value for the give satellite and observer vectors.
Vectorized.
Args:
x_sat (np.ndarray): satellite location (pos, vel).
x_obs (np.ndarray): observer location (pos, vel).
f_ref (float): reference frequency.
c (float): propagation speed.
Returns:
df (np.ndarray): frequency shift relative to reference frequenct df
"""
_, rv = range_range_rate(x_sat, x_obs)
df = rv / c * f_ref
return df
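# Worked example (sketch): a radial range rate of 7.0e3 m/s observed against a 437 MHz
# reference gives |df| = 7.0e3 / 3.0e8 * 437.0e6 ~= 1.0e4 Hz, i.e. roughly 10 kHz of
# Doppler shift, which is the scale expected during a LEO pass.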
# Orbit derivative
def orbdyn_2body(x:np.ndarray, t:float, mu:float=3.986004418e14):
""" Orbital (x,y,z,x_dot,y_dot,z_dot) vector derivative
Args:
x (np.ndarray): state vector.
t (float): time.
Returns:
dxdt (np.ndarray): state vector time derivative.
"""
r = np.linalg.norm(x[0:3,], axis=0)
dxdt = np.zeros(x.shape)
dxdt[0:3,] = x[3:6,]
dxdt[3:6,] = -(mu/r**3) * x[0:3,]
return dxdt
def orbdyn_2body_stm(x:np.ndarray, t:float, mu:float=3.986004418e14):
""" Orbital (x,y,z,x_dot,y_dot,z_dot) vector and matrix derivative.
Phi_dot = A * Phi.
Args:
x (np.ndarray): state vector and flattened state transition matrix [x, Phi(:)]
Size: (6+6*6,): (42,).
t (float): time.
Returns:
dxdt (np.ndarray): state vector and state transition matrix time derivative.
"""
dxdt = np.zeros(x.shape)
r = np.linalg.norm(x[0:3,], axis=0)
dxdt[0:3,] = x[3:6,]
dxdt[3:6,] = (-mu / r**3) * x[0:3,]
A = get_matrix_A(x[0:3,], mu=mu) # (6,6,n)
if len(x.shape) == 1:
Phi = x[6:,].reshape((6, 6)) # (6,6)
Phi_dot = np.matmul(A, Phi)
dxdt[6:,] = Phi_dot.reshape((36))
else:
Phi = x[6:,].reshape((6, 6, -1)) # (6,6,n)
Phi_dot = np.einsum('ijl,jkl->ikl', A, Phi)
dxdt[6:,] = Phi_dot.reshape((36, -1))
return dxdt
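# Usage sketch (illustrative): propagating a state together with its state transition
# matrix, the same way the batch estimator below does internally:
#
#   x0 = np.array([7.0e6, 0.0, 0.0, 0.0, 7.5e3, 0.0])      # rough LEO state [m, m/s]
#   y0 = np.concatenate([x0, np.eye(6).flatten()])
#   t = np.arange(0.0, 600.0, 1.0)
#   traj = odeint(orbdyn_2body_stm, y0, t, args=(3.986004418e14,))
#   Phi_end = traj[-1, 6:].reshape(6, 6)                    # STM from t[0] to t[-1]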
def get_matrix_A(x:np.ndarray, mu:float=3.986004418e14):
""" Get A matrix (orbital x_dot = A*x). Vectorized.
Args:
x (np.ndarray): orbital state vector (Cartesian).
mu (np.ndarray): standard gravitational parameter. Defaults to 3.98e14 m^3/s^2.
Returns:
A (np.ndarray): A matrix. Size (x_dim, x_dim): (6,6).
"""
r = np.linalg.norm(x[0:3,], axis=0)
aa = -mu / r**3
b = 3 * mu / r**5
AA = np.array([
[aa + b*x[0,]**2, b*x[0,]*x[1,], b*x[0,]*x[2,]],
[b*x[0,]*x[1,], aa + b*x[1,]**2, b*x[1,]*x[2,]],
[b*x[0,]*x[2,], b*x[1,]*x[2,], aa + b*x[2,]**2,]
])
A_z = np.zeros(AA.shape) # Zero parts for A matrix
A_e = np.zeros(AA.shape) # Eye (upper right)
i = np.arange(AA.shape[0])
A_e[i, i, ] = 1
A = np.concatenate([
np.concatenate([A_z, A_e], axis=1),
np.concatenate([AA, A_z], axis=1)
], axis=0)
return A
def f_obs_range_rate(x_sat:np.ndarray, x_obs:np.ndarray):
""" Observation function for range rate.
Args:
x_sat (np.ndarray): set of satellite positions.
x_obs (np.ndarray): set of observer positions.
Returns:
rr (np.ndarray): range rate. Size (z_dim, n): (1, n)
H (np.ndarray): Partial of radial range rate w.r.t state vector.
Size (z_dim, x_dim, n): (1, 6, n).
"""
_, rr = range_range_rate(x_sat, x_obs)
H = get_matrix_range_rate_H(x_sat, x_obs)
if len(x_obs.shape) == 2:
rr = np.expand_dims(rr, axis=0)
return rr, H
def f_obs_x_sat(x_sat:np.ndarray, x_obs:np.ndarray=None):
""" Observation function for full state vector.
E.g. GPS measurement
Used for debugging.
Args:
x_sat (np.ndarray): set of satellite positions.
Returns:
x_sat (np.ndarray): satellite state vector.
H (np.ndarray): observation matrix (identity).
"""
H = np.expand_dims(np.eye(x_sat.shape[0]), axis=2)
H = np.repeat(H, x_sat.shape[1], axis=2)
return x_sat, H
def get_matrix_range_rate_H(x_sat:np.ndarray, x_obs:np.ndarray):
""" Obtain measurement Jacobian for range rate measurements. Vectorized.
Args:
x_sat (np.ndarray): set of satellite positions.
x_obs (np.ndarray): set of observer positions.
Returns:
H (np.ndarray): Partial of radial range rate w.r.t state vector.
Size (z_dim, x_dim, n): (1, 6, n).
"""
if len(x_obs.shape) == 2: # Single observer (6, n)
einsum_format = 'ij,ij->j'
d = x_sat - x_obs # Difference
elif len(x_obs.shape) == 3: # Multiple observers (6,n,n_obs)
einsum_format = 'ijk,ijk->jk'
d = np.repeat(np.expand_dims(x_sat, axis = 2), x_obs.shape[2], axis = 2) - x_obs # Difference
#d = x_sat - x_obs # Difference
r = np.linalg.norm(d[0:3,], axis=0) # Range
d_r = d / r # Temporary variable
H = d_r[[3,4,5,0,1,2],]
r_dot_v = np.einsum(einsum_format, d[0:3,], d[3:6]) # Dot product position, velocity
H[0:3,:] -= (d[0:3,] * r_dot_v) / r**3
if len(x_obs.shape) == 2: # Single observer (6, n)
H = np.expand_dims(H, axis=0)
elif len(x_obs.shape) == 3: # Multiple observers (6,n,n_obs)
H = np.transpose(H, (2, 0, 1))
return H # Transpose before return (H is a single row matrix)
def tdoa_objective_function(vars, *data):
""" Objective function for solving Time Differential of Arrival (TDoA).
0 = C * (TDoA + tau) - || x_sat-x_obs ||
Args:
vars (tuple): a tuple of unknowns - xyz satellite position and time offset
(x, y, z, t)
data (tuple): additional arguments - observer positions and TDoA measurements
(x_obs, tdoa)
Returns:
(tuple): tuple of objective function values
"""
x, y, z, tau = vars
x_sat = np.array([[x], [y], [z]], dtype=np.float64)
x_obs, tdoa = data
r = C*(tdoa + tau) - np.linalg.norm(x_obs - x_sat, axis=0)
return (r.item(0), r.item(1), r.item(2), r.item(3))
def get_tdoa_simulated(x_sat:np.ndarray, x_obs:np.ndarray, flag_tof:bool=False):
""" Get simulated Time Differential of Arrival measurements.
TODO: Take into account time of flight, right now it is instantaneous.
TODO: Flip range and tdoa arrays dimensions to be (n_measurements, n_stations)
Args:
x_sat (np.ndarray): set of satellite state vectors.
x_obs (np.ndarray): set of observer positions.
tof (bool): flag whether to simulate using time of flight (not currently implemented).
Returns:
tdoa (np.ndarray): set of simulated TDoA measurements.
        tof (np.ndarray): set of simulated times of flight between the observer and the satellite.
"""
if flag_tof:
assert False, "Time of flight not implemented!"
else:
r, _ = range_range_rate(x_sat, x_obs)
tof = r / C
tdoa = tof - tof[0,:]
return tdoa, tof
def get_tdoa_simulated_r(r:np.ndarray):
""" Same as get_tdoa_simulated_r, but only range as argument.
TODO: Flip range and tdoa arrays dimensions to be (n_measurements, n_stations)
Args:
range(np.ndarray): set of observed ranges per station (n_stations, n_measurements).
Returns:
tdoa (np.ndarray): set of simulated TDoA measurements.
        tof (np.ndarray): set of simulated times of flight between the observer and the satellite.
"""
tof = r / C
tdoa = tof - tof[0,:]
return tdoa, tof
def solve_tdoa(tdoa:np.ndarray, x_obs:np.ndarray):
""" Function to solve Time Differential of Arrival (TDoA) measurements.
Args:
tdoa (np.ndarray): array of TDoA measurements. TODO: Array dimensions.
            The TDoA array must include the time differential for the reference station, even if it is zero.
x_obs (np.ndarray): array of observer positions (6, n, n_obs).
Returns:
p_sat (np.ndarray): array of multilaterated satellite positions.
tau (np.ndarray): array of time offsets for reference station
"""
n = x_obs.shape[1]
p_sat = np.zeros((3, n))
tau = np.zeros(n)
x_obs_mean = np.mean(x_obs, axis=2)
for i in range(n):
vars_0 = [x_obs_mean[0,i]*1.01, x_obs_mean[1,i]*1.01, x_obs_mean[2,i]*1.01, 5e-3]
data = (x_obs[0:3, i, :], tdoa[:, i])
result = fsolve(tdoa_objective_function, vars_0, args=data)
p_sat[:,i] = result[0:3]
tau[i] = result[3]
return p_sat, tau
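# Usage sketch (illustrative): x_sat is a (6, n) satellite ephemeris and x_obs a
# (6, n, 4) stack of station states - four stations, since the solver fits four
# unknowns (x, y, z, tau) against four residuals:
#
#   tdoa, _ = get_tdoa_simulated(x_sat, x_obs)      # simulated measurements
#   p_est, tau = solve_tdoa(tdoa, x_obs)            # multilaterated positions + clock offset
#   err = np.linalg.norm(p_est - x_sat[0:3, :], axis=0)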
def verify_sat_orbital(x_sat:np.ndarray, range_pos:np.ndarray, range_vel:np.ndarray):
""" Verifies whether given state vectors represent a valid orbital state.
This function is used to eliminate possible states that violate orbital constraints.
Args:
x_sat (np.ndarray): set of satellite positions.
range_r (np.ndarray): set of valid position vector norms.
range_v (np.ndarray): set of valid velocity vector norms.
Returns:
x_sat_ok (np.ndarray): set of satellite positions.
x_mask (np.ndarray): boolean array indicating the validity of satellite vector.
"""
r = np.linalg.norm(x_sat[0:3,], axis=0) # Norm of the position
v = np.linalg.norm(x_sat[3:6,], axis=0) # Norm of the velocity
r_mask = (r >= range_pos[0]) & (r <= range_pos[1])
v_mask = (v >= range_vel[0]) & (v <= range_vel[1])
x_mask = r_mask & v_mask
# x_mask = np.logical_and.reduce(r >= range_pos[0], r <= range_pos[1],
# v >= range_vel[0], v <= range_vel[1])
x_sat_ok = x_sat[:,x_mask]
return x_sat_ok, x_mask
def verify_sat_observer(x_sat:np.ndarray, x_obs:np.ndarray, range_range:np.ndarray):
""" Verifies whether the satellite is within the valid range from the observer.
This function is used to eliminate possible states that violate satellite-observer constraints.
Args:
x_sat (np.ndarray): set of satellite positions.
x_obs (np.ndarray): set of observer positions.
Returns:
x_sat_ok (np.ndarray): set of satellite positions.
x_mask (np.ndarray): boolean array indicating the validity of satellite vector.
"""
r, _ = range_range_rate(x_sat, x_obs)
x_mask = (r >= range_range[0]) & (r <= range_range[1])
x_sat_ok = x_sat[:,x_mask]
return x_sat_ok, x_mask
def herrick_gibbs(p_sat:np.ndarray, t:np.ndarray, angle_checks=True):
""" Herrick-Gibbs Initial Orbit Determination Method. Takes three positional observations and corresponding
        timesteps and outputs a full state vector estimate (position and velocity) for the middle measurement.
Reference: <NAME> - Fundamentals of Astrodynamics and Applications, 4th ed., p.461, 7.5.2 Herrick-Gibbs
Args:
p_sat (np.ndarray): set of satellite positions. Three close positions are required for the method to work.
t (np.ndarray): observation times
angle_checks (bool): flag whether on not to perform angle checks between position vectors
Returns:
        x_2 (np.ndarray): estimated satellite state (position + velocity) at the second observation.
        error (str): description of the failed sanity check, or None if all checks passed.
"""
#print(f"Herrick-Gibbs")
error = None
tolerance_angle = 10.0/180.0*np.pi
r = np.linalg.norm(p_sat, axis=0) # Magnitude of the observed positions
# Sanity checks
#angle_checks = True
if angle_checks:
p = np.cross(p_sat[:,1], p_sat[:,2])
p_n = p / np.linalg.norm(p)
x_sat_1n = p_sat[:,0] / r[0]
#copa = np.arcsin(np.dot(p_n, x_sat_1n)) # Variable unused in original code
# Check whether the vectors are coplanar
if np.abs(np.dot(x_sat_1n, p_n)) > tolerance_angle:
error = f"Error: not coplanar {np.abs(np.dot(x_sat_1n, p_n))} > {tolerance_angle}"
# Calculate angle between vectors
theta_01 = np.arccos(np.dot(p_sat[:,0], p_sat[:,1]) / (np.linalg.norm(p_sat[:,0])*np.linalg.norm(p_sat[:,1])))
theta_12 = np.arccos(np.dot(p_sat[:,1], p_sat[:,2]) / (np.linalg.norm(p_sat[:,1])*np.linalg.norm(p_sat[:,2])))
if min(theta_01, theta_12) > tolerance_angle:
error = f"Error: angles {min(theta_01, theta_12)} > {tolerance_angle}"
# Herrick-Gibbs Initial Orbit Determination
dt_10, dt_20, dt_21 = t[1]-t[0], t[2]-t[0], t[2]-t[1]
term = np.array([ -dt_21 * (1.0/(dt_10*dt_20)) + MU/(12.0*r[0]**3),
(dt_21-dt_10) * (1.0/(dt_10*dt_21)) + MU/(12.0*r[1]**3),
dt_10 * (1.0/(dt_21*dt_20)) + MU/(12.0*r[2]**3),
])
#v_sat_1 = term[0]*p_sat[:,0] + term[1]*p_sat[:,1] + term[2]*p_sat[:,2]
v_sat_1 = np.sum(term*p_sat, axis=1)
x_sat_1 = np.concatenate([p_sat[:,1], v_sat_1])
return x_sat_1, error
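# Usage sketch (illustrative): three closely spaced position fixes (for example from
# the TDoA solver above) a few tens of seconds apart give a full state at the middle epoch:
#
#   p_three = p_sat[:, [k - 1, k, k + 1]]           # (3, 3), positions as columns
#   t_three = t[[k - 1, k, k + 1]]
#   x_mid, err = herrick_gibbs(p_three, t_three)
#   if err is not None:
#       print(err)                                  # coplanarity / separation-angle check failed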
def batch(
x_0: np.ndarray,
P_bar_0: np.ndarray,
R: np.ndarray,
z: np.ndarray,
t: np.ndarray,
x_obs: np.ndarray,
f_obs,
tolerance: float = 1e-8,
max_iterations: int = 1000
):
""" Batch estimation algorithm.
Reference: <NAME>, <NAME>, <NAME> - Statistical Orbit Determination,
Chapter 4.6, p. 196-197 - Computational Algorithm for the Batch Processor.
Args:
x_0 (np.ndarray): Initial state vector, shape (x_dim, 1).
P_bar_0 (np.ndarray): Initial uncertainty, shape (x_dim, x_dim).
R (np.ndarray): Measurement uncertainty, shape (z_dim, z_dim).
z (np.ndarray): Array of measurements, shape (z_dim, n).
t (np.ndarray): Array of time deltas, shape (n,).
x_obs (np.ndarray): Array of observer positions (x_dim, n).
f_obs (): observation function.
tolerance (float): convergence tolerance.
Return:
        x_0 (np.ndarray): new estimate for the initial state vector.
        output (dict): run metadata - number of iterations and a singular-matrix flag.
"""
n = z.shape[1]
Phi_0 = np.eye(x_0.shape[0]) # Initial State Transition Matrix
x_hat_0 = np.zeros(x_0.shape) # Nominal trajectory update
x_bar_0 = np.zeros(x_0.shape) # Apriori estimate
W = np.linalg.inv(R)
W_vec = np.repeat(np.expand_dims(W, axis=2), n, axis=2)
error = 1
i = 0
singular = False
while(np.abs(error) > tolerance and i < max_iterations):
i += 1
# Check if initial uncertainty has been set up
if np.count_nonzero(P_bar_0) == 0:
            L = np.zeros((x_0.shape[0], x_0.shape[0]))
else:
L = np.linalg.inv(P_bar_0)
N = L.dot(x_bar_0)
# Propagate, flatten the stm and append to the state vector
x_Phi = np.transpose(odeint(orbdyn_2body_stm,
np.concatenate([x_0.squeeze(), Phi_0.flatten()]), t, args=(MU,)))
X = x_Phi[0:6,]
Phi = x_Phi[6:,].reshape((x_0.shape[0], x_0.shape[0], t.shape[0]))
# Calculate projected observations (projected measurements and H_tilde)
y, H_t = f_obs(X, x_obs)
dy = np.expand_dims(z - y, axis=1)
# Calculate H
H_k = np.einsum('ijl,jkl->ikl', H_t, Phi)
H_kt = np.transpose(H_k, axes=(1,0,2))
# Batch update
L += np.einsum('ijl,jkl,kml->im', H_kt, W_vec, H_k)
N += np.einsum('ijl,jkl,kml->im', H_kt, W_vec, dy)
temp = np.copy(x_hat_0)
try:
x_hat_0 = np.linalg.inv(L).dot(N)
except np.linalg.LinAlgError:
print("Singular matrix exception.")
singular = True
break
        x_0 += x_hat_0
x_bar_0 -= x_hat_0
error = np.abs(np.linalg.norm(temp - x_hat_0))
np.set_printoptions(precision=2)
output = {'num_it': i, 'singular': singular}
return x_0, output |
regtests/typed/float32vec.py | ahakingdom/Rusthon | 622 | 12758866 | """simd float32vec"""
def get_data():
return [1.9, 1.8, 1.7, 0.6, 0.99,0.88,0.77,0.66]
def main():
## the translator knows this is a float32vec because there are more than 4 elements
x = y = z = w = 22/7
a = numpy.array( [1.1, 1.2, 1.3, 0.4, x,y,z,w], dtype=numpy.float32 )
## in this case the translator is not sure what the length of `u` is, so it defaults
## to using a float32vec.
u = get_data()
b = numpy.array( u, dtype=numpy.float32 )
c = a + b
print(c)
TestError( c[0]==3.0 )
TestError( c[1]==3.0 )
TestError( c[2]==3.0 )
TestError( c[3]==1.0 )
|
test/onnx/test_pytorch_onnx_onnxruntime.py | jsun94/nimble | 206 | 12758876 | <gh_stars>100-1000
import unittest
import onnxruntime # noqa
import torch
import numpy as np
import io
import itertools
import copy
from torch.nn.utils import rnn as rnn_utils
from model_defs.lstm_flattening_result import LstmFlatteningResult
from model_defs.rnn_model_with_packed_sequence import RnnModelWithPackedSequence
from test_pytorch_common import (skipIfUnsupportedMinOpsetVersion, disableScriptTest,
skipIfUnsupportedOpsetVersion, skipIfNoLapack,
skipIfUnsupportedMaxOpsetVersion, skipIfONNXShapeInference)
from test_pytorch_common import BATCH_SIZE
from test_pytorch_common import RNN_BATCH_SIZE, RNN_SEQUENCE_LENGTH, RNN_INPUT_SIZE, RNN_HIDDEN_SIZE
from typing import List
import model_defs.word_language_model as word_language_model
import torchvision
import onnx
def to_numpy(tensor):
if tensor.requires_grad:
return tensor.detach().cpu().numpy()
else:
return tensor.cpu().numpy()
def convert_to_onnx(model, input=None, opset_version=9, example_outputs=None,
do_constant_folding=True, keep_initializers_as_inputs=True,
dynamic_axes=None, input_names=None, output_names=None,
fixed_batch_size=False, training=None,
onnx_shape_inference=False,
use_new_jit_passes=False):
# export the model to ONNX
f = io.BytesIO()
input_copy = copy.deepcopy(input)
torch.onnx._export(model, input_copy, f,
opset_version=opset_version,
example_outputs=example_outputs,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names, output_names=output_names,
fixed_batch_size=fixed_batch_size, training=training,
onnx_shape_inference=onnx_shape_inference,
use_new_jit_passes=use_new_jit_passes)
# compute onnxruntime output prediction
ort_sess = onnxruntime.InferenceSession(f.getvalue())
return ort_sess
def run_ort(ort_sess, input):
input_copy = copy.deepcopy(input)
input, _ = torch.jit._flatten(input_copy)
inputs = list(map(to_numpy, input))
ort_inputs = dict((ort_sess.get_inputs()[i].name, input) for i, input in enumerate(inputs))
ort_outs = ort_sess.run(None, ort_inputs)
return ort_outs
def ort_compare_with_pytorch(ort_outs, output, rtol, atol):
output, _ = torch.jit._flatten(output)
outputs = list(map(to_numpy, output))
# compare onnxruntime and PyTorch results
assert len(outputs) == len(ort_outs), "number of outputs differ"
# compare onnxruntime and PyTorch results
[np.testing.assert_allclose(out, ort_out, rtol=rtol, atol=atol) for out, ort_out in zip(outputs, ort_outs)]
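# Usage sketch (illustrative): the three helpers above form a minimal export / inference
# round trip for an eager module, outside of the test harness defined below:
#
#   model = torch.nn.Linear(3, 2).eval()
#   inputs = (torch.randn(4, 3),)
#   sess = convert_to_onnx(model, input=inputs, opset_version=11)
#   ort_compare_with_pytorch(run_ort(sess, inputs), model(*inputs), rtol=1e-3, atol=1e-7)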
def run_model_test(self, model, batch_size=2, state_dict=None,
input=None, use_gpu=True, rtol=0.001, atol=1e-7,
example_outputs=None, do_constant_folding=True,
dynamic_axes=None, test_with_inputs=None,
input_names=None, output_names=None,
fixed_batch_size=False):
model.eval()
if input is None:
input = torch.randn(batch_size, 3, 224, 224, requires_grad=True)
with torch.no_grad():
if isinstance(input, torch.Tensor):
input = (input,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy = copy.deepcopy(input)
output = model(*input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
ort_sess = convert_to_onnx(model, input=input, opset_version=self.opset_version,
example_outputs=output, do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=self.keep_initializers_as_inputs,
dynamic_axes=dynamic_axes, input_names=input_names,
output_names=output_names, fixed_batch_size=fixed_batch_size, training=None,
onnx_shape_inference=self.onnx_shape_inference,
use_new_jit_passes=self.use_new_jit_passes)
ort_outs = run_ort(ort_sess, input)
ort_compare_with_pytorch(ort_outs, output, rtol, atol)
# if additional test inputs are provided run the onnx
# model with these inputs and check the outputs
if test_with_inputs is not None:
for test_input in test_with_inputs:
if isinstance(test_input, torch.Tensor):
test_input = (test_input,)
test_input_copy = copy.deepcopy(test_input)
output = model(*test_input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
ort_outs = run_ort(ort_sess, test_input)
ort_compare_with_pytorch(ort_outs, output, rtol, atol)
class TestONNXRuntime(unittest.TestCase):
from torch.onnx.symbolic_helper import _export_onnx_opset_version
opset_version = _export_onnx_opset_version
keep_initializers_as_inputs = True # For IR version 3 type export.
use_new_jit_passes = False # For testing main code-path
onnx_shape_inference = False
def setUp(self):
torch.manual_seed(0)
onnxruntime.set_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
np.random.seed(seed=0)
self.is_script_test_enabled = True
def run_test(self, model, input, rtol=1e-3, atol=1e-7, do_constant_folding=True,
batch_size=2, use_gpu=True, dynamic_axes=None, test_with_inputs=None,
input_names=None, output_names=None, fixed_batch_size=False):
def _run_test(m):
return run_model_test(self, m, batch_size=batch_size,
input=input, use_gpu=use_gpu, rtol=rtol, atol=atol,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes, test_with_inputs=test_with_inputs,
input_names=input_names, output_names=output_names,
fixed_batch_size=fixed_batch_size)
if self.is_script_test_enabled and self.use_new_jit_passes:
script_model = torch.jit.script(model)
_run_test(script_model)
_run_test(model)
def run_model_test_with_external_data(self, model, input, rtol=0.001, atol=1e-7,
example_outputs=None, do_constant_folding=True,
dynamic_axes=None, input_names=None, output_names=None,
ort_optim_on=True):
import os
import tempfile
model.eval()
with torch.no_grad():
if isinstance(input, torch.Tensor):
input = (input,)
# In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy = copy.deepcopy(input)
output = model(*input_copy)
if isinstance(output, torch.Tensor):
output = (output,)
# export the model to ONNX
with tempfile.TemporaryDirectory() as tmpdirname:
model_file_name = os.path.join(tmpdirname, 'model.onnx')
input_copy = copy.deepcopy(input)
torch.onnx.export(model, input_copy, model_file_name,
opset_version=self.opset_version,
example_outputs=output,
verbose=False,
do_constant_folding=do_constant_folding,
keep_initializers_as_inputs=self.keep_initializers_as_inputs,
dynamic_axes=dynamic_axes,
input_names=input_names, output_names=output_names,
use_external_data_format=True)
# compute onnxruntime output prediction
ort_sess_opt = onnxruntime.SessionOptions()
ort_sess_opt.graph_optimization_level = \
onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED if ort_optim_on else \
onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
ort_sess = onnxruntime.InferenceSession(model_file_name, sess_options=ort_sess_opt)
input_copy = copy.deepcopy(input)
ort_outs = run_ort(ort_sess, input_copy)
ort_compare_with_pytorch(ort_outs, output, rtol, atol)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
def test_embedding_model_with_external_data(self):
class LargeModel(torch.nn.Module):
def __init__(self):
super(LargeModel, self).__init__()
dim = 15
n = 4 * 100
self.emb = torch.nn.Embedding(n, dim)
self.lin1 = torch.nn.Linear(dim, 1)
self.seq = torch.nn.Sequential(
self.emb,
self.lin1,
)
def forward(self, input):
return self.seq(input)
model = LargeModel()
x = torch.tensor([2], dtype=torch.long)
self.run_model_test_with_external_data(model, x)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
def test_mobilenet_v2_with_external_data(self):
model = torchvision.models.mobilenet_v2(pretrained=True)
x = torch.randn(2, 3, 224, 224, requires_grad=True)
# We are turning off Onnx Runtime optimization off in this test,
# because external data format is not supported to in ORT optimizer.
# Once that support is added, we can set ort_optim_on=True (default).
self.run_model_test_with_external_data(model, x, rtol=1e-3, atol=1e-5,
ort_optim_on=False)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
def test_attribute_with_external_data(self):
class LargeModel(torch.nn.Module):
def forward(self, x):
return x + torch.ones(2, 1024)
x = torch.randn(2, 1)
self.run_model_test_with_external_data(LargeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9) # Because external data format was released with Opset 9.
@unittest.skip("Enable this once large model with subgraph is supported in ORT")
def test_subgraph_with_external_data(self):
class LargeModel(torch.nn.Module):
def forward(self, x):
for i in range(x.size(0)):
x = x + torch.ones(2, 1024)
return x
x = torch.randn(2, 1)
self.run_model_test_with_external_data(torch.jit.script(LargeModel()), x)
def test_fuse_conv_bn1d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv1d(16, 33, 3, stride=2)
self.bn = torch.nn.BatchNorm1d(33)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_test(model, (x,))
def test_fuse_conv_bn2d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False)
self.bn = torch.nn.BatchNorm2d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(2, 3, 2, 2, requires_grad=True)
self.run_test(model, (x,))
def test_fuse_conv_bn3d(self):
class Fuse(torch.nn.Module):
def __init__(self):
super(Fuse, self).__init__()
self.conv = torch.nn.Conv3d(3, 2, (3, 5, 2), stride=(2, 1, 1), padding=(3, 2, 0), bias=False)
self.bn = torch.nn.BatchNorm3d(2)
def forward(self, x):
out = self.conv(x)
return self.bn(out)
model = Fuse()
x = torch.randn(2, 3, 10, 50, 100, requires_grad=True)
self.run_test(model, (x,), rtol=1e-3, atol=1e-6)
def test_reshape_constant_fold(self):
class Reshape(torch.nn.Module):
def __init__(self, ):
super(Reshape, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
scale_1 = self.weight.reshape(1, -1, 1, 1)
return x * scale_1
x = torch.randn(4, 5)
self.run_test(Reshape(), (x,), rtol=1e-3, atol=1e-5)
def run_word_language_model(self, model_name):
ntokens = 50
emsize = 5
nhid = 5
nlayers = 5
dropout = 0.2
tied = False
batchsize = 5
model = word_language_model.RNNModel(model_name, ntokens, emsize,
nhid, nlayers, dropout, tied,
batchsize)
x = torch.arange(0, ntokens).long().view(-1, batchsize)
# Only support CPU version, since tracer is not working in GPU RNN.
self.run_test(model, (x, model.hidden))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest() # Faster RCNN model is not scriptable
def test_faster_rcnn(self):
model = torchvision.models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True, min_size=200,
max_size=300)
model.eval()
x = torch.randn(2, 3, 200, 300, requires_grad=True)
self.run_test(model, (x,), rtol=1e-3, atol=1e-5)
def get_image_from_url(self, url):
import os
from urllib.parse import urlsplit
from urllib import request
from PIL import Image
from torchvision import transforms
from torch._utils_internal import get_writable_path
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__)))
path = os.path.join(data_dir, filename)
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb') as f:
f.write(data)
image = Image.open(path).convert("RGB")
image = image.resize((300, 200), Image.BILINEAR)
to_tensor = transforms.ToTensor()
return to_tensor(image)
def get_test_images(self):
image_url = "http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg"
image = self.get_image_from_url(url=image_url)
images = [image]
return images
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_mask_rcnn(self):
model = torchvision.models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200,
max_size=300)
images = self.get_test_images()
self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_keypoint_rcnn(self):
model = torchvision.models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200,
max_size=300)
images = self.get_test_images()
self.run_test(model, (images,), rtol=1e-3, atol=1e-5)
@disableScriptTest()
def test_word_language_model_RNN_TANH(self):
self.run_word_language_model("RNN_TANH")
@disableScriptTest()
def test_word_language_model_RNN_RELU(self):
self.run_word_language_model("RNN_RELU")
@disableScriptTest()
def test_word_language_model_LSTM(self):
self.run_word_language_model("LSTM")
@disableScriptTest()
def test_word_language_model_GRU(self):
self.run_word_language_model("GRU")
def test_index_1d(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[0]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
def test_index_2d_1dimslice(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[0:1, :]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
def test_index_2d_sliceint(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[1, :]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
def test_index_2d_neg_slice(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[0:-1, :]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_mask(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[torch.tensor([0, 1, 0], dtype=torch.uint8)]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
class MyModel(torch.nn.Module):
def forward(self, input):
return input[torch.tensor([0, 1, 0], dtype=torch.bool)]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), m1)
@disableScriptTest()
def test_dict(self):
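        # Dict inputs/outputs are only handled by the tracer (script test disabled
        # above); the dict here is keyed by a tensor.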
class MyModel(torch.nn.Module):
def forward(self, x_in):
x_out = {}
x_out["test_key_out"] = torch.add(x_in[list(x_in.keys())[0]], list(x_in.keys())[0])
return x_out
x = {torch.tensor(1.): torch.randn(1, 2, 3)}
self.run_test(MyModel(), (x,))
@disableScriptTest()
def test_dict_str(self):
class MyModel(torch.nn.Module):
def forward(self, x_in):
x_out = {}
x_out["test_key_out"] = torch.add(x_in["test_key_in"], 2.)
return x_out
x = {"test_key_in": torch.randn(1, 2, 3)}
self.run_test(MyModel(), (x,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_cste_script(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.zeros(x.size(0)), torch.ones((x.size(1), x.size(0)), dtype=torch.int64)
x = torch.randn(3, 4)
self.run_test(MyModel(), x)
def test_scalar_tensor(self):
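        # torch.scalar_tensor built from runtime sizes; a second, differently
        # shaped input exercises the dynamic axes of the exported graph.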
class test(torch.nn.Module):
def forward(self, input):
return torch.scalar_tensor(input.size(0)), \
torch.scalar_tensor(input.size(1), dtype=torch.int64)
x = torch.randn(2, 3, 4)
y = torch.randn(7, 8, 9)
model = test()
self.run_test(model, x, test_with_inputs=[y],
input_names=['input_1'],
dynamic_axes={'input_1': [0, 1, 2]})
def test_tensor(self):
class ScalarInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor(input.shape[1])
x = torch.randn(3, 4)
self.run_test(ScalarInputModel(), x)
class TensorInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor([input.shape[0], input.shape[1]])
x = torch.randn(3, 4)
self.run_test(TensorInputModel(), x)
class FloatInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor([float(input)])
x = torch.randn(1)
self.run_test(FloatInputModel(), x)
class InputWithDtypeModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor(input.shape[1], dtype=torch.long)
x = torch.randn(3, 4)
self.run_test(InputWithDtypeModel(), x)
class MixedInputModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.tensor([input.shape[0], int(input)])
x = torch.randn(1)
self.run_test(MixedInputModel(), x)
def test_hardtanh(self):
model = torch.nn.Hardtanh(-1.5, 2.5)
x = torch.arange(-5, 5).to(dtype=torch.float32)
self.run_test(model, x)
def test_hardtanh_script_with_default_values(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.nn.functional.hardtanh(x)
x = torch.arange(-5, 5).to(dtype=torch.float32)
self.run_test(MyModel(), x)
def test_clamp(self):
class ClampModel(torch.nn.Module):
def forward(self, x):
return x.clamp(-0.5, 0.5)
x = torch.randn(3, 4)
self.run_test(ClampModel(), x)
class ClampMinModel(torch.nn.Module):
def forward(self, x):
return x.clamp(min=-0.5)
x = torch.randn(3, 4)
self.run_test(ClampMinModel(), x)
class ClampMaxModel(torch.nn.Module):
def forward(self, x):
return x.clamp(max=0.5)
x = torch.randn(3, 4)
self.run_test(ClampMaxModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_clamp_dyn(self):
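        # clamp with bounds taken from runtime tensor sizes (opset >= 11).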
class ClampMaxModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.clamp(None, x.size(0))
x = torch.arange(16).view(4, 4).float()
self.run_test(ClampMaxModel(), x)
class ClampMinModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.clamp(x.size(0), None)
x = torch.arange(16).view(4, 4).float()
self.run_test(ClampMinModel(), x)
class ClampMinMaxModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x.clamp(x.size(0), x.size(1))
x = torch.arange(16).view(2, 8).float()
self.run_test(ClampMinMaxModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_trace(self):
class FullModel(torch.nn.Module):
def forward(self, x):
return torch.full((3, 4), x, dtype=torch.long)
x = torch.tensor(12)
self.run_test(FullModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_full_script(self):
class FullModelScripting(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.full((3, 4), x, dtype=torch.long)
x = torch.tensor(12)
self.run_test(FullModelScripting(), x)
def test_fuse_addmm(self):
class AddmmModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.ones(3, 3)
self.run_test(AddmmModel(), x)
def test_maxpool(self):
model = torch.nn.MaxPool1d(2, stride=1)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_conv(self):
class TraceModel(torch.nn.Module):
def __init__(self):
super(TraceModel, self).__init__()
self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
self.conv3 = torch.nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
def forward(self, input1, input2, input3):
return self.conv1(input1), self.conv2(input2), self.conv3(input3)
class ScriptModel(torch.jit.ScriptModule):
def __init__(self):
super(ScriptModel, self).__init__()
self.conv1 = torch.nn.Conv1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
self.conv3 = torch.nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
@torch.jit.script_method
def forward(self, input1, input2, input3):
return self.conv1(input1), self.conv2(input2), self.conv3(input3)
x1 = torch.randn(20, 16, 50)
x2 = torch.randn(20, 16, 50, 100)
x3 = torch.randn(20, 16, 10, 50, 100)
self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
self.run_test(ScriptModel(), (x1, x2, x3), atol=10e-5)
def test_conv_shape_inference(self):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv2 = torch.nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
def forward(self, input):
return self.conv2(input) + 2
x = torch.randn(20, 16, 50, 100)
self.run_test(Model(), x, atol=10e-5,
input_names=['x'],
dynamic_axes={'x': [0]})
def test_conv_transpose(self):
class TraceModel(torch.nn.Module):
def __init__(self):
super(TraceModel, self).__init__()
self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
self.conv3 = torch.nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
def forward(self, input1, input2, input3):
return self.conv1(input1), self.conv2(input2), self.conv3(input3)
class ScriptModel(torch.jit.ScriptModule):
def __init__(self):
super(ScriptModel, self).__init__()
self.conv1 = torch.nn.ConvTranspose1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
self.conv3 = torch.nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
@torch.jit.script_method
def forward(self, input1, input2, input3):
return self.conv1(input1), self.conv2(input2), self.conv3(input3)
x1 = torch.randn(20, 16, 50)
x2 = torch.randn(20, 16, 50, 100)
x3 = torch.randn(20, 16, 10, 50, 100)
self.run_test(TraceModel(), (x1, x2, x3), atol=10e-5)
self.run_test(ScriptModel(), (x1, x2, x3), atol=10e-5)
    # Conversion of Transpose depends on the input shape being known.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(False)
def test_transpose_infer_shape(self):
class TransposeModule(torch.jit.ScriptModule):
def __init__(self):
super(TransposeModule, self).__init__()
self.conv = torch.nn.Conv2d(3, 1, 3, stride=2)
@torch.jit.script_method
def forward(self, x):
x = self.conv(x)
return x.transpose(0, 1)
x = torch.randn(32, 3, 64, 64)
self.run_test(TransposeModule(), x)
def squeeze_model_tests(self, d, x1, x2):
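        # Helper: export Squeeze with or without an explicit dim and, when a
        # second input is given, re-run with that shape via the dynamic axes.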
class Squeeze(torch.nn.Module):
def __init__(self, d):
super(Squeeze, self).__init__()
self.d = d
def forward(self, x):
if self.d is not None:
return torch.squeeze(x, dim=self.d)
else:
return torch.squeeze(x)
x2 = [] if x2 is None else [x2]
self.run_test(Squeeze(d), x1, input_names=['input'], dynamic_axes={'input': {0: '0', 1: '1', 2: '2'}}, test_with_inputs=x2)
def test_squeeze_without_no_op(self):
x = torch.randn(2, 1, 4)
self.squeeze_model_tests(1, x, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze(self):
x_squeeze = torch.randn(2, 1, 4)
x_noop = torch.randn(2, 2, 3)
self.squeeze_model_tests(1, x_squeeze, x_noop)
def test_squeeze_neg_without_no_op(self):
x = torch.randn(2, 1, 4)
self.squeeze_model_tests(-2, x, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_neg(self):
x_squeeze = torch.randn(2, 1, 4)
x_noop = torch.randn(2, 2, 3)
self.squeeze_model_tests(-2, x_squeeze, x_noop)
def test_squeeze_all_dims(self):
x_squeeze = torch.randn(2, 1, 4)
x_noop = torch.randn(2, 2, 3)
self.squeeze_model_tests(None, x_squeeze, x_noop)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_no_op(self):
x_noop = torch.randn(2, 1, 4)
x_squeeze = torch.randn(2, 2, 1)
self.squeeze_model_tests(2, x_noop, x_squeeze)
def test_squeeze_no_op_without_additional_inputs(self):
x_noop = torch.randn(2, 1, 4)
self.squeeze_model_tests(2, x_noop, None)
@skipIfUnsupportedMinOpsetVersion(11)
def test_squeeze_runtime_dim(self):
class Squeeze(torch.nn.Module):
def forward(self, d1, d2):
t = torch.zeros(d1[0], d2[0])
return t.squeeze(0)
d1 = torch.tensor([1])
d3 = torch.tensor([3])
d4 = torch.tensor([4])
self.run_test(Squeeze(), (d1, d4), test_with_inputs=[(d3, d4)])
self.run_test(Squeeze(), (d3, d4), test_with_inputs=[(d1, d3)])
def test_unsqueeze(self):
class Unsqueeze(torch.nn.Module):
def forward(self, x):
return torch.unsqueeze(x, dim=-2)
x = torch.randn(2, 3, 4)
self.run_test(Unsqueeze(), x)
def test_maxpool_default_stride(self):
class MaxPoolModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.max_pool2d(x, 2)
model = MaxPoolModel()
x = torch.randn(10, 20, 16, 50)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(8)
def test_maxpool_adaptive(self):
model = torch.nn.AdaptiveMaxPool1d((5), return_indices=False)
x = torch.randn(20, 16, 50, requires_grad=True)
self.run_test(model, x)
def test_maxpool_2d(self):
model = torch.nn.MaxPool2d(5, padding=(1, 2))
x = torch.randn(1, 20, 16, 50, requires_grad=True)
self.run_test(model, x)
def test_maxpool_1d_ceil(self):
model = torch.nn.MaxPool1d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_maxpool_2d_ceil(self):
model = torch.nn.MaxPool2d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 32)
self.run_test(model, x)
def test_maxpool_3d_ceil(self):
model = torch.nn.MaxPool3d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 44, 31)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(8)
@disableScriptTest() # Functional module not scriptable
def test_maxpool_with_indices(self):
model = torch.nn.MaxPool1d(2, stride=1, return_indices=True)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_maxpool_dilation(self):
model = torch.nn.MaxPool1d(2, stride=1, dilation=2)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_avgpool_default_stride(self):
class AvgPoolModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.avg_pool2d(x, 2)
model = AvgPoolModel()
x = torch.randn(10, 20, 16, 50)
self.run_test(model, x)
def test_avgpool(self):
model = torch.nn.AvgPool1d(2, stride=1)
x = torch.randn(20, 16, 50)
self.run_test(model, x)
def test_avgpool_1d_ceil(self):
model = torch.nn.AvgPool1d(3, 2, ceil_mode=True)
x = torch.randn(1, 1, 7)
self.run_test(model, x)
def test_avgpool_2d_ceil(self):
model = torch.nn.AvgPool2d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 32)
self.run_test(model, x)
def test_avgpool_3d_ceil(self):
model = torch.nn.AvgPool3d(3, 2, ceil_mode=True)
x = torch.randn(20, 16, 50, 44, 31)
self.run_test(model, x)
def test_arithmetic(self):
class ArithmeticModule(torch.nn.Module):
def forward(self, x):
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
x = torch.randn(2, 3, 4)
self.run_test(ArithmeticModule(), x)
    # In scripting, the first transpose node does not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(False)
def test_arithmetic_infer_dtype(self):
class ArithmeticModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
x = x.t()
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
x = torch.randn(2, 3)
self.run_test(ArithmeticModule(), x)
def test_floor_div(self):
class FloorDivModule(torch.nn.Module):
def forward(self, x, y):
return x // 3, x // 2., \
x.to(dtype=torch.float64) // 3, x.to(dtype=torch.float64) // 2., \
x.to(dtype=torch.int64) // 3, x.to(dtype=torch.int64) // 2., \
x // (y + 1.).to(dtype=torch.int64), x // y, \
x.to(dtype=torch.float64) // y.to(dtype=torch.int64), x.to(dtype=torch.float64) // y.to(dtype=torch.float64), \
x.to(dtype=torch.int64) // y.to(dtype=torch.int64), x.to(dtype=torch.int64) // y
x = torch.randn(2, 3, 4)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4)
self.run_test(FloorDivModule(), (x, y))
def test_floor_div_script(self):
class FloorDivModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
return x // 3, x // 2., x // y
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
self.run_test(FloorDivModule(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_floordiv(self):
class FloordivModule(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.size(2) // x.size(1))
x = torch.randn(2, 3, 4)
self.run_test(FloordivModule(), (x,))
def test_div(self):
class DivModule(torch.nn.Module):
def forward(self, x, y):
return x / y, torch.true_divide(x, y)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
self.run_test(DivModule(), (x, y))
self.run_test(DivModule(), (x.float(), y.float()))
    # Note: div cannot (generally) be exported via scripting
    # since its type promotion logic depends on knowing the scalar types
    # of the input tensors. That is, the ONNX graph depends on the
    # data type of the inputs. This makes it appropriate for tracing only.
def test_div_promotion_trace(self):
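        # Tracing bakes the promotion implied by the current default dtype into
        # the graph, so the test is repeated under float and double defaults.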
class DivModule(torch.nn.Module):
def forward(self, x, y):
return x / y, torch.true_divide(x, y)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(torch.float)
self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
torch.set_default_dtype(torch.double)
self.run_test(torch.jit.trace(DivModule(), (x, y)), (x, y))
torch.set_default_dtype(prev_default)
    # In scripting, x and y do not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(False)
def test_div_promotion_script(self):
class DivModule(torch.nn.Module):
def forward(self, x, y):
                # Add transpose to hide shape/type information.
                # Otherwise, shape and type are still available from the input.
x = x.transpose(1, 2)
y = y.transpose(1, 2)
return x / y, torch.true_divide(x, y)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.int)
prev_default = torch.get_default_dtype()
        # 1. x and y are int, and the output is float.
        # This can be handled by the default case, where both are cast to float.
        # It works even if the types of x and y are unknown.
torch.set_default_dtype(torch.float)
self.run_test(torch.jit.script(DivModule()), (x, y))
        # 2. x and y are int, and the output is double.
        # This can be handled by the default case, where both are cast to double.
        # It works even if the types of x and y are unknown.
torch.set_default_dtype(torch.double)
self.run_test(torch.jit.script(DivModule()), (x, y))
        # 3. x is int, y is double, and the output is double.
        # This can only be handled when the types of both x and y are known.
torch.set_default_dtype(prev_default)
x = torch.randn(2, 3, 4).to(torch.int)
y = torch.arange(1, 2 * 3 * 4 + 1).reshape(2, 3, 4).to(torch.double)
self.run_test(torch.jit.script(DivModule()), (x, y))
def test_slice_trace(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x[0:1]
x = torch.randn(3)
self.run_test(MyModule(), x)
def test_slice_neg(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[-1:]
x = torch.randn(3, 4, 5)
self.run_test(NegSlice(), x)
def test_slice_neg_large(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[:, :, -3:-1, :, -1]
x = torch.randn(3, 4, 5, 6, 7)
self.run_test(NegSlice(), x)
def test_slice_neg_large_negone(self):
class NegSlice(torch.nn.Module):
def forward(self, x):
return x[:, :, :, :, -1]
x = torch.randn(3, 4, 5, 6, 7)
self.run_test(NegSlice(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_slice_with_input_index(self):
class InputIndexSlice(torch.nn.Module):
def forward(self, x, y):
x[:y.size(0), 0, :] = y
return x
x = torch.zeros((56, 6, 256))
y = torch.rand((22, 256))
self.run_test(InputIndexSlice(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
@disableScriptTest() # scripting tuple/list append
def test_slice_dynamic(self):
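        # Slices whose bounds come from runtime tensor sizes; exported with fully
        # dynamic axes and validated against a differently shaped second input.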
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[:x.size(0) - i, i:x.size(2), i:3])
return tuple(results)
x = torch.rand(5, 5, 5)
y = torch.randn(6, 7, 8)
self.run_test(DynamicSliceExportMod(), x, test_with_inputs=[y],
input_names=['input_1'],
output_names=['output_1'],
dynamic_axes={'input_1': [0, 1, 2],
'output_1': [0, 1, 2]})
@skipIfUnsupportedMinOpsetVersion(10)
def test_slice_dynamic_script(self):
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1:x.size(1)]
x = torch.rand(1, 2)
self.run_test(DynamicSliceModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_slice_dynamic_shape_script(self):
class DynamicSliceModel(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.shape[1:x.size(2)])
x = torch.rand(1, 2, 3, 4)
self.run_test(DynamicSliceModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
@disableScriptTest() # scripting tuple/list append
def test_slice_dynamic_to_end(self):
class DynamicSliceExportMod(torch.nn.Module):
def forward(self, x):
results = []
for i in range(4):
results.append(x[:, i:, x.size(2) - 5])
return tuple(results)
x = torch.rand(5, 5, 5)
self.run_test(DynamicSliceExportMod(), x,
dynamic_axes={'input_1': [0, 1, 2],
'output_1': [0, 1, 2]})
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_dynamic(self):
class ArangeModel(torch.nn.Module):
def forward(self, input):
return torch.arange(input.shape[0]), \
torch.arange(12), \
torch.arange(start=input.shape[0], end=input.shape[0] + 5)
x = torch.randn(5, 3, 2)
y = torch.randn(8, 3, 2)
self.run_test(ArangeModel(), x, test_with_inputs=[y],
input_names=['input_1'],
output_names=['output_1', 'output_2', 'output_3'],
dynamic_axes={'input_1': [0],
'output_1': [0]})
self.run_test(torch.jit.script(ArangeModel()), x,
test_with_inputs=[y], input_names=['input_1'],
output_names=['output_1', 'output_2', 'output_3'],
dynamic_axes={'input_1': [0],
'output_1': [0]})
@skipIfUnsupportedMinOpsetVersion(9)
def test_dynamic_arange_out(self):
class ArangeOutModel(torch.nn.Module):
def forward(self, end):
out_t = torch.tensor([1], dtype=torch.int64)
return torch.arange(end, out=out_t)
x = torch.tensor(8)
self.run_test(ArangeOutModel(), (x))
@skipIfUnsupportedMinOpsetVersion(9)
def test_dynamic_arange_start_out(self):
class ArangeStartOutModel(torch.nn.Module):
def forward(self, start, end):
out_t = torch.tensor([1], dtype=torch.int64)
return torch.arange(start.size(0), end, out=out_t)
x = torch.randn(2, 3, 4)
y = torch.tensor(8)
self.run_test(ArangeStartOutModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange(self):
class ArangeModel(torch.nn.Module):
def forward(self, start, end):
return torch.arange(start.size(0), end, 1.5, dtype=torch.int64)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_out(self):
class ArangeOutModel(torch.nn.Module):
def forward(self, end):
out_t = torch.tensor([1], dtype=torch.float)
return torch.arange(end, out=out_t)
x = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeOutModel(), (x))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_out(self):
class ArangeStartOutModel(torch.nn.Module):
def forward(self, start, end):
out_t = torch.tensor([1], dtype=torch.float)
return torch.arange(start.size(0), end, out=out_t)
x = torch.randn(2, 3, 4)
y = torch.tensor(8.5, dtype=torch.float)
self.run_test(ArangeStartOutModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_no_type(self):
class ArangeModel(torch.nn.Module):
def forward(self, end):
return torch.arange(end), \
torch.arange(0, end)
x = torch.tensor(6.2, dtype=torch.float)
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_size(self):
class SizeModel(torch.nn.Module):
def forward(self, input):
return torch.arange(input.size(0)), torch.arange(input.size(-1)), torch.ones(input.shape)
x = torch.randn(5, 3, 2)
self.run_test(SizeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # x.stride() not scriptable
def test_as_strided(self):
class Model(torch.nn.Module):
def forward(self, x):
chunk_size = list(x.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(x.stride())
chunk_stride[1] = chunk_stride[1] // 2
return x.as_strided((3, 3, 3), (1, 4, 2), storage_offset=2), x.as_strided(chunk_size, chunk_stride)
x = torch.randn(5, 8, 7)
self.run_test(Model(), x)
@disableScriptTest() # Ellipses followed by tensor indexing not scriptable
def test_tensor_index_advanced_indexing_ellipsis(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[..., torch.tensor([2, 1]), torch.tensor([0, 3])]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), (m1,))
def test_tensor_index_advanced_indexing(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[:, torch.tensor([[0, 2], [1, 1]]), :, torch.tensor([2, 1]), torch.tensor([0, 3])]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), (m1,))
class MyModel(torch.nn.Module):
def forward(self, input):
return input[:, torch.tensor([0, 2]), None, 2:4, torch.tensor([[1, 3], [4, 0]])]
self.run_test(MyModel(), (m1,))
class MyModel(torch.nn.Module):
def forward(self, input):
return input[:, torch.tensor([0, 2]), torch.tensor([1]), 2:4, torch.tensor([[1], [4]])]
self.run_test(MyModel(), (m1,))
def test_tensor_index_advanced_indexing_consecutive(self):
class MyModel(torch.nn.Module):
def forward(self, input):
return input[:, torch.tensor([0, 2]), torch.tensor([[1, 3], [4, 0]]), None]
m1 = torch.randn(3, 4, 5, 6, 7)
self.run_test(MyModel(), (m1,))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put(self):
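        # In-place assignment through advanced indexing (index_put), opset >= 11.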
class IndexPutModel(torch.nn.Module):
def forward(self, x, ind, update):
x[ind] = update
return x
x = torch.randn(3, 4)
ind = torch.tensor([1], dtype=torch.long)
update = torch.ones(4)
self.run_test(IndexPutModel(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_accumulate(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, ind, update):
return x.index_put((ind, ), update, accumulate=True)
x = torch.randn(3, 4)
ind = torch.tensor([2], dtype=torch.long)
update = torch.ones(4)
self.run_test(IndexPutModel(), (x, ind, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_index_put_slice_index(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, update):
x[1:2, 1:3, torch.tensor([1])] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(1, 2, 1)
self.run_test(IndexPutModel(), (x, update))
class IndexPutModel2(torch.nn.Module):
def forward(self, x, update):
x[torch.tensor([0, 2]), torch.tensor([1, 2])] += update
return x
x = torch.randn(3, 4, 5)
update = torch.randn(2, 5)
self.run_test(IndexPutModel2(), (x, update))
class IndexPutModel3(torch.nn.Module):
def forward(self, x, update):
x[torch.tensor([0, 2]), 1:2] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(2, 1, 1)
self.run_test(IndexPutModel3(), (x, update))
class IndexPutModel4(torch.nn.Module):
def forward(self, x, update):
x[torch.tensor([0, 2]), 2] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(2, 1)
self.run_test(IndexPutModel4(), (x, update))
class IndexPutModel5(torch.nn.Module):
def forward(self, x, update):
x[1:3, torch.tensor([0, 2]), 2] += update
return x
x = torch.randn(3, 4, 5)
update = torch.tensor([10, 15]).view(2, 1)
self.run_test(IndexPutModel5(), (x, update))
class IndexPutModel6(torch.nn.Module):
def forward(self, x, update):
x[1:3, 0] = update
return x
x = torch.randn(3, 4, 5)
update = torch.arange(2 * 5).to(torch.float).view(2, 5)
self.run_test(IndexPutModel6(), (x, update))
class IndexPutModel7(torch.nn.Module):
def forward(self, x, update):
x[1:, 0] = update
return x
x = torch.randn(3, 4, 5)
update = torch.arange(2 * 5).to(torch.float).view(2, 5)
self.run_test(IndexPutModel7(), (x, update))
class IndexPutModel8(torch.nn.Module):
def forward(self, x, update):
x[:3, 0] = update
return x
x = torch.randn(3, 4, 5)
update = torch.arange(3 * 5).to(torch.float).view(3, 5)
self.run_test(IndexPutModel8(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest() # Ellipses followed by tensor indexing not scriptable
def test_index_put_ellipsis(self):
class IndexPutModel(torch.nn.Module):
def forward(self, x, update):
x[..., torch.tensor([2, 1, 3]), 2:4] += update
return x
x = torch.randn(3, 4, 5, 6, 7)
update = torch.randn(3, 1, 1, 3, 2)
self.run_test(IndexPutModel(), (x, update))
class IndexPutModel2(torch.nn.Module):
def forward(self, x, update):
x[2, ..., torch.tensor([2, 1, 3]), 2:4] += update
return x
x = torch.randn(3, 4, 5, 6, 7)
update = torch.randn(4, 1, 3, 2)
self.run_test(IndexPutModel2(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_(self):
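        # In-place copy via slice assignment (copy_), covering slice, select and
        # tensor-index targets; requires opset >= 11.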
class CopyModel(torch.nn.Module):
def forward(self, x, data):
x[1:3] = data
return x
x = torch.randn(3, 4)
update = torch.randn(2, 4)
self.run_test(CopyModel(), (x, update))
# mixed slice and select
class CopyModel2(torch.nn.Module):
def forward(self, x, data):
x[1:3, 0] = data
return x
x = torch.randn(3, 4)
update = torch.tensor([0], dtype=torch.float32)
self.run_test(CopyModel2(), (x, update))
update = torch.tensor([2, 3], dtype=torch.float32)
self.run_test(CopyModel2(), (x, update))
update = torch.randn(2)
self.run_test(CopyModel2(), (x, update))
class CopyModel3(torch.nn.Module):
def forward(self, x, data):
x[1, 1:3] = data
return x
x = torch.randn(3, 4)
update = torch.tensor([0], dtype=torch.float32)
self.run_test(CopyModel3(), (x, update))
update = torch.tensor([2, 3], dtype=torch.float32)
self.run_test(CopyModel3(), (x, update))
update = torch.randn(2)
self.run_test(CopyModel3(), (x, update))
class CopyModel4(torch.nn.Module):
def forward(self, x, ind, data):
x[ind] = data
return x
x = torch.randn(3, 4)
ind = torch.tensor(2)
data = torch.randn(4)
self.run_test(CopyModel4(), (x, ind, data))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest() # Model not scriptable (output with shape doesn't match the broadcast shape)
def test_copy_tracing(self):
class CopyModel(torch.nn.Module):
def forward(self, x, data):
x[1, 1:3] = data
return x
x = torch.randn(3, 4)
update = torch.randn(1, 2)
self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
def test_copy_ellipsis(self):
class CopyModel(torch.nn.Module):
def forward(self, x, update):
x[..., 1] = update
return x
x = torch.randn(2, 3, 4)
update = torch.ones(1)
self.run_test(CopyModel(), (x, update))
x = torch.randn(2, 3, 4, 5, 6)
update = torch.ones(1)
self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest() # Missing input size (with ellipsis indexing)
def test_copy_ellipsis_tracing(self):
class CopyModel(torch.nn.Module):
def forward(self, x, update):
x[2, ..., 1:3] = update
return x
x = torch.randn(3, 4, 5, 6)
update = torch.ones(1)
self.run_test(CopyModel(), (x, update))
@skipIfUnsupportedMinOpsetVersion(10)
def test_flip(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.flip(x, dims=[0])
x = torch.tensor(np.arange(6.0).reshape(2, 3))
self.run_test(MyModule(), x)
def test_random(self):
class RandN(torch.nn.Module):
def forward(self, x):
return torch.mul(x, (torch.randn(2, 3, 4) + x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
class Rand(torch.nn.Module):
def forward(self, x):
return torch.mul(x, (torch.rand(2, 3, 4) + x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # symbolic update for randn
def test_random_dynamic_size(self):
class RandN(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.randn(x.size()).size(1))
x = torch.randn(2, 3, 4)
self.run_test(RandN(), x)
class Rand(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.rand(x.size()).size(1))
x = torch.randn(2, 3, 4)
self.run_test(Rand(), x)
def test_random_like(self):
class RandNLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.randn_like(x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandNLike(), x)
self.run_test(torch.jit.script(RandNLike()), x)
class RandLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x, torch.rand_like(x).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandLike(), x)
self.run_test(torch.jit.script(RandLike()), x)
def test_random_like_dtype(self):
class RandNLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x.to(torch.double), torch.randn_like(x, dtype=torch.double).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandNLike(), x)
class RandLike(torch.nn.Module):
def forward(self, x):
return torch.mul(x.to(torch.double), torch.rand_like(x, dtype=torch.double).size(0))
x = torch.randn(2, 3, 4)
self.run_test(RandLike(), x)
def _interpolate(self, x, mode, use_size, is_upsample, align_corners=False):
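        # Helper: run F.interpolate in the given mode, either with explicit output
        # sizes (use_size) or with scale factors, optionally with align_corners.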
class MyModel(torch.nn.Module):
def forward(self, x):
scale = 2.3 if is_upsample else 0.5
if len(x.size()) == 3:
scale_array = 2.3
if len(x.size()) == 4:
scale_array = [2.3, 5.1]
if len(x.size()) == 5:
scale_array = [3.3, 2.3, 5.1]
if use_size:
size_array = [int(float(v) * scale) for v in x.size()[2:]]
if align_corners:
return torch.nn.functional.interpolate(x, mode=mode, size=size_array[0], align_corners=True), \
torch.nn.functional.interpolate(x, mode=mode, size=size_array, align_corners=True)
return torch.nn.functional.interpolate(x, mode=mode, size=size_array[0]), \
torch.nn.functional.interpolate(x, mode=mode, size=size_array)
if align_corners:
return torch.nn.functional.interpolate(x, mode=mode, scale_factor=scale,
align_corners=True, recompute_scale_factor=False), \
torch.nn.functional.interpolate(x, mode=mode, scale_factor=scale_array,
align_corners=True, recompute_scale_factor=False)
return torch.nn.functional.interpolate(x, mode=mode,
scale_factor=scale, recompute_scale_factor=False), \
torch.nn.functional.interpolate(x, mode=mode,
scale_factor=scale_array, recompute_scale_factor=False)
self.run_test(MyModel(), x)
def _interpolate_script(self, x, mode, use_size, is_upsample, align_corners=False):
class MyModel(torch.jit.ScriptModule):
__constants__ = ['mode', 'use_size', 'is_upsample', 'size', 'scale', 'size_array', 'scale_array', 'align_corners']
def __init__(self, mode, use_size, is_upsample, align_corners):
super(MyModel, self).__init__()
self.mode = mode
self.use_size = use_size
self.is_upsample = is_upsample
self.align_corners = align_corners
self.scale = 2.0 if self.is_upsample else 0.5
self.size = 24 if self.is_upsample else 2
if x.dim() == 3:
self.scale_array = [2.3]
self.size_array = [16]
elif x.dim() == 4:
self.scale_array = [2.3, 3.1]
self.size_array = [16, 32]
else:
self.scale_array = [2.3, 3.1, 4.6]
self.size_array = [16, 32, 64]
@torch.jit.script_method
def forward(self, x):
if self.use_size:
if self.align_corners:
return torch.nn.functional.interpolate(x, mode=self.mode, size=self.size, align_corners=True), \
torch.nn.functional.interpolate(x, mode=self.mode, size=self.size_array, align_corners=True)
return torch.nn.functional.interpolate(x, mode=self.mode, size=self.size), \
torch.nn.functional.interpolate(x, mode=self.mode, size=self.size_array)
if self.align_corners:
return torch.nn.functional.interpolate(x, mode=self.mode,
scale_factor=self.scale, recompute_scale_factor=False), \
torch.nn.functional.interpolate(x, mode=self.mode,
scale_factor=self.scale_array, recompute_scale_factor=False)
return torch.nn.functional.interpolate(x, mode=self.mode,
scale_factor=self.scale, recompute_scale_factor=False), \
torch.nn.functional.interpolate(x, mode=self.mode,
scale_factor=self.scale_array, recompute_scale_factor=False)
model = MyModel(mode, use_size, is_upsample, align_corners)
self.run_test(model, x, atol=1e-6)
def _interpolate_tests(self, is_upsample):
# - cubic mode is not supported for opsets below 11;
# - linear mode does not match for opsets below 11;
modes = ["nearest", "linear", "bicubic"]
if self.opset_version < 11:
modes = ["nearest"]
x = [torch.randn(1, 2, 6, requires_grad=True),
torch.randn(1, 2, 4, 6, requires_grad=True),
torch.randn(1, 2, 4, 4, 6, requires_grad=True)]
for mode in modes:
for xi in x:
mode_i = mode
# TODO: enable bicubic downsample when ORT precision loss fixed
if mode == "bicubic" and xi.dim() != 4:
continue
elif mode == "linear":
if xi.dim() == 3:
# TODO : enable when linear mode is implemented for 1d inputs in ORT
continue
elif xi.dim() == 4:
mode_i = "bilinear"
elif xi.dim() == 5:
# TODO : enable when linear mode is implemented for 3d inputs in ORT
mode_i = "trilinear"
continue
self._interpolate(xi, mode_i, True, is_upsample)
# test with align_corners if supported
if mode != 'nearest':
self._interpolate(xi, mode_i, True, is_upsample, True)
self._interpolate_script(xi, mode_i, True, is_upsample, True)
                # The following cases require dynamic sizes/scales,
                # which are not supported for opset_version < 9.
if self.opset_version >= 9:
self._interpolate_script(xi, mode_i, True, is_upsample)
self._interpolate(xi, mode_i, False, is_upsample)
# test with align_corners if supported
if mode != 'nearest':
self._interpolate(xi, mode_i, False, is_upsample, True)
self._interpolate_script(xi, mode_i, False, is_upsample, True)
self._interpolate_script(xi, mode_i, False, is_upsample)
@disableScriptTest()
def test_interpolate_upsample(self):
self._interpolate_tests(True)
@disableScriptTest()
@skipIfUnsupportedMinOpsetVersion(9)
def test_interpolate_function_substitution(self):
class ScriptModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.nn.functional.interpolate(x, mode="nearest", scale_factor=2.)
class ScriptModule(torch.jit.ScriptModule):
def __init__(self):
super(ScriptModule, self).__init__()
self.submodule = ScriptModel()
@torch.jit.script_method
def forward(self, input):
return self.submodule(input)
x = torch.randn(1, 2, 4, 4, 6)
self.run_test(ScriptModule(), (x,))
@torch.jit.script
def script_method(x):
return torch.nn.functional.interpolate(x, mode="nearest", scale_factor=2.)
class TracingModule(torch.nn.Module):
def forward(self, x):
return script_method(x)
self.run_test(TracingModule(), (x,))
@skipIfUnsupportedMinOpsetVersion(10)
@disableScriptTest()
def test_interpolate_downsample(self):
self._interpolate_tests(False)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_interpolate_no_shape(self):
class MyModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
x = torch.add(x, x)
out1 = torch.nn.functional.interpolate(x, mode="bilinear", size=(16, 16), align_corners=False)
out2 = torch.nn.functional.interpolate(x, mode="nearest", size=(int(y.size(0)), int(y.size(1))))
return out1, out2
x = torch.randn(1, 2, 4, 4, requires_grad=True)
y = torch.randn(16, 16, requires_grad=True)
self.run_test(MyModel(), (x, y))
def test_interpolate_adaptive_pooling_error(self):
x = torch.randn(1, 2, 6, requires_grad=True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", True, True)
with self.assertRaises(RuntimeError) as cm:
self._interpolate(x, "area", False, True)
def test_groupnorm(self):
model = torch.nn.GroupNorm(3, 6, 0.002)
x = torch.randn(4, 6, 180, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(1, 6, 0.002)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(6, 6, 0.002)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
@disableScriptTest()
def test_groupnorm_noaffine(self):
model = torch.nn.GroupNorm(4, 8, 0.002, affine=False)
x = torch.randn(3, 8, 224, 224)
self.run_test(model, x)
model = torch.nn.GroupNorm(1, 6, 0.002, affine=False)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
model = torch.nn.GroupNorm(6, 6, 0.002, affine=False)
x = torch.randn(4, 6, 180, 180)
self.run_test(model, x)
def test_std(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, unbiased=False)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
def test_pow(self):
class PowModule(torch.nn.Module):
def forward(self, x, y):
return x.pow(y)
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
self.run_test(PowModule(), (x, y))
x = torch.randint(10, (2, 3, 4))
y = torch.randint(10, (2, 3, 4)).to(dtype=torch.int32)
self.run_test(PowModule(), (x, y))
x = torch.randint(10, (2, 3, 4))
y = torch.randint(10, (2, 3, 4))
self.run_test(PowModule(), (x, y))
x = torch.randn(2, 3, 4).to(dtype=torch.float64)
y = torch.randint(10, (2, 3, 4))
self.run_test(PowModule(), (x, y))
def test_std_along_dims(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=False)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
def test_std_keepdim(self):
class StandardDeviation(torch.nn.Module):
def forward(self, input):
return torch.std(input, dim=(0, 1), unbiased=False, keepdim=True)
x = torch.randn(2, 3, 4)
model = StandardDeviation()
self.run_test(model, x)
def test_bitshift(self):
class BitshiftModel(torch.nn.Module):
def forward(self, input, input2):
return input >> 1, input << 3.1, \
input2 >> torch.tensor([1, 2]), input2 << 4.2
input = torch.arange(24, dtype=torch.float32).reshape(3, 4, 2)
input2 = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
self.run_test(BitshiftModel(), (input, input2))
def test_bitshift_other_fp(self):
class BitshiftModel(torch.nn.Module):
def forward(self, input):
return input << 2.4
input = torch.arange(24, dtype=torch.int64).reshape(3, 4, 2)
self.run_test(BitshiftModel(), input)
    # uint8 is not implemented in ORT for the Mul op used when
    # exporting bitshift for opset_version < 10.
@skipIfUnsupportedMinOpsetVersion(11)
def test_bitshift_uint8(self):
class BitshiftModel(torch.nn.Module):
def forward(self, input, input2):
return input >> 1, input << 3., \
input2 >> torch.tensor([1, 2], dtype=torch.uint8), input2 << 4.
input = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
input2 = torch.arange(24, dtype=torch.uint8).reshape(3, 4, 2)
self.run_test(BitshiftModel(), (input, input2))
def test_narrow(self):
class NarrowModel(torch.nn.Module):
def forward(self, input):
return torch.narrow(input, 0, 0, 2)
x = torch.randn(3, 3, requires_grad=True)
self.run_test(NarrowModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_narrow_dynamic(self):
class NarrowModel(torch.nn.Module):
def forward(self, input):
return torch.narrow(input, 0, 0, input.shape[0] - 1)
x = torch.randn(3, 3, requires_grad=True)
self.run_test(NarrowModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_fill(self):
class IndexFillModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
return input.index_fill(2, index, -1)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(IndexFillModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_index_copy(self):
class IndexCopyModel(torch.nn.Module):
def forward(self, input):
index = torch.tensor([2, 0])
source = torch.ones(3, 2, 5)
return input.index_copy(1, index, source)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(IndexCopyModel(), x)
def test_select(self):
class Select(torch.nn.Module):
def forward(self, x):
return x[:, 1]
x = torch.randn(3, 4)
self.run_test(Select(), x)
def test_select_negative_index(self):
class Select(torch.nn.Module):
def forward(self, x):
return x[:, -1]
x = torch.randn(3, 4)
self.run_test(Select(), x)
    # TODO: enable for opset 10 when the ONNXRuntime version is updated
def test_index_select_constant_scaler_index(self):
class IndexSelectScalerIndexModel(torch.nn.Module):
def forward(self, x):
index = 2
return torch.index_select(x, 1, torch.tensor(index))
x = torch.randn(3, 4)
self.run_test(IndexSelectScalerIndexModel(), x)
def test_index_select_scaler_index(self):
class IndexSelectScalerIndexModel(torch.nn.Module):
def __init__(self, index_base):
super(IndexSelectScalerIndexModel, self).__init__()
self.index_base = torch.tensor(index_base)
def forward(self, x, index_offset):
index = self.index_base + index_offset
return torch.index_select(x, 1, index)
x = torch.randn(3, 4)
offset = 2
index_offset = torch.tensor(offset)
base = 1
self.run_test(IndexSelectScalerIndexModel(base), (x, index_offset))
def test_take(self):
class TakeModel(torch.nn.Module):
def forward(self, x, y):
return torch.take(x, y)
x = torch.randn(6, 4, 3, 3)
y = torch.tensor([4, 1, 7, 15, 63])
self.run_test(TakeModel(), (x, y))
def test_topk(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.topk(x, 3)
x = torch.arange(1., 6., requires_grad=True)
self.run_test(MyModule(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_topk_smallest_unsorted(self):
class MyModule(torch.nn.Module):
def forward(self, x, k):
                # When sorted=False, the order of elements in the output tensors
                # is not expected to match between PyTorch and ORT.
topk_unsorted = torch.topk(x, k, largest=False, sorted=False)
topk_sorted = torch.topk(x, k, largest=False, sorted=True)
return topk_sorted, torch.sort(topk_unsorted.values).values
x = torch.arange(1., 6., requires_grad=True)
k = torch.tensor(3)
self.run_test(MyModule(), (x, k))
@skipIfUnsupportedMinOpsetVersion(10)
def test_topk_script(self):
class MyModuleDynamic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, k):
return torch.topk(x, k)
x = torch.arange(1., 6., requires_grad=True)
k = torch.tensor(3)
self.run_test(MyModuleDynamic(), [x, k])
@skipIfUnsupportedOpsetVersion([7])
def test_normalize(self):
class Model(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.normalize(x)
x = torch.randn(3, 3)
self.run_test(Model(), x)
def test_layer_norm(self):
model = torch.nn.LayerNorm([10, 10])
x = torch.randn(20, 5, 10, 10)
self.run_test(model, x)
def test_batchnorm1d(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, affine=True)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm1d_noaffine(self):
x = torch.randn(10, 10)
model = torch.nn.BatchNorm1d(10, affine=False)
self.run_test(model, x)
x = torch.randn(10, 10, 128)
self.run_test(model, x)
def test_batchnorm2d(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, affine=True)
self.run_test(model, x)
def test_batchnorm2d_noaffine(self):
x = torch.randn(10, 3, 128, 128)
model = torch.nn.BatchNorm2d(3, affine=False)
self.run_test(model, x)
def test_batchnorm3d(self):
x = torch.randn(10, 3, 128, 128, 128)
model = torch.nn.BatchNorm3d(3, affine=True)
self.run_test(model, x)
def test_batchnorm3d_noaffine(self):
x = torch.randn(10, 3, 128, 128, 128)
model = torch.nn.BatchNorm3d(3, affine=False)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices):
values = 1.0
return input.scatter(1, indices, values)
input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=torch.float64)
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), input=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_with_scalar_different_types(self):
        # Tests the case where the scalar src (the update value) has a different
        # type from self. This happens only with a scalar src - PyTorch does not
        # allow it when src is a tensor.
class ScatterModel(torch.nn.Module):
def forward(self, input, indices):
values = 1.0
return input.scatter(1, indices, values)
input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=torch.float32)
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(ScatterModel(), input=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter(1, indices, values)
input = torch.tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input=(input, indices, values))
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), (input, indices, values))
input = torch.zeros(3, 4, 5, 6)
indices = torch.tensor([[1, 0], [0, 2], [0, 1]], dtype=torch.int64)
indices = indices.view(3, 2, 1, 1).expand(3, 2, 5, 6)
values = torch.arange(3 * 2 * 5 * 6, dtype=torch.float32).view(3, 2, 5, 6)
self.run_test(ScatterModel(), (input, indices, values))
input = torch.zeros(3, 4, 2)
indices = torch.tensor([[[1, 0], [0, 2]], [[1, 1], [0, 1]], [[2, 1], [2, 2]]])
values = torch.arange(3 * 2 * 2, dtype=torch.float32).view(3, 2, 2)
self.run_test(ScatterModel(), (input, indices, values))
@skipIfUnsupportedMinOpsetVersion(9)
def test_scatter_add(self):
class ScatterModel(torch.nn.Module):
def forward(self, input, indices, values):
return input.scatter_add(1, indices, values)
input = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]])
self.run_test(ScatterModel(), input=(input, indices, values))
@skipIfUnsupportedMinOpsetVersion(9)
def test_one_hot(self):
class OneHot(torch.nn.Module):
def __init__(self, num_classes):
super().__init__()
self.num_classes = num_classes
def forward(self, x):
return torch.nn.functional.one_hot(x, self.num_classes)
x = torch.arange(10)
self.run_test(OneHot(15), (x))
@skipIfUnsupportedMinOpsetVersion(9)
def test_gather(self):
class GatherModel(torch.nn.Module):
def forward(self, input, indices):
return input.gather(1, indices)
input = torch.tensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
indices = torch.tensor([[1, 0], [0, 1], [0, 1]], dtype=torch.int64)
self.run_test(GatherModel(), input=(input, indices))
@skipIfUnsupportedMinOpsetVersion(9)
def test_expand(self):
class ExpandModel(torch.nn.Module):
def forward(self, input):
return input.expand(2, 3, -1)
input = torch.randn(2, 1, 4)
self.run_test(ExpandModel(), input=(input))
class ExpandInferDimModel(torch.nn.Module):
def forward(self, input):
return input.expand(-1, input.size(0))
input = torch.randn(3, 1)
self.run_test(ExpandInferDimModel(), input=(input))
class ExpandTensorSizeModel(torch.nn.Module):
def forward(self, input, size):
return input.expand(size)
input = torch.randn(3,)
size = torch.tensor(-1)
self.run_test(ExpandTensorSizeModel(), input=(input, size))
def test_multinomial(self):
class Multinomial(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 3, replacement=True)
class MultinomialNoReplacement(torch.nn.Module):
def forward(self, weight):
return torch.multinomial(weight, 1)
weight = torch.tensor([[0, 10, 0, 0], [0, 0, 100, 0]], dtype=torch.float)
self.run_test(Multinomial(), (weight,))
self.run_test(MultinomialNoReplacement(), (weight,))
def _test_reduced_ops(self, op):
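        # Helper: export a reduction over the last dim for a range of integer and
        # floating dtypes, skipping combinations unsupported by torch or ORT.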
class ReducedOpModule(torch.nn.Module):
def forward(self, input):
return op(input, dim=-1)
if op != torch.mean: # torch.mean only supports float types
x = torch.randint(10, (4, 4), dtype=torch.uint8)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int8)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int16)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int32)
self.run_test(ReducedOpModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int64)
self.run_test(ReducedOpModule(), x)
        # torch.mean only supports float types;
        # ORT does not support ReduceProd for double types.
if op != torch.prod and op != torch.mean:
x = torch.randn(4, 5, dtype=torch.double)
self.run_test(ReducedOpModule(), x)
if op != torch.prod: # torch.prod not implemented for Half
x = torch.randn(4, 4, dtype=torch.half)
self.run_test(ReducedOpModule(), x)
x = torch.randn(4, 5, dtype=torch.float)
self.run_test(ReducedOpModule(), x)
def test_reduced_sum(self):
return self._test_reduced_ops(op=torch.sum)
def test_reduced_mean(self):
return self._test_reduced_ops(op=torch.mean)
def test_reduced_prod(self):
return self._test_reduced_ops(op=torch.prod)
def test_reduced_min_max(self):
class ReducedMinMaxModule(torch.nn.Module):
def forward(self, input):
return torch.min(input, dim=-1)[0], torch.max(input, dim=0)[0]
x = torch.randint(10, (4, 4), dtype=torch.int32)
self.run_test(ReducedMinMaxModule(), x)
x = torch.randint(10, (4, 4), dtype=torch.int64)
self.run_test(ReducedMinMaxModule(), x)
x = torch.randn(4, 5, dtype=torch.float)
self.run_test(ReducedMinMaxModule(), x)
def test_reduce_log_sum_exp(self):
class ReduceLogSumExpModel(torch.nn.Module):
def forward(self, input):
a = torch.logsumexp(input, dim=0)
b = torch.logsumexp(input, dim=(0, 1))
return a + b
x = torch.randn(4, 4, requires_grad=True)
self.run_test(ReduceLogSumExpModel(), x)
def test_softmax(self):
for i in range(-4, 3):
model = torch.nn.Softmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_test(model, input)
class SoftmaxUnknownRank(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.softmax = torch.nn.Softmax(dim=i)
def forward(self, x):
return self.softmax(x.reshape(3, 4, 5, 6))
model = torch.jit.script(SoftmaxUnknownRank(i))
self.run_test(model, input)
def test_softmax_large_values(self):
input = torch.tensor([[-1e12, -1e12, -1e12], [1e12, 0.0, -5.0], [3.0, 4.0, 5.0]])
for i in range(-2, 1):
model = torch.nn.Softmax(dim=i)
self.run_test(model, input)
class SoftmaxUnknownRank(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.softmax = torch.nn.Softmax(dim=i)
def forward(self, x):
return self.softmax(x.reshape(3, 3))
model = torch.jit.script(SoftmaxUnknownRank(i))
self.run_test(model, input)
def test_logsoftmax(self):
for i in range(7)[2:]:
model = torch.nn.LogSoftmax(dim=i - 1)
dims = [2] * (i - 2) + [3, 4]
input = torch.ones(*dims, requires_grad=True)
self.run_test(model, input)
def test_logsoftmax_dim(self):
for i in range(-4, 3):
model = torch.nn.LogSoftmax(dim=i)
input = torch.randn(3, 4, 5, 6)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # scripting prim_dtype
def test_lstm_no_hidden(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.rnn = torch.nn.LSTM(input_size=16, hidden_size=16)
def forward(self, x):
return self.rnn(x)
input = torch.randn((10, 16, 16))
self.run_test(LSTMModel(), (input,))
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm(self):
model = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
h0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(1, BATCH_SIZE, RNN_HIDDEN_SIZE)
self.run_test(model, (input, (h0, c0)))
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm_default_init_state(self):
model = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(model, input)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # LSTMModel model not scriptable
def test_lstm_fixed_batch_size(self):
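        # The initial hidden state is built from the runtime batch size inside
        # forward; fixed_batch_size restricts verification to inputs of that size.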
class LSTMModel(torch.nn.Module):
def __init__(self):
super(LSTMModel, self).__init__()
self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, 1, bidirectional=False)
def forward(self, input):
batch_size = input.size()[1]
h0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
c0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
h0 = torch.from_numpy(h0_np)
c0 = torch.from_numpy(c0_np)
return self.lstm(input, (h0, c0))
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
        # verify with a different input of the same batch size
input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(LSTMModel(), input, fixed_batch_size=True, test_with_inputs=[input2])
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm_post_fix_init_state(self):
class LSTMModel(torch.nn.Module):
def __init__(self):
super(LSTMModel, self).__init__()
self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE,
1, bidirectional=False)
def forward(self, input):
batch_size = input.size()[1]
h0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
c0_np = np.ones([1, batch_size, RNN_HIDDEN_SIZE]).astype(np.float32)
h0 = torch.from_numpy(h0_np)
c0 = torch.from_numpy(c0_np)
return self.lstm(input, (h0, c0))
model = LSTMModel()
input = torch.randn(RNN_SEQUENCE_LENGTH, 1, RNN_INPUT_SIZE)
        # verify with a different input of a different batch size
input2 = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
self.run_test(model, input, dynamic_axes={'input' : {0 : 'seq', 1 : 'batch'}},
test_with_inputs=[input2])
@disableScriptTest()
def test_lstm_constant_folding(self):
class LstmNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super(LstmNet, self).__init__()
self.lstm = torch.nn.LSTM(input_size, hidden_size, num_layers, bidirectional=bidirectional)
def forward(self, input, initial_state):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
seq_len, bidirectional):
num_directions = 2 if bidirectional else 1
model = LstmNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
c0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, (h0, c0))
batch_size1 = 3
model1, input1 = get_LstmNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_test(model1, input1, do_constant_folding=True)
batch_size2 = 4
model2, input2 = get_LstmNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest()
def test_lstm_no_bias(self):
class LstmNet(torch.nn.Module):
def __init__(self, num_layers, bidirectional):
super(LstmNet, self).__init__()
self.lstm = torch.nn.LSTM(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, num_layers, bias=False, bidirectional=bidirectional)
def forward(self, input, initial_state):
return self.lstm(input, initial_state)
def get_LstmNet_model_and_inputs(num_layers, bidirectional):
input = torch.randn(RNN_SEQUENCE_LENGTH, BATCH_SIZE, RNN_INPUT_SIZE)
num_directions = 2 if bidirectional else 1
model = LstmNet(num_layers, bidirectional)
h0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
c0 = torch.randn(num_layers * num_directions, BATCH_SIZE, RNN_HIDDEN_SIZE)
return model, (input, (h0, c0))
num_layers = [1, 1, 2, 3]
bidirectional = [True, False, True, False]
models_and_inputs = [get_LstmNet_model_and_inputs(n, b) for n, b in zip(num_layers, bidirectional)]
for model, input in models_and_inputs:
self.run_test(model, input)
@disableScriptTest()
def test_rnn_no_bias(self):
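# make_model builds a bias-free RNN; packed_sequence mode 1 wraps it to consume a
# PackedSequence, and mode 2 additionally uses batch-first inputs.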
def make_model(layers, packed_sequence):
batch_first = True if packed_sequence == 2 else False
model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, bidirectional=False,
batch_first=batch_first, bias=False)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
return model
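# make_input pads a batch of variable-length sequences (sorted by decreasing length),
# always adds an initial hidden state, and appends the lengths when packing is used.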
def make_input(batch_size, layers, packed_sequence):
batch_first = True if packed_sequence == 2 else False
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
h0 = torch.randn(layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
layers = [1, 3, 1, 3, 1, 3]
packed_sequence = [0, 0, 1, 1, 2, 2]
models = [make_model(l, p) for l, p in zip(layers, packed_sequence)]
inputs = [make_input(RNN_BATCH_SIZE, l, p) for l, p in zip(layers, packed_sequence)]
for model, input in zip(models, inputs):
self.run_test(model, input, batch_size=RNN_BATCH_SIZE)
def test_gru_no_bias(self):
class GruNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super(GruNet, self).__init__()
self.mygru = torch.nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional, bias=False)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
seq_len, bidirectional):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
input_size = [7, 5]
hidden_size = [3, 4]
num_layers = [2, 3]
batch_size = [3, 4]
seq_len = [5, 7]
bidirectional = [True, False]
models_and_inputs = [get_GruNet_model_and_inputs(i, h, n, b, s, bi)
for i, h, n, b, s, bi in zip(input_size, hidden_size, num_layers, batch_size, seq_len, bidirectional)]
for model, input in models_and_inputs:
self.run_test(model, input, do_constant_folding=True)
def test_gru_constant_folding(self):
class GruNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_layers, bidirectional):
super(GruNet, self).__init__()
self.mygru = torch.nn.GRU(input_size, hidden_size, num_layers, bidirectional=bidirectional)
def forward(self, input, initial_state):
out = self.mygru(input, initial_state)
return out
def get_GruNet_model_and_inputs(input_size, hidden_size, num_layers, batch_size,
seq_len, bidirectional):
num_directions = 2 if bidirectional else 1
model = GruNet(input_size, hidden_size, num_layers, bidirectional)
input = torch.randn(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers * num_directions, batch_size, hidden_size)
return model, (input, h0)
batch_size1 = 3
model1, input1 = get_GruNet_model_and_inputs(7, 3, 2, batch_size1, 5, True)
self.run_test(model1, input1, do_constant_folding=True)
batch_size2 = 4
model2, input2 = get_GruNet_model_and_inputs(5, 4, 3, batch_size2, 7, False)
self.run_test(model2, input2, do_constant_folding=True)
@skipIfUnsupportedMinOpsetVersion(8)
def test_max_tensors(self):
class MaxModel(torch.nn.Module):
def forward(self, input, other):
return torch.max(input, other)
model = MaxModel()
x = torch.randn(4, 4, requires_grad=True)
y = torch.randn(4, 1, requires_grad=True)
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0), dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(a.size(0))
x = torch.randn(3, 4, requires_grad=True)
outputs = ArangeScript()(x)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(a.size(0))
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) + 2, dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2.7, a.size(0) + 2).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_arange_start_end_step(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2, a.size(0) * a.size(1) + 2, a.size(1), dtype=torch.float).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_arange_start_end_step_notype(self):
class ArangeScript(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1) + a
x = torch.randn(3, 4, requires_grad=True)
self.run_test(ArangeScript(), x)
class ArangeModel(torch.nn.Module):
def forward(self, a):
return torch.arange(2.7, a.size(0) * a.size(1) + 2, a.size(1)).view(-1, 1) + a
self.run_test(ArangeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test__dim_arange(self):
class DimArange(torch.nn.Module):
def forward(self, input):
return torch._dim_arange(input, 1)
x = torch.ones(5, 6)
self.run_test(DimArange(), x)
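# Shared helper: runs a comparison module on float and int inputs; for binary
# comparisons it also covers every float/int operand combination to exercise
# type promotion in the exported graph.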
def _test_compare_ops(self, model, num_inputs):
x_float = torch.randn(1, 2, 3, 4, requires_grad=True)
x_int = torch.randint(10, (3, 4), dtype=torch.int32)
if num_inputs > 1:
y_float = torch.randn(1, 2, 3, 4, requires_grad=True)
y_int = torch.randint(10, (3, 4), dtype=torch.int32)
self.run_test(model, (x_float, y_float))
self.run_test(model, (x_float, y_int))
self.run_test(model, (x_int, y_float))
self.run_test(model, (x_int, y_int))
else:
self.run_test(model, x_float)
self.run_test(model, x_int)
def test_gt(self):
class GreaterModel(torch.nn.Module):
def forward(self, input, other):
return input > other
self._test_compare_ops(GreaterModel(), 2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ge(self):
class GreaterOrEqualModel(torch.nn.Module):
def forward(self, input, other):
return input >= other
self._test_compare_ops(GreaterOrEqualModel(), 2)
def test_gt_scalar(self):
class GreaterModel(torch.nn.Module):
def forward(self, input):
return input > 1
self._test_compare_ops(GreaterModel(), 1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_ge_scalar(self):
class GreaterOrEqualModel(torch.nn.Module):
def forward(self, input):
return input >= 1
self._test_compare_ops(GreaterOrEqualModel(), 1)
def test_lt(self):
class LessModel(torch.nn.Module):
def forward(self, input, other):
return input < other
self._test_compare_ops(LessModel(), 2)
@skipIfUnsupportedMinOpsetVersion(9)
def test_le(self):
class LessOrEqualModel(torch.nn.Module):
def forward(self, input, other):
return input <= other
self._test_compare_ops(LessOrEqualModel(), 2)
def test_lt_scalar(self):
class LessModel(torch.nn.Module):
def forward(self, input):
return input < 1
self._test_compare_ops(LessModel(), 1)
@skipIfUnsupportedMinOpsetVersion(9)
def test_le_scalar(self):
class LessOrEqualModel(torch.nn.Module):
def forward(self, input):
return input <= 1
self._test_compare_ops(LessOrEqualModel(), 1)
def test_matmul(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.matmul(input, other)
x = torch.randn(3, 4, requires_grad=True)
y = torch.randn(4, 5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (3, 4))
y = torch.randint(10, (4, 5))
self.run_test(MatmulModel(), (x, y))
def test_matmul_batch(self):
class MatmulModel(torch.nn.Module):
def forward(self, input, other):
return torch.matmul(input, other)
x = torch.randn(2, 3, 4, requires_grad=True)
y = torch.randn(2, 4, 5, requires_grad=True)
self.run_test(MatmulModel(), (x, y))
x = torch.randint(10, (2, 3, 4))
y = torch.randint(10, (2, 4, 5))
self.run_test(MatmulModel(), (x, y))
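# Shared helper: exports argmin/argmax both with and without keepdim on the given input.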
def _argmin_argmax_model(self, input):
class ArgminArgmaxModel(torch.nn.Module):
def forward(self, input):
return torch.argmin(input), \
torch.argmax(input), \
torch.argmin(input, keepdim=True), \
torch.argmax(input, keepdim=True)
self.run_test(ArgminArgmaxModel(), input)
def test_argmin_argmax(self):
input = torch.randn(7, 3, 5)
self._argmin_argmax_model(input)
# Argmin and Argmax with "select_last_index" are not supported before opset 12
# "select_last_index" was added in opset 12 to deal with corner case where the
# same value appears multiple times in the tensor
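# (e.g. the second row of the first test input below, [1., 1., 2.], has a tied minimum).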
@skipIfUnsupportedMinOpsetVersion(12)
def test_argmin_argmax_select_last_index(self):
input = torch.tensor([[1., 2., 3.],
[1., 1., 2.]])
self._argmin_argmax_model(input)
input = torch.ones(7, 3, 5)
self._argmin_argmax_model(input)
def test_repeat(self):
class RepeatModel(torch.nn.Module):
def forward(self, x, y):
x2 = x.repeat(y.shape[0], 1)
y1 = y.view(-1, 1)
return x2 + y1
x = torch.tensor([1, 2, 3])
y = torch.tensor([4, 5, 8, 9])
self.run_test(RepeatModel(), (x, y))
def test_view(self):
class ViewModel(torch.nn.Module):
def forward(self, input):
return input.view(4, 24)
x = torch.randint(10, (4, 2, 3, 4), dtype=torch.int32)
self.run_test(ViewModel(), x)
def test_view_dynamic(self):
class ViewModel(torch.nn.Module):
def forward(self, input, other):
return input.view(other.shape)
x = torch.randn(2, 3, 4)
shape = torch.randn(6, 4)
self.run_test(ViewModel(), (x, shape))
def test_view_dynamic_zero_dim(self):
class ViewModel(torch.nn.Module):
def forward(self, input):
input = input.view(-1, 2)
return input.view(1, -1)
x = torch.ones(2)
another_x = torch.empty((0,))
self.run_test(ViewModel(), x, test_with_inputs=[another_x],
input_names=['input_1'], dynamic_axes={'input_1': [0, ]})
def test_view_as(self):
class ViewModel(torch.nn.Module):
def forward(self, input, other):
return input.view_as(other)
x = torch.randn(2, 3, 4)
y = torch.randn(6, 4)
self.run_test(ViewModel(), (x, y))
@disableScriptTest() # ONNX Shape inference failure in if/else block for Gemm
def test_weight_norm(self):
model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=1)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(model, x)
model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3))
x = torch.randn(1, 1, 5, requires_grad=True)
self.run_test(model, x)
model = torch.nn.utils.weight_norm(torch.nn.Conv1d(1, 1, 3), dim=-2)
x = torch.randn(1, 1, 5, requires_grad=True)
self.run_test(model, x)
model = torch.nn.utils.weight_norm(torch.nn.Conv1d(3, 6, 3), name='weight')
x = torch.randn(3, 3, 5, requires_grad=True)
self.run_test(model, x)
@disableScriptTest() # ONNX Shape inference failure in if/else block for Gemm
def test_weight_norm_nodim(self):
model = torch.nn.utils.weight_norm(torch.nn.Linear(5, 10), dim=None)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(model, x)
def test_flatten(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input)
x = torch.randint(10, (1, 2, 3, 4))
self.run_test(FlattenModel(), x)
def test_flatten2d(self):
class FlattenModel(torch.nn.Module):
def forward(self, input):
return torch.flatten(input, 1)
x = torch.randint(10, (1, 2, 3, 4))
self.run_test(FlattenModel(), x)
def test_flatten2d_neg(self):
class FlattenModel(torch.nn.Module):
def forward(self, x):
return torch.flatten(x, 1, -1), torch.flatten(x, 0, -2), torch.flatten(x, 1, -2)
x = torch.randint(10, (1, 2, 3, 4))
self.run_test(FlattenModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_flatten_dynamic_axes(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return torch.flatten(x, start_dim=2, end_dim=3)
batch_size = 3
x = torch.randn(batch_size, 5, 4, 5)
y = torch.randn(5, 5, 4, 5)
model = MyModule()
self.run_test(model, x, test_with_inputs=[y],
input_names=['input'],
output_names=['output'],
dynamic_axes={'input' : {0 : 'batch_size'},
'output' : {0 : 'batch_size'}})
@skipIfUnsupportedMinOpsetVersion(11)
def test_getitem(self):
class GetItemModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y, z, ind):
# this will create prim::ListConstruct(x, y, z) + aten::__getitem__
arr = [x, y, z]
return arr[ind]
x = torch.randn(3, 4, 5)
y = torch.randn(1, 4, 5)
z = torch.randn(2, 4, 5)
ind = torch.tensor(1, dtype=torch.long)
self.run_test(GetItemModel(), (x, y, z, ind))
ind = torch.tensor(-2, dtype=torch.long)
self.run_test(GetItemModel(), (x, y, z, ind))
def test_unbind(self):
class UnbindModel(torch.nn.Module):
def forward(self, input):
_, out, _ = input.unbind()
return out
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel(), x)
class UnbindModel2(torch.nn.Module):
def forward(self, input):
_, out, _, _ = input.unbind(1)
return out
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel2(), x)
class UnbindModel3(torch.nn.Module):
def forward(self, input):
_, out, _, _ = input.unbind(-2)
return out
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel3(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_len(self):
class LenModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return len(input.unbind()) + input
x = torch.randn(4, 5)
self.run_test(LenModel(), x, input_names=['input'], dynamic_axes={'input': {0: 'seq'}},
test_with_inputs=(torch.randn(5, 5),))
@skipIfUnsupportedMinOpsetVersion(9)
def test_len_list(self):
class LenListModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return torch.ones(len(input.shape))
x = torch.randn(4, 5)
self.run_test(LenListModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unbind_dynamic(self):
class UnbindModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.unbind()[1]
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel(), x)
class UnbindModel2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.unbind(-1)[1]
x = torch.randn(3, 4, 5)
self.run_test(UnbindModel2(), x)
def test_split(self):
class SplitModel(torch.nn.Module):
def forward(self, input):
out1, out2, out3 = input.split([2, 1, 2])
return out1, out2, out3
x = torch.randn(5, 4, 3)
self.run_test(SplitModel(), x)
class SplitModel2(torch.nn.Module):
def forward(self, input):
out1, out2, out3 = input.split([2, 1, 1], -2)
return out1, out2, out3
x = torch.randn(5, 4, 3)
self.run_test(SplitModel2(), x)
class SplitModel3(torch.nn.Module):
def forward(self, input):
out1, out2, out3 = input.split([2, 1, 2])
return out3, out1
x = torch.randn(5, 4, 3)
self.run_test(torch.jit.script(SplitModel3()), x)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_split_size_as_list(self):
class SplitModel(torch.nn.Module):
def forward(self, input, split_sizes: List[int]):
out = []
split_list: List[torch.Tensor] = input.split(split_sizes)
for ob in split_list:
out.append(ob)
return torch.cat(out, dim=0)
x = torch.randn(6, 4, 3)
split_sizes = [torch.tensor(2), torch.tensor(4)]
self.run_test(SplitModel(), (x, split_sizes))
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_size_with_slice(self):
class SplitModule(torch.nn.Module):
def forward(self, x, y, t):
splits = (x.size(1), y.size(1))
out, out2 = torch.split(t, splits, dim=1)
return out, out2
x = torch.randn(2, 3)
y = torch.randn(2, 4)
t = torch.randn(2, 7)
self.run_test(SplitModule(), (x, y, t))
@skipIfUnsupportedMinOpsetVersion(11)
def test_split_dynamic(self):
class SplitModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.split(2)[1]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel(), x)
class SplitModel2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.split(2, -3)[1]
x = torch.randn(5, 4, 3)
self.run_test(SplitModel2(), x)
def test_concat(self):
class ConcatModel(torch.nn.Module):
def forward(self, x, y, z):
return torch.cat((x, y, z))
x = torch.randn(3, 4, 5)
y = torch.randn(1, 4, 5)
z = torch.randn(2, 4, 5)
self.run_test(ConcatModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(11)
def test_concat_dynamic(self):
class ConcatDynamicModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.cat(x.unbind())
x = torch.randn(4, 5, 6)
self.run_test(ConcatDynamicModel(), x)
def test_stack(self):
class StackModel(torch.nn.Module):
def forward(self, x, y, z):
return torch.stack((x, y, z), 1)
x = torch.randn(3, 4, 5)
y = torch.randn(3, 4, 5)
z = torch.randn(3, 4, 5)
self.run_test(StackModel(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(11)
def test_stack_dynamic(self):
class StackDynamicModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.stack(x.unbind(), 1)
x = torch.randn(4, 5, 6)
self.run_test(StackDynamicModel(), x)
def test_loop_dynamic(self):
class LoopModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(x.size(2)):
x = x + i
return x
model = LoopModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(9)
def test_loop_nested(self):
class NestedLoopsModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(5):
a = 0
while a < 4:
a += 1
x = x + a
return x
model = NestedLoopsModel()
inputs = torch.zeros(1, 2, 3, dtype=torch.long)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_with_list(self):
class ListLoopModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
res = []
res1 = []
arr = x.split([3, 4, 1, 1, 2, 3, 2], 0)
res2 = torch.zeros(3, 4, dtype=torch.long)
res3 = []
res4 = []
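# Note: in TorchScript, list.append returns the list itself, so the reassignments
# below are valid under scripting (in eager mode append would return None).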
for i in range(len(arr)):
res = res.append(arr[i].sum(0, False))
res1 = res1.append(arr[-1 - i].sum(0, False))
res2 += 1
res3 = res3 + [arr[i].sum(0, False)]
res4 += [arr[-1 - i].sum(0, False)]
return torch.stack(res), torch.stack(res1), res2, torch.stack(res3), torch.stack(res4)
model = ListLoopModel()
inputs = torch.randn(16)
self.run_test(model, inputs)
@skipIfONNXShapeInference(False)
@skipIfUnsupportedMinOpsetVersion(11)
def test_loop_transpose(self):
class LoopModel(torch.nn.Module):
def forward(self, x):
res = torch.zeros_like(x[0])
for i in range(x.size(0)):
res += x[0].transpose(0, 1)
return res
model = torch.jit.script(LoopModel())
x = torch.randn(5, 3, 3)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_list(self):
class ListModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
tensors = x.unbind()
res = []
res.append(tensors[0])
res.append(tensors[1])
res.pop(1)
res.insert(0, tensors[1])
res.append(tensors[2])
res += [tensors[3], tensors[4]]
res = res + [tensors[5]]
return torch.ones(len(res))
model = ListModel()
inputs = torch.randn(16, 1)
self.run_test(model, inputs)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_factories(self):
class TensorFactory(torch.nn.Module):
def forward(self, x):
return torch.zeros(x.size()) + torch.ones(x.size())
x = torch.randn(2, 3, 4)
self.run_test(TensorFactory(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_factories_script(self):
class TensorFactory(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return torch.zeros(x.shape, dtype=torch.float) + torch.ones(x.shape, dtype=torch.float)
x = torch.randn(2, 3, 4)
self.run_test(TensorFactory(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_tensor_like_factories_script(self):
class TensorFactory(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
zeros = torch.zeros_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))
ones = torch.ones_like(x, dtype=torch.float, layout=torch.strided, device=torch.device('cpu'))
return zeros + ones
x = torch.randn(2, 3, 4)
self.run_test(TensorFactory(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_eye(self):
class TensorFactory(torch.nn.Module):
def forward(self, x):
return torch.eye(x.size()[1], 3), torch.eye(4, 4, dtype=torch.long), torch.eye(x.size()[1], 2, dtype=torch.long)
x = torch.randn(2, 3, 4)
another_x = torch.randn(5, 6, 7)
self.run_test(TensorFactory(), x, test_with_inputs=[another_x],
input_names=['input_1'], dynamic_axes={'input_1': [0, 1, 2]})
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_zero(self):
class Zero_(torch.nn.Module):
def forward(self, x):
return x.zero_(), x
x = torch.randn(2, 3, 4)
self.run_test(Zero_(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_zeros(self):
class Zero_(torch.nn.Module):
def forward(self, x):
return x.new_zeros(x.shape[1:2]), x.new_zeros(x.shape[2:], dtype=torch.long)
x = torch.randn(2, 3, 4)
self.run_test(Zero_(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_list_pass(self):
class Slice(torch.nn.Module):
def forward(self, x, y):
return x.new_zeros(x.shape[2:] + y.shape[1:])
x = torch.randn(2, 3, 4, 5)
y = torch.randn(1, 2, 3, 4)
self.run_test(Slice(), (x, y))
class Size(torch.nn.Module):
def forward(self, x, y):
return x.new_zeros(x.shape + y.shape)
x = torch.randn(2, 3, 4)
y = torch.randn(1, 2, 3)
self.run_test(Size(), (x, y))
class Array(torch.nn.Module):
def forward(self, x, y):
arr1 = [x.shape[0], x.shape[1], 2]
arr2 = [y.shape[0], y.shape[1]]
return x.new_zeros(arr1 + arr2)
x = torch.randn(2, 3, 4)
y = torch.randn(1, 2, 3)
self.run_test(Array(), (x, y))
class List(torch.nn.Module):
def forward(self, x, y):
l1 = list(x.shape)
l2 = list(y.shape)
return x.new_zeros(l1 + l2)
x = torch.randn(2, 3, 4)
y = torch.randn(1, 2, 3)
self.run_test(List(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_empty(self):
class Empty(torch.nn.Module):
def forward(self, x):
return x.new_empty(x.shape[0]).fill_(0), x.new_empty(x.shape[0], dtype=torch.long) * 0
x = torch.randn(2, 3, 4)
self.run_test(Empty(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_new_full(self):
class Full(torch.nn.Module):
def forward(self, x):
return x.new_full(x.shape[1:2], 5), x.new_full(x.shape[0:1], 1.3, dtype=torch.long)
x = torch.randn(2, 3, 4)
self.run_test(Full(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_list(self):
class Arithmetic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
return torch.cat([x.add_(3), y.fill_(0)])
x = torch.randn(2, 3)
y = torch.randn(2, 3)
self.run_test(Arithmetic(), (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_inplace_fill(self):
class Fill_(torch.nn.Module):
def forward(self, x):
return x.fill_(3), x
x = torch.randn(2, 3, 4)
self.run_test(Fill_(), x)
def test_inplace_arithmetic(self):
class Arithmetic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, y):
x.add_(3)
y.mul_(x)
return x, y
x = torch.randn(2, 3, 4)
y = torch.randn(2, 3, 4)
self.run_test(Arithmetic(), (x, y))
@disableScriptTest()
def test_sort(self):
class SortModel(torch.nn.Module):
def forward(self, x):
out = []
for i in range(-2, 2):
out.append(torch.sort(x, dim=i, descending=True))
return out
x = torch.randn(3, 4)
self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest()
def test_sort_ascending(self):
class SortModel(torch.nn.Module):
def forward(self, x):
out = []
for i in range(-2, 2):
out.append(torch.sort(x, dim=i, descending=False))
return out
x = torch.randn(3, 4)
self.run_test(SortModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill(self):
class MaskedFillModel(torch.nn.Module):
def forward(self, x):
mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
return x.masked_fill(mask, 2)
x = torch.zeros(4, 2, 3, requires_grad=True)
self.run_test(MaskedFillModel(), x)
class MaskedFillModel2(torch.nn.Module):
def forward(self, x):
return x.masked_fill(x > 3, -1)
x = torch.arange(16).view(2, 2, 4).to(torch.float32)
self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_masked_fill_inplace(self):
class MaskedFillModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
mask = torch.tensor([[0, 0, 1], [1, 1, 0]], dtype=torch.uint8)
x.masked_fill_(mask, 2)
return x
x = torch.zeros(4, 2, 3, requires_grad=True)
self.run_test(MaskedFillModel(), x)
class MaskedFillModel2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
x.masked_fill_(x > 3, -1)
return x
x = torch.arange(16).view(2, 2, 4).to(torch.float32)
self.run_test(MaskedFillModel2(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_scatter(self):
class MaskedScatterModel(torch.nn.Module):
def forward(self, x):
return torch.masked_scatter(x, x.ge(0.5), torch.ones(100, 100) * 5)
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(MaskedScatterModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_masked_select(self):
class MaskedSelectModel(torch.nn.Module):
def forward(self, x):
return torch.masked_select(x, x.ge(0.5))
x = torch.randn(3, 4, 5, requires_grad=True)
self.run_test(MaskedSelectModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_pixel_shuffle(self):
class PixelShuffle(torch.nn.Module):
def forward(self, x):
return torch.pixel_shuffle(x, upscale_factor=2)
x = torch.randn(2, 16, 4, 3, requires_grad=True)
self.run_test(PixelShuffle(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_scalar_type(self):
class ArithmeticModel(torch.nn.Module):
def forward(self, x):
return x.size(0) * 2 * x
x = torch.ones(2, 3, dtype=torch.float32)
self.run_test(ArithmeticModel(), x)
class ReciprocalModel(torch.nn.Module):
def forward(self, x):
return torch.reciprocal(x)
x = torch.tensor([2.0, 4.0], dtype=torch.double)
self.run_test(ReciprocalModel(), x)
class ComparisonModel(torch.nn.Module):
def forward(self, x, y):
a = torch.tensor([12.0])
return x.lt(1.5) & y.le(2) & x.le(1), x.gt(y), x.lt(y), a.ge(x.size(0))
x = torch.ones(2, 3, dtype=torch.int32)
y = torch.ones(2, 3, dtype=torch.float32)
self.run_test(ComparisonModel(), (x, y))
class MatMulModel(torch.nn.Module):
def forward(self, x):
return (torch.mm(x, x) + x + torch.mm(x, x) + x)
x = torch.ones(3, 3)
self.run_test(MatMulModel(), x)
class AddMMModel(torch.nn.Module):
def forward(self, x):
return torch.mm(x, x) + x
x = torch.ones(3, 3)
self.run_test(AddMMModel(), x)
class FullModel(torch.nn.Module):
# Add is used when exporting full with a non-constant fill value
def forward(self, x):
return torch.full((3, 4), x)
x = torch.tensor(12.)
self.run_test(FullModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # dtype mismatch
def test_full_like(self):
class FullLikeModel(torch.nn.Module):
def forward(self, x):
return torch.full_like(x, 4)
x = torch.tensor(12)
self.run_test(FullLikeModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # dtype mismatch
def test_full_like_value(self):
class FullLikeModel(torch.nn.Module):
def forward(self, x, y):
out = y + 2
return torch.full_like(x, out)
x = torch.tensor(12)
y = torch.tensor(2)
self.run_test(FullLikeModel(), (x, y))
def test_l1_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p=1, dim=-1, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_l2_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p=2, dim=-2, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_frobenius_norm(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p="fro", dim=0, keepdim=False)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_frobenius_norm_keepdim(self):
class NormModel(torch.nn.Module):
def forward(self, x):
return torch.norm(x, p="fro", dim=(0, 1), keepdim=True)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(NormModel(), x)
def test_unfold(self):
class UnfoldModel(torch.nn.Module):
def forward(self, x):
return x.unfold(dimension=2, size=2, step=2)
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(UnfoldModel(), x)
@skipIfONNXShapeInference(False)
def test_unfold_infer_shape(self):
class UnfoldModule(torch.jit.ScriptModule):
def __init__(self):
super(UnfoldModule, self).__init__()
self.conv = torch.nn.Conv1d(3, 1, 3, stride=2)
@torch.jit.script_method
def forward(self, x):
x = self.conv(x)
return x.unfold(dimension=2, size=2, step=2)
x = torch.randn(32, 3, 64)
self.run_test(UnfoldModule(), x)
def test_remainder(self):
class RemainderModel(torch.nn.Module):
def forward(self, input, other):
return torch.remainder(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
self.run_test(RemainderModel(), (x, y))
def test_remainder_scalar(self):
class RemainderModel(torch.nn.Module):
def forward(self, input):
return torch.remainder(input, 2.55)
x = torch.randint(10, (2, 3))
self.run_test(RemainderModel(), x)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod(self):
class FModModel(torch.nn.Module):
def forward(self, input, other):
return torch.fmod(input, other)
x = torch.randn(4, 2, 3)
y = torch.randn(1, 2, 1)
self.run_test(FModModel(), (x, y))
@skipIfUnsupportedMinOpsetVersion(10)
def test_fmod_scalar(self):
class FModModel(torch.nn.Module):
def forward(self, input):
return torch.fmod(input, 2.55)
x = torch.randint(10, (2, 3))
self.run_test(FModModel(), x)
@skipIfUnsupportedMinOpsetVersion(9)
def test_gelu(self):
class GeluModel(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.gelu(x)
x = torch.randn(2, 4, 5, 6, requires_grad=True)
self.run_test(GeluModel(), x)
def test_add_inplace(self):
class InplaceAddModel(torch.nn.Module):
def forward(self, x):
x += 12
return x
x = torch.randn(4, 2, 3, requires_grad=True)
self.run_test(InplaceAddModel(), x)
def test_rsqrt(self):
class RsqrtModel(torch.nn.Module):
def forward(self, x):
return x.rsqrt()
x = torch.randn(4, 2, 3, requires_grad=True, dtype=torch.float64)
self.run_test(RsqrtModel(), x)
def test_rsqrt_zeros(self):
class RsqrtModel(torch.nn.Module):
def forward(self, x):
return x.rsqrt()
x = torch.zeros(4, 2, 3, requires_grad=True, dtype=torch.float64)
self.run_test(RsqrtModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique(self):
class UniqueModel(torch.nn.Module):
def forward(self, x):
return torch.unique(x, sorted=True, return_inverse=False, return_counts=True)
x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_unique_along_dim(self):
class UniqueModel(torch.nn.Module):
def forward(self, x):
return torch.unique(x, dim=0, sorted=True, return_inverse=True, return_counts=False)
x = torch.tensor([1, 3, 2, 3], dtype=torch.long)
self.run_test(UniqueModel(), x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum(self):
class CumSum(torch.nn.Module):
def forward(self, input):
return torch.cumsum(input, dim=0)
x = torch.randn(2, 3, 4)
model = CumSum()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_cumsum_with_cast(self):
class CumSum(torch.nn.Module):
def forward(self, input):
return torch.cumsum(input, dim=0, dtype=torch.float32)
model = CumSum()
x = torch.tensor([2, 3, 4], dtype=torch.int32)
self.run_test(model, x)
x = torch.tensor([False, True, True])
self.run_test(model, x)
@disableScriptTest()  # error when propagating assigned input shapes
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedOpsetVersion([12]) # Due to ONNX Loop shape inference issue
def test_embedding_bag(self):
model = torch.nn.EmbeddingBag(10, 5, mode='sum', scale_grad_by_freq=True)
input = torch.randint(10, (7,))
offset = torch.tensor([0, 2, 5, 6])
self.run_test(model, (input, offset))
model = torch.nn.EmbeddingBag(10, 5, mode='sum', include_last_offset=True)
input = torch.randint(10, (7,))
offset = torch.tensor([0, 2, 5, 6])
self.run_test(model, (input, offset))
model = torch.nn.EmbeddingBag(10, 5, mode='max')
input = torch.randint(10, (7, 5))
self.run_test(model, (input))
@disableScriptTest() # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedOpsetVersion([12]) # Due to ONNX Loop shape inference issue
def test_embedding_bag_1d_per_sample_weights(self):
class EmbeddingModel(torch.nn.Module):
def forward(self, embedding_matrix, input, offset, weights):
return torch.nn.functional.embedding_bag(input, embedding_matrix, offsets=offset,
mode='sum', per_sample_weights=weights)
model = EmbeddingModel()
x = torch.randint(7, (6,))
w = torch.randn(6, )
offset = torch.tensor([0, 2, 5])
embedding_matrix = torch.rand(10, 15)
self.run_test(model, (embedding_matrix, x, offset, w))
@disableScriptTest() # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(10)
@skipIfUnsupportedOpsetVersion([12]) # Due to ONNX Loop shape inference issue
def test_embedding_bag_2d_per_sample_weights(self):
class EmbeddingModel(torch.nn.Module):
def forward(self, embedding_matrix, input, weights):
return torch.nn.functional.embedding_bag(input, embedding_matrix,
mode='sum', per_sample_weights=weights)
embedding_matrix = torch.rand(10, 15)
model = EmbeddingModel()
x = torch.randint(7, (2, 3))
w = torch.randn(2, 3)
self.run_test(model, (embedding_matrix, x, w))
@disableScriptTest() # scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(11)
@unittest.skip("Due to ONNX Loop shape inference issue.")
def test_embedding_bag_dynamic_input(self):
class EmbeddingModel1D(torch.nn.Module):
def forward(self, embedding_matrix, input, weights, offsets):
return torch.nn.functional.embedding_bag(input, embedding_matrix, offsets=offsets,
mode='sum', per_sample_weights=weights)
model = EmbeddingModel1D()
x = torch.randint(7, (6,))
w = torch.randn(6, )
offsets = torch.tensor([0, 2, 5], dtype=torch.long)
embedding_matrix = torch.rand(10, 15)
x2 = torch.randint(7, (2,))
w2 = torch.randn(2, )
embedding_matrix2 = torch.rand(12, 25)
offsets2 = torch.tensor([0, ], dtype=torch.long)
self.run_test(model, (embedding_matrix, x, w, offsets),
test_with_inputs=[(embedding_matrix2, x2, w2, offsets2)],
input_names=['embedding_matrix', 'x', 'offsets', 'w'],
dynamic_axes={'embedding_matrix': [0, 1], 'x': [0], 'offsets': [0], 'w': [0]})
class EmbeddingModel2D(torch.nn.Module):
def forward(self, embedding_matrix, input, weights):
return torch.nn.functional.embedding_bag(input, embedding_matrix,
mode='sum', per_sample_weights=weights)
model = EmbeddingModel2D()
x = torch.randint(7, (2, 3))
w = torch.randn(2, 3)
embedding_matrix = torch.rand(10, 15)
x2 = torch.randint(7, (3, 5))
w2 = torch.randn(3, 5)
embedding_matrix2 = torch.rand(12, 25)
self.run_test(model, (embedding_matrix, x, w),
test_with_inputs=[(embedding_matrix2, x2, w2)],
input_names=['embedding_matrix', 'x', 'w'],
dynamic_axes={'embedding_matrix': [0, 1], 'x': [0, 1], 'w': [0, 1]})
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid(self):
class Meshgrid(torch.nn.Module):
def forward(self, x, y, z):
output1, output2, output3 = torch.meshgrid(x, y, z)
return output1, output2, output3
x = torch.randn(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.randn(5, requires_grad=True)
self.run_test(Meshgrid(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(8)
def test_meshgrid_scalar(self):
class Meshgrid(torch.nn.Module):
def forward(self, x, y, z):
output1, output2, output3 = torch.meshgrid(x, y, z)
return output1, output2, output3
x = torch.ones(3, requires_grad=True)
y = torch.zeros(4, requires_grad=True)
z = torch.tensor(2.0)
self.run_test(Meshgrid(), (x, y, z))
def test_baddbmm(self):
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2):
return torch.baddbmm(input, batch1, batch2, alpha=torch.tensor(5), beta=3.5)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
model = MyModule()
self.run_test(model, (x, batch1, batch2))
def test_baddbmm_dynamic(self):
class MyModule(torch.nn.Module):
def forward(self, input, batch1, batch2, alpha, beta):
return torch.baddbmm(input, batch1, batch2, alpha=alpha, beta=beta)
x = torch.randn(10, 3, 5)
batch1 = torch.randn(10, 3, 4)
batch2 = torch.randn(10, 4, 5)
alpha = torch.tensor(5)
beta = torch.tensor(3.5)
model = MyModule()
self.run_test(model, (x, batch1, batch2, alpha, beta))
def test_numel(self):
class MyModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.numel() * input
x = torch.randn(2, 3, 5)
model = MyModule()
self.run_test(model, (x,))
def test_numel_empty(self):
class MyModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
return input.numel() * input
x = torch.randn(0)
model = MyModule()
self.run_test(model, (x,))
def test_cast_to(self):
class MyModule(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, other):
return input.to(other) + other
x = torch.randn(2, 3, 4)
y = torch.tensor([1], dtype=torch.int64)
model = MyModule()
self.run_test(model, (x, y))
def test_cast_to_bool(self):
class MyModule(torch.nn.Module):
def forward(self, input, other):
return torch.cat((input.to(other), other), 0)
x = torch.randn(2, 3, 4)
y = torch.zeros([2, 3, 4], dtype=torch.bool)
model = MyModule()
self.run_test(model, (x, y))
@skipIfUnsupportedMinOpsetVersion(9)
def test_ones_bool(self):
class MyModule(torch.nn.Module):
def forward(self, input):
true = torch.ones(input.shape, dtype=torch.bool)
return input.to(true) & true
x = torch.randn(2, 3, 4)
model = MyModule()
self.run_test(model, x)
def test_log(self):
class Log(torch.nn.Module):
def forward(self, input):
return torch.log(input)
x = torch.rand(2, 3, 4)
model = Log()
self.run_test(model, x)
def test_log1p(self):
class Log1p(torch.nn.Module):
def forward(self, input):
return torch.log1p(input)
x = torch.rand(2, 3, 4)
model = Log1p()
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
def test_round(self):
class Round(torch.nn.Module):
def forward(self, x):
return torch.round(x)
x = torch.tensor([0.9920, -1.0362, -1.5000, 3.5000], requires_grad=True)
self.run_test(Round(), x)
def test_constant_pad(self):
model = torch.nn.ConstantPad1d(2, 3.5)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ConstantPad2d((3, 0, 2, 1), 3.5)
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
# Dynamic padding is added in opset 11
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest() # Functional module not scriptable
def test_pad_types(self):
# Test for different pad integer types
class Pad(torch.nn.Module):
def forward(self, x, pad):
return torch.nn.functional.pad(x, pad)
x = torch.randn(2, 2, 4, 4)
y = pad = (torch.tensor(2, dtype=torch.int32), torch.tensor(4, dtype=torch.int32))
self.run_test(Pad(), (x, y))
y = pad = (torch.tensor(2, dtype=torch.int64), torch.tensor(4, dtype=torch.int64))
self.run_test(Pad(), (x, y))
@skipIfUnsupportedMaxOpsetVersion(10)
def test_unsupported_pad(self):
class Pad(torch.nn.Module):
def forward(self, x, pad):
return torch.nn.functional.pad(x, pad)
def run():
x = torch.randn(2, 2, 4, 4)
y = pad = (torch.tensor(2, dtype=torch.int32), torch.tensor(4, dtype=torch.int32))
p = Pad()
f = io.BytesIO()
torch.onnx._export(p, (x, y), f)
with self.assertRaises(RuntimeError) as cm:
run()
the_exception = cm.exception
self.assertEqual('Unsupported: ONNX export of Pad in opset 9. The sizes of the padding must be constant. ' +
'Please try opset version 11.', the_exception.args[0])
@disableScriptTest() # export prim::Uninitialized
def test_reflection_pad(self):
model = torch.nn.ReflectionPad1d(2)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ReflectionPad2d((3, 0, 2, 1))
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
@disableScriptTest() # export prim::Uninitialized
def test_replication_pad(self):
model = torch.nn.ReplicationPad1d(2)
x = torch.randn(2, 4, 4)
self.run_test(model, x)
model = torch.nn.ReplicationPad2d((3, 0, 2, 1))
x = torch.randn(2, 2, 4, 4)
self.run_test(model, x)
@skipIfUnsupportedMinOpsetVersion(11)
@disableScriptTest() # export prim::Uninitialized
def test_im2col(self):
class Unfold(torch.nn.Module):
def forward(self, input):
return torch.nn.functional.unfold(input, kernel_size=(10, 15), dilation=2, padding=5, stride=3), \
torch.nn.functional.unfold(input, kernel_size=(2, 2), dilation=1, padding=0, stride=3), \
torch.nn.functional.unfold(input, kernel_size=(1, 1), dilation=5, padding=2, stride=3)
x = torch.rand(1, 1, 200, 100)
self.run_test(Unfold(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_det(self):
class Det(torch.nn.Module):
def forward(self, x):
return torch.det(x)
x = torch.randn(2, 3, 5, 5)
self.run_test(Det(), x)
# This test checks that the output scalar type in the ONNX graph is not null
# https://github.com/pytorch/pytorch/issues/28607
@skipIfUnsupportedMinOpsetVersion(10)
def test_trace_script(self):
@torch.jit.script
def center_slice_helper(input, h_offset):
return input[:, h_offset:]
class CenterCrop(torch.nn.Module):
def forward(self, input):
return center_slice_helper(input, torch.tensor(input.shape[1] - 1))
x = torch.randn(3, 4)
self.run_test(CenterCrop(), x)
@skipIfNoLapack
@skipIfUnsupportedMinOpsetVersion(11)
def test_logdet(self):
class LogDet(torch.nn.Module):
def forward(self, x):
return torch.logdet(x)
x = torch.randn(2, 3, 5, 5)
self.run_test(LogDet(), x)
def test_dim(self):
class DimModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
out = input * 2
out *= out.dim()
return out
empty_input = torch.randn(0, requires_grad=True)
multi_dim_input = torch.randn(1, 2, 3, requires_grad=True)
self.run_test(DimModel(), empty_input)
self.run_test(DimModel(), multi_dim_input)
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # variable number of inputs not scriptable
def test_einsum(self):
class EinsumModelBatchDiagonal(torch.nn.Module):
def forward(self, *tensor_list):
eqn = '...ii ->...i'
return torch.einsum(eqn, *tensor_list)
x = torch.randn(3, 5, 5)
self.run_test(EinsumModelBatchDiagonal(), input=(x,))
class EinsumModelBatchMatmul(torch.nn.Module):
def forward(self, *tensor_list):
eqn = 'bij, bjk -> bik'
return torch.einsum(eqn, *tensor_list)
x = torch.randn(5, 2, 3)
y = torch.randn(5, 3, 4)
self.run_test(EinsumModelBatchMatmul(), input=(x, y))
class EinsumModelInnerProd(torch.nn.Module):
def forward(self, *tensor_list):
eqn = 'i,i'
return torch.einsum(eqn, *tensor_list)
x = torch.randn(5)
y = torch.randn(5)
self.run_test(EinsumModelInnerProd(), input=(x, y))
class EinsumModelTranspose(torch.nn.Module):
def forward(self, *tensor_list):
eqn = 'ij->ji'
return torch.einsum(eqn, *tensor_list)
x = torch.randn(3, 4)
self.run_test(EinsumModelTranspose(), input=(x,))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_crossentropyloss(self):
for ignore_index in [-100, 1]:
x = torch.randn(3, 5)
y = torch.empty(3, dtype=torch.long).random_(5)
y[y == 1] = ignore_index
self._crossentropyloss(x, y, ignore_index)
x = torch.randn(3, 5, 2)
y = torch.empty(3, 2, dtype=torch.long).random_(5)
y[y == 1] = ignore_index
self._crossentropyloss(x, y, ignore_index)
x = torch.randn(3, 5, 2, 7)
y = torch.empty(3, 2, 7, dtype=torch.long).random_(5)
y[y == 1] = ignore_index
self._crossentropyloss(x, y, ignore_index)
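# Shared helper: exports CrossEntropyLoss with reduction 'none', 'sum' and 'mean',
# each with and without class weights, forwarding ignore_index when it is not the default.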
def _crossentropyloss(self, x, y, ignore_index):
class CrossEntropyLossNone(torch.nn.Module):
def __init__(self, ignore_index):
super(CrossEntropyLossNone, self).__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(reduction='none')
else:
self.loss = torch.nn.CrossEntropyLoss(reduction='none', ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossNone(ignore_index), input=(x, y))
class CrossEntropyLossNoneWeight(torch.nn.Module):
def __init__(self, ignore_index):
super(CrossEntropyLossNoneWeight, self).__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(reduction='none', weight=torch.randn(5))
else:
self.loss = torch.nn.CrossEntropyLoss(reduction='none', weight=torch.randn(5), ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossNoneWeight(ignore_index), input=(x, y))
class CrossEntropyLossSum(torch.nn.Module):
def __init__(self, ignore_index):
super(CrossEntropyLossSum, self).__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(reduction='sum')
else:
self.loss = torch.nn.CrossEntropyLoss(reduction='sum', ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossSum(ignore_index), input=(x, y))
class CrossEntropyLossSumWeight(torch.nn.Module):
def __init__(self, ignore_index):
super(CrossEntropyLossSumWeight, self).__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(reduction='sum', weight=torch.randn(5))
else:
self.loss = torch.nn.CrossEntropyLoss(reduction='sum', weight=torch.randn(5), ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossSumWeight(ignore_index), input=(x, y))
class CrossEntropyLossMean(torch.nn.Module):
def __init__(self, ignore_index):
super(CrossEntropyLossMean, self).__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss()
else:
self.loss = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossMean(ignore_index), input=(x, y))
class CrossEntropyLossMeanWeight(torch.nn.Module):
def __init__(self, ignore_index):
super(CrossEntropyLossMeanWeight, self).__init__()
if ignore_index == -100:
self.loss = torch.nn.CrossEntropyLoss(weight=torch.randn(5))
else:
self.loss = torch.nn.CrossEntropyLoss(weight=torch.randn(5), ignore_index=ignore_index)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(CrossEntropyLossMeanWeight(ignore_index), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # Output dtype mismatch
def test_kldiv_loss(self):
x = torch.randn(5)
y = torch.randn(5)
self._kldiv_loss(x, y)
x = torch.randn(2, 3, 5)
y = torch.randn(2, 3, 5)
self._kldiv_loss(x, y)
x = torch.randn(2, 3, 5, 7)
y = torch.randn(2, 3, 5, 7)
self._kldiv_loss(x, y)
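# Shared helper: exports KLDivLoss under the 'none', 'mean', 'sum' and 'batchmean'
# reductions, mixing log_target=True and log_target=False.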
def _kldiv_loss(self, x, y):
class KLDivLossNone(torch.nn.Module):
def __init__(self):
super(KLDivLossNone, self).__init__()
self.loss = torch.nn.KLDivLoss(reduction='none', log_target=True)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossNone(), input=(x, y))
class KLDivLossMean(torch.nn.Module):
def __init__(self):
super(KLDivLossMean, self).__init__()
self.loss = torch.nn.KLDivLoss(reduction='mean', log_target=False)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossMean(), input=(x, y))
class KLDivLossSum(torch.nn.Module):
def __init__(self):
super(KLDivLossSum, self).__init__()
self.loss = torch.nn.KLDivLoss(reduction='sum', log_target=True)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossSum(), input=(x, y))
class KLDivLossBatchMean(torch.nn.Module):
def __init__(self):
super(KLDivLossBatchMean, self).__init__()
self.loss = torch.nn.KLDivLoss(reduction='batchmean', log_target=False)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossBatchMean(), input=(x, y))
class KLDivLossMiniBatchMean(torch.nn.Module):
def __init__(self):
super(KLDivLossMiniBatchMean, self).__init__()
self.loss = torch.nn.KLDivLoss(reduction='batchmean', size_average=False, log_target=True)
def forward(self, input, target):
return self.loss(input, target)
self.run_test(KLDivLossMiniBatchMean(), input=(x, y))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='none')
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(2 * input), target)
return output
N, C = 5, 4
input = torch.randn(N, 16)
target = torch.empty(N, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss_2d_none(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='none')
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss_2d_mean(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='mean')
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss_2d_sum(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='sum')
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss_2d_mean_weights(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='mean', weight=torch.randn(C))
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
# using test data containing default ignore_index=-100
target[target == 1] = -100
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss_2d_mean_ignore_index(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='mean', ignore_index=1)
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
self.run_test(NLLModel(), (input, target))
@skipIfUnsupportedMinOpsetVersion(12)
@disableScriptTest() # shape/type inference
def test_nllloss_2d_mean_ignore_index_weights(self):
class NLLModel(torch.nn.Module):
def __init__(self):
super(NLLModel, self).__init__()
self.loss = torch.nn.NLLLoss(reduction='mean', weight=torch.randn(C), ignore_index=1)
self.conv = torch.nn.Conv2d(16, C, (3, 3))
self.m = torch.nn.LogSoftmax(dim=1)
def forward(self, input, target):
output = self.loss(self.m(self.conv(input)), target)
return output
N, C = 5, 4
input = torch.randn(N, 16, 10, 10)
target = torch.empty(N, 8, 8, dtype=torch.long).random_(0, C)
self.run_test(NLLModel(), (input, target))
def test_torch_mm(self):
class M(torch.nn.Module):
def forward(self, mat1, mat2):
mm = torch.mm(mat1, mat2)
return mm
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 3)
self.run_test(M(), input=(mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(9) # Because where op is not supported for opset < 9.
def test_where_with_bool_tensor(self):
class M(torch.nn.Module):
def forward(self, mat1, mat2):
out = torch.where(mat1 > 0, mat1, mat2)
return out
mat1 = torch.randn(2, 3)
mat2 = torch.ones(2, 3)
self.run_test(M(), input=(mat1, mat2))
@skipIfUnsupportedMinOpsetVersion(9) # Because where op is not supported for opset < 9.
def test_where_with_byte_tensor(self):
class M(torch.nn.Module):
def forward(self, cond, mat1, mat2):
out = torch.where(cond, mat1, mat2)
return out
cond = torch.ones(2, 3, dtype=torch.uint8)
cond[1, 2] = 0
mat1 = torch.randn(2, 3)
mat2 = torch.ones(2, 3)
self.run_test(M(), input=(cond, mat1, mat2))
def test_dropout(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.dropout = torch.nn.Dropout(0.3)
def forward(self, x):
dropout = self.dropout(x)
return dropout
x = torch.randn(10, 3, 53)
self.run_test(M(), (x))
def test_shape_constant_fold(self):
class ShapeModule(torch.nn.Module):
def __init__(self):
super(ShapeModule, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
shape = self.weight.shape[0]
return x + shape
x = torch.randn(2, 5)
self.run_test(ShapeModule(), (x,), rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu(self):
class Celu(torch.nn.Module):
def __init__(self):
super(Celu, self).__init__()
self.celu = torch.nn.CELU(alpha=1.0)
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_default(self):
class Celu(torch.nn.Module):
def __init__(self):
super(Celu, self).__init__()
self.celu = torch.nn.CELU()
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_alpha(self):
class Celu(torch.nn.Module):
def __init__(self):
super(Celu, self).__init__()
self.celu = torch.nn.CELU(alpha=2.)
def forward(self, input):
return self.celu(input)
input = torch.randn(2)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(12)
def test_celu_cast(self):
class Celu(torch.nn.Module):
def __init__(self):
super(Celu, self).__init__()
self.celu = torch.nn.CELU()
def forward(self, input):
return self.celu(input)
input = torch.randn(2, 5, 7, dtype=torch.float64)
self.run_test(Celu(), (input,))
@skipIfUnsupportedMinOpsetVersion(9)
def test_where(self):
class Model(torch.nn.Module):
def forward(self, cond, input, other):
return torch.where(cond, input, other)
x = torch.randint(0, 1, (2, 3, 4), dtype=torch.bool)
y = torch.randn(2, 1, 4)
z = torch.ones(2, 3, 1)
self.run_test(Model(), (x, y, z))
@skipIfUnsupportedMinOpsetVersion(9)
@disableScriptTest() # symbolic update needed for unbind: ONNX export of unbind with dynamic number of outputs
def test_where_condition(self):
class Model1(torch.nn.Module):
def forward(self, input):
return torch.stack(torch.where(input > 0.5), dim=1)
x = torch.randint(0, 2, (2, 3, 4), dtype=bool)
self.run_test(Model1(), (x))
class Model2(torch.nn.Module):
def forward(self, input, other):
return torch.stack(torch.where(input > other), dim=1)
x = torch.randint(0, 1, (2, 3, 4), dtype=bool)
y = torch.randint(1, 2, (2, 3, 4), dtype=bool)
self.run_test(Model2(), (x, y))
def test_empty_branch(self):
class EmptyBranchModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input):
out = input + 1
if out.dim() > 2:
if out.dim() > 3:
out += 3
else:
pass
else:
pass
return out
x = torch.randn(1, 2, 3, requires_grad=True)
self.run_test(EmptyBranchModel(), x)
@skipIfONNXShapeInference(False)
@skipIfUnsupportedMinOpsetVersion(11)
def test_if_transpose(self):
class IfModel(torch.nn.Module):
def forward(self, x):
x = x.transpose(0, 1)
if x.size(0) == 2:
return x.transpose(0, 1)
else:
return x
x = torch.randn(2, 3)
self.run_test(torch.jit.script(IfModel()), x,
output_names=['output_1'],
dynamic_axes={'output_1': [0, 1]})
def test_onnx_proto_checker(self):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
return 2 * x
x = torch.randn(1, 2, 3, requires_grad=True)
f = io.BytesIO()
torch.onnx._export(Model(), x, f)
model = onnx.load(f)
model.ir_version = 0
def check_proto():
torch._C._check_onnx_proto(model.SerializeToString())
self.assertRaises(RuntimeError, check_proto)
@disableScriptTest() # dtype mismatch
def test_split_tensor_scalar(self):
class SplitModel(torch.nn.Module):
def forward(self, x):
return torch.split(x, x.size(1))
x = torch.randn(1, 2, 3, requires_grad=True)
self.run_test(SplitModel(), x)
def test_split_tensor_multi(self):
class SplitModel(torch.nn.Module):
def forward(self, x):
return torch.split(x, torch.ones(3))
x = torch.randn(1, 2, 3, requires_grad=True)
def run_model():
SplitModel(x)
self.assertRaises(TypeError, run_model)
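# The RNN helpers below are not invoked directly; setup_rnn_tests() at module scope
# generates one test method per option combination via make_test(). 'packed_sequence'
# selects plain tensors (0), packed sequences (1), or batch-first packed sequences (2).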
def _dispatch_rnn_test(self, name, *args, **kwargs):
if name == 'elman':
self._elman_rnn_test(*args, **kwargs)
if name == 'lstm':
self._lstm_test(*args, **kwargs)
if name == 'gru':
self._gru_test(*args, **kwargs)
def _elman_rnn_test(self, layers, nonlinearity, bidirectional,
initial_state, packed_sequence, dropout):
batch_first = True if packed_sequence == 2 else False
model = torch.nn.RNN(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, nonlinearity=nonlinearity,
bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_test(model, input, batch_size=RNN_BATCH_SIZE)
# test that the model still runs with a different batch size
other_input = make_input(RNN_BATCH_SIZE + 1)
self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
def _lstm_test(self, layers, bidirectional, initial_state,
packed_sequence, dropout):
batch_first = True if packed_sequence == 2 else False
model = LstmFlatteningResult(
RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers,
bidirectional=bidirectional, dropout=dropout, batch_first=batch_first)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
c0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append((h0, c0))
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_test(model, input, batch_size=RNN_BATCH_SIZE)
# test that the model still runs with a different batch size
other_input = make_input(RNN_BATCH_SIZE + 1)
self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
def _gru_test(self, layers, bidirectional, initial_state,
packed_sequence, dropout):
batch_first = True if packed_sequence == 2 else False
model = torch.nn.GRU(RNN_INPUT_SIZE, RNN_HIDDEN_SIZE, layers, bidirectional=bidirectional, dropout=dropout,
batch_first=batch_first)
if packed_sequence == 1:
model = RnnModelWithPackedSequence(model, False)
if packed_sequence == 2:
model = RnnModelWithPackedSequence(model, True)
def make_input(batch_size):
seq_lengths = np.random.randint(1, RNN_SEQUENCE_LENGTH + 1, size=batch_size)
seq_lengths = list(reversed(sorted(map(int, seq_lengths))))
inputs = [torch.randn(l, RNN_INPUT_SIZE) for l in seq_lengths]
inputs = rnn_utils.pad_sequence(inputs, batch_first=batch_first)
inputs = [inputs]
directions = 2 if bidirectional else 1
if initial_state:
h0 = torch.randn(directions * layers, batch_size, RNN_HIDDEN_SIZE)
inputs.append(h0)
if packed_sequence != 0:
inputs.append(torch.IntTensor(seq_lengths))
if len(inputs) == 1:
input = inputs[0]
else:
input = tuple(inputs)
return input
input = make_input(RNN_BATCH_SIZE)
self.run_test(model, input, batch_size=RNN_BATCH_SIZE)
# test that the model still runs with a different batch size
other_input = make_input(RNN_BATCH_SIZE + 1)
self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1)
@skipIfUnsupportedMinOpsetVersion(10)
def test_fake_quantize_per_tensor(self):
class FakeQuantizePerTensorModel(torch.nn.Module):
def forward(self, input):
scale = 1. / 127
zero_point = 0
quant_min = -128
quant_max = 127
return torch.fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max)
x = torch.randn(6, 4, 3, 3)
self.run_test(FakeQuantizePerTensorModel(), (x))
@skipIfUnsupportedMinOpsetVersion(12)
def test_dropout_training(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.dropout = torch.nn.Dropout(0.4)
def forward(self, x):
dropout = self.dropout(x)
return dropout
model = MyModule()
x = torch.randn(10)
model.train()
ort_sess = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
training=torch.onnx.TrainingMode.TRAINING)
ort_outs = run_ort(ort_sess, input=(x,))
assert not torch.all(torch.eq(x, torch.from_numpy(ort_outs[0])))
@skipIfUnsupportedMinOpsetVersion(12)
def test_dropout_training_zero(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
dropout = self.dropout(x)
return dropout
model = MyModule()
# ensure there are no zeros in the input
x = torch.randn(10, 3, 128, 128)
y = x.numpy()
y_mask = np.where(y == 0, 1, y)
input = torch.from_numpy(y_mask)
nb_elements = torch.numel(input)
model.train()
ort_sess = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
training=torch.onnx.TrainingMode.TRAINING)
ort_outs = run_ort(ort_sess, input=(x,))
y = model(input)
output = y.cpu().numpy()
ort_mask = np.where(ort_outs[0] != 0, 1, 0)
pyt_mask = np.where(output != 0, 1, 0)
ratio_pytorch = np.sum(pyt_mask) / nb_elements
ratio_ort = np.sum(ort_mask) / nb_elements
np.testing.assert_allclose(ratio_pytorch, ratio_ort, rtol=0.01, atol=0.01)
def test_conv_bn(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = torch.nn.Conv2d(3, 16, kernel_size=1, stride=2, padding=3, bias=True)
self.bn = torch.nn.BatchNorm2d(16, affine=True)
def forward(self, x):
x = self.conv(x)
bn = self.bn(x)
return bn
model = MyModule()
x = torch.randn(10, 3, 128, 128)
ort_sess1 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
training=torch.onnx.TrainingMode.TRAINING)
ort_outs1 = run_ort(ort_sess1, input=(x,))
ort_sess2 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
training=torch.onnx.TrainingMode.EVAL)
ort_outs2 = run_ort(ort_sess2, input=(x,))
[np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in
zip(ort_outs1, ort_outs2)]
def test_multiple_conv_bn(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.conv2 = torch.nn.Conv2d(64, 2, kernel_size=1, stride=1, padding=0, bias=False)
self.conv3 = torch.nn.Conv2d(2, 2, kernel_size=3, stride=1, padding=1, bias=False)
self.bn = torch.nn.BatchNorm2d(64)
self.bn2 = torch.nn.BatchNorm2d(2)
self.relu = torch.nn.ReLU(inplace=True)
self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.conv1(x)
x = self.bn(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn2(x)
x = self.relu(x)
return x
model = MyModule()
x = torch.randn(2, 3, 224, 224)
ort_sess1 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
training=torch.onnx.TrainingMode.TRAINING)
ort_outs1 = run_ort(ort_sess1, input=(x,))
ort_sess2 = convert_to_onnx(model, input=(x,), opset_version=self.opset_version,
training=torch.onnx.TrainingMode.EVAL)
ort_outs2 = run_ort(ort_sess2, input=(x,))
[np.testing.assert_allclose(ort_out1, ort_out2, atol=1e-7, rtol=0.001) for ort_out1, ort_out2 in
zip(ort_outs1, ort_outs2)]
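# make_test() builds a single parameterized RNN export test and attaches it to
# TestONNXRuntime; setup_rnn_tests() below enumerates every combination of layer
# count, direction, initial state, sequence packing, and dropout.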
def make_test(name, base, layer, bidirectional, initial_state,
variable_length, dropout,
**extra_kwargs):
test_name = str('_'.join([
'test', name, layer[1],
bidirectional[1], initial_state[1],
variable_length[1], dropout[1]
]))
# Cannot export with older opsets because of 'ConstantFill' op
# ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime
@disableScriptTest() # Test code not scriptable
@skipIfUnsupportedMinOpsetVersion(9)
def f(self):
self._dispatch_rnn_test(
base,
layers=layer[0],
bidirectional=bidirectional[0],
initial_state=initial_state[0],
packed_sequence=variable_length[0],
dropout=dropout[0],
**extra_kwargs)
f.__name__ = test_name
setattr(TestONNXRuntime, f.__name__, f)
def setup_rnn_tests():
layers_opts = [
(1, 'unilayer'),
(3, 'trilayer')
]
bidirectional_opts = [
(False, 'forward'),
(True, 'bidirectional')
]
initial_state_opts = [
(True, 'with_initial_state'),
(False, 'no_initial_state')
]
variable_length_opts = [
(0, 'without_sequence_lengths'),
(1, 'with_variable_length_sequences'),
(2, 'with_batch_first_sequence_lengths')
]
dropout_opts = [
(0.2, 'with_dropout'),
(0.0, 'without_dropout')
]
test_count = 0
for (layer, bidirectional, initial_state, variable_length, dropout) in \
itertools.product(
layers_opts,
bidirectional_opts,
initial_state_opts,
variable_length_opts,
dropout_opts,):
for base, name, extra_kwargs in (
('elman', 'elman_relu', {'nonlinearity': u'relu'}),
('elman', 'elman_tanh', {'nonlinearity': u'tanh'}),
('lstm', 'lstm', {}),
('gru', 'gru', {})
):
make_test(name, base, layer, bidirectional, initial_state,
variable_length, dropout,
**extra_kwargs)
test_count += 1
# sanity check that a representative example does exist
TestONNXRuntime.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout
# make sure no one accidentally disables all the tests without
# noticing
if test_count != 192:
raise ValueError('Expected 192 tests but found {}'.format(test_count))
setup_rnn_tests()
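# Each class below re-runs the full TestONNXRuntime suite under a different export
# configuration (opset version, keep_initializers_as_inputs, new JIT passes,
# ONNX shape inference) by overriding class attributes via type().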
# opset 7 tests
TestONNXRuntime_opset7 = type(str("TestONNXRuntime_opset7"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=7))
# opset 8 tests
TestONNXRuntime_opset8 = type(str("TestONNXRuntime_opset8"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=8))
# opset 10 tests
TestONNXRuntime_opset10 = type(str("TestONNXRuntime_opset10"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=10))
# opset 11 tests
TestONNXRuntime_opset11 = type(str("TestONNXRuntime_opset11"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=11))
# opset 12 tests
TestONNXRuntime_opset12 = type(str("TestONNXRuntime_opset12"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=12))
# opset 9 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset9_IRv4 = type(str("TestONNXRuntime_opset9_IRv4"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__,
keep_initializers_as_inputs=False))
# opset 10 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset10_IRv4 = type(str("TestONNXRuntime_opset10_IRv4"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=10,
keep_initializers_as_inputs=False))
# opset 11 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset11_IRv4 = type(str("TestONNXRuntime_opset11_IRv4"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=11,
keep_initializers_as_inputs=False))
# opset 12 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset12_IRv4 = type(str("TestONNXRuntime_opset12_IRv4"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=12,
keep_initializers_as_inputs=False))
# opset 9 tests, with use_new_jit_passes=True for using new jit API,
# and with keep_initializers_as_inputs=False for IR version 4 style export.
TestONNXRuntime_opset9_IRv4_new_jit_API = type(str("TestONNXRuntime_opset9_IRv4_new_jit_API"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__,
keep_initializers_as_inputs=False,
use_new_jit_passes=True,
onnx_shape_inference=True))
# opset 12 tests, with use_new_jit_passes=True for using new jit API,
# and keep_initializers_as_inputs=False for IR version 4 style export.
TestONNXRuntime_opset12_IRv4_new_jit_API = type(str("TestONNXRuntime_opset12_IRv4_new_jit_API"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=12,
keep_initializers_as_inputs=False,
use_new_jit_passes=True,
onnx_shape_inference=True))
# opset 12 tests, with _onnx_shape_inference=True.
TestONNXRuntime_opset12_onnx_shape_inference = type(str("TestONNXRuntime_opset12_onnx_shape_inference"),
(unittest.TestCase,),
dict(TestONNXRuntime.__dict__, opset_version=12,
onnx_shape_inference=True))
if __name__ == '__main__':
unittest.main()
|
readthedocs/projects/migrations/0021_add-webhook-deprecation-feature.py | tkoyama010/readthedocs.org | 4,054 | 12758887 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""Add feature for allowing access to deprecated webhook endpoints."""
from django.db import migrations
FEATURE_ID = 'allow_deprecated_webhooks'
def forward_add_feature(apps, schema_editor):
Feature = apps.get_model('projects', 'Feature')
Feature.objects.create(
feature_id=FEATURE_ID,
default_true=True,
)
def reverse_add_feature(apps, schema_editor):
Feature = apps.get_model('projects', 'Feature')
Feature.objects.filter(feature_id=FEATURE_ID).delete()
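# The forward/reverse callables are paired in RunPython below so this data migration
# can be applied and rolled back symmetrically; the reverse simply removes the row.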
class Migration(migrations.Migration):
dependencies = [
('projects', '0020_add-api-project-proxy'),
]
operations = [
migrations.RunPython(forward_add_feature, reverse_add_feature),
]
|
test/tet_train.py | luoyudong593/Keras-TextClassification | 1,339 | 12758889 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2019/11/12 16:45
# @author : Mo
# @function:
from keras_textclassification import train
train(graph='TextCNN', # required, algorithm name; one of "ALBERT","BERT","XLNET","FASTTEXT","TEXTCNN","CHARCNN",
# "TEXTRNN","RCNN","DCNN","DPCNN","VDCNN","CRNN","DEEPMOJI",
# "SELFATTENTION", "HAN","CAPSULE","TRANSFORMER"
label=17, # required, number of classes; must be the same for the training and test sets
path_train_data=None, # required, training data file, csv format, must contain a 'label,ques' header; see keras_textclassification/data
path_dev_data=None, # required, test data file, csv format, must contain a 'label,ques' header; see keras_textclassification/data
rate=1, # optional, proportion of the training data to use
hyper_parameters=None) # optional, hyperparameters in json format; default embedding is 'char','random'
|
gcp_variant_transforms/transforms/merge_header_definitions_test.py | tsa87/gcp-variant-transforms | 113 | 12758894 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for merge_header_definitions module."""
import unittest
from pysam import libcbcf
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import Create
from gcp_variant_transforms.beam_io import vcf_header_io
from gcp_variant_transforms.transforms import merge_header_definitions
from gcp_variant_transforms.libs.vcf_header_definitions_merger import Definition
from gcp_variant_transforms.libs.vcf_header_definitions_merger import VcfHeaderDefinitions
class MergeHeadersTest(unittest.TestCase):
def _get_header_from_lines(self, lines, file_path):
header = libcbcf.VariantHeader()
for line in lines[:-1]:
header.add_line(line)
return vcf_header_io.VcfHeader(infos=header.info,
filters=header.filters,
alts=header.alts,
formats=header.formats,
contigs=header.contigs,
file_path=file_path)
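# Each test below builds VcfHeader objects from raw header lines, feeds them through
# the MergeDefinitions transform in a TestPipeline, and checks the resulting
# definition-to-file mapping against a hand-built VcfHeaderDefinitions object.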
def test_merge_header_definitions_one_header(self):
lines = [
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n'
]
headers = self._get_header_from_lines(lines, 'file1')
pipeline = TestPipeline()
merged_definitions = (
pipeline
| Create([headers])
| 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
expected = VcfHeaderDefinitions()
expected._infos = {'NS': {Definition(1, 'Integer'): ['file1']}}
assert_that(merged_definitions, equal_to([expected]))
pipeline.run()
def test_merge_header_definitions_two_conflicting_headers(self):
lines_1 = [
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n'
]
lines_2 = [
'##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n'
]
headers_1 = self._get_header_from_lines(lines_1, 'file1')
headers_2 = self._get_header_from_lines(lines_2, 'file2')
pipeline = TestPipeline()
merged_definitions = (
pipeline
| Create([headers_1, headers_2])
| 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
expected = VcfHeaderDefinitions()
expected._infos = {'NS': {Definition(1, 'Integer'): ['file1'],
Definition(1, 'Float'): ['file2']}}
assert_that(merged_definitions, equal_to([expected]))
pipeline.run()
def test_merge_header_definitions_no_conflicting_headers(self):
lines_1 = [
'##FORMAT=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n'
]
lines_2 = [
'##FORMAT=<ID=DP,Number=2,Type=Float,Description="Total Depth">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n'
]
headers_1 = self._get_header_from_lines(lines_1, 'file1')
headers_2 = self._get_header_from_lines(lines_2, 'file2')
pipeline = TestPipeline()
merged_definitions = (
pipeline
| Create([headers_1, headers_2])
| 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
expected = VcfHeaderDefinitions()
expected._formats = {'NS': {Definition(1, 'Float'): ['file1']},
'DP': {Definition(2, 'Float'): ['file2']}}
assert_that(merged_definitions, equal_to([expected]))
pipeline.run()
def test_merge_header_definitions_same_id_in_info_and_format_headers(self):
lines_1 = [
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n'
]
lines_2 = [
'##FORMAT=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n'
]
headers_1 = self._get_header_from_lines(lines_1, 'file1')
headers_2 = self._get_header_from_lines(lines_2, 'file2')
pipeline = TestPipeline()
merged_definitions = (
pipeline
| Create([headers_1, headers_2])
| 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
expected = VcfHeaderDefinitions()
expected._infos = {'NS': {Definition(1, 'Integer'): ['file1']}}
expected._formats = {'NS': {Definition(1, 'Float'): ['file2']}}
assert_that(merged_definitions, equal_to([expected]))
pipeline.run()
def test_merge_header_definitions_save_five_copies(self):
lines_1 = [
'##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n'
]
lines_2 = [
'##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n',
'#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n'
]
file_names = ['file1', 'file2', 'file3', 'file4', 'file5', 'file6']
headers = []
for file_name in file_names:
headers.append(self._get_header_from_lines(lines_1, file_name))
headers.append(self._get_header_from_lines(lines_2, 'file7'))
pipeline = TestPipeline()
merged_definitions = (
pipeline
| Create(headers, reshuffle=False)
| 'MergeDefinitions' >> merge_header_definitions.MergeDefinitions())
expected = VcfHeaderDefinitions()
expected._infos = {
'NS': {Definition(1, 'Float'):
['file1', 'file2', 'file3', 'file4', 'file5'],
Definition(1, 'Integer'): ['file7']}}
assert_that(merged_definitions, equal_to([expected]))
pipeline.run()
|
demo/helper.py | scottx611x/napkin | 190 | 12758913 | <reponame>scottx611x/napkin
import inspect
import os
import napkin
def generate_markdown_file(title, src_path):
name, _ = os.path.splitext(os.path.basename(src_path))
src_file = name + '.py'
napkin.generate(output_format='plantuml_png', output_dir='../images')
text = """# {title}
The following examples are auto-generated by
[demo/{src_file}](demo/{src_file})
""".format(title=title, src_file=src_file)
for diagram in napkin._collected_seq_diagrams:
text += """## {name}

```python
{src}
```
""".format(src=inspect.getsource(diagram.sd_func),
name=diagram.name,
image_file=diagram.name.replace(' ', '%20') + '.png')
md_file = '../{}.md'.format(name.upper())
with open(md_file, 'wt') as f:
f.write(text)
print('MD file generated : {}'.format(md_file))
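# Hypothetical usage from a demo script (the title is illustrative, not from the original source):
# generate_markdown_file('Sequence Diagram Examples', __file__)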
|
tests/unit/output/schema/__init__.py | jaebradley/draftkings_client | 111 | 12758925 | <gh_stars>100-1000
"""
Represents tests defined in the draft_kings.output.schema module.
Most tests center around serializing / deserializing output objects using the marshmallow library.
"""
|
junction/conferences/migrations/0001_initial.py | theSage21/junction | 192 | 12758931 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django_extensions.db.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Conference",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
),
(
"modified_at",
models.DateTimeField(
auto_now=True, verbose_name="Last Modified At"
),
),
(
"name",
models.CharField(max_length=255, verbose_name="Conference Name"),
),
(
"slug",
django_extensions.db.fields.AutoSlugField(
editable=False,
populate_from=("name",),
max_length=255,
blank=True,
unique=True,
),
),
("description", models.TextField(default="")),
("start_date", models.DateField(verbose_name="Start Date")),
("end_date", models.DateField(verbose_name="End Date")),
(
"status",
models.PositiveSmallIntegerField(
verbose_name="Current Status",
choices=[
(1, b"Accepting Call for Proposals"),
(2, b"Closed for Proposals"),
(3, b"Accepting Votes"),
(4, b"Schedule Published"),
],
),
),
(
"deleted",
models.BooleanField(default=False, verbose_name="Is Deleted?"),
),
(
"created_by",
models.ForeignKey(
related_name="created_conference_set",
verbose_name="Created By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
(
"modified_by",
models.ForeignKey(
related_name="updated_conference_set",
verbose_name="Modified By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
],
options={"abstract": False},
bases=(models.Model,),
),
migrations.CreateModel(
name="ConferenceModerator",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
),
(
"modified_at",
models.DateTimeField(
auto_now=True, verbose_name="Last Modified At"
),
),
(
"active",
models.BooleanField(default=True, verbose_name="Is Active?"),
),
(
"conference",
models.ForeignKey(
to="conferences.Conference", on_delete=models.deletion.CASCADE,
),
),
(
"created_by",
models.ForeignKey(
related_name="created_conferencemoderator_set",
verbose_name="Created By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
(
"moderator",
models.ForeignKey(
to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
),
),
(
"modified_by",
models.ForeignKey(
related_name="updated_conferencemoderator_set",
verbose_name="Modified By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
],
options={"abstract": False},
bases=(models.Model,),
),
migrations.CreateModel(
name="ConferenceProposalReviewer",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"created_at",
models.DateTimeField(auto_now_add=True, verbose_name="Created At"),
),
(
"modified_at",
models.DateTimeField(
auto_now=True, verbose_name="Last Modified At"
),
),
(
"active",
models.BooleanField(default=True, verbose_name="Is Active?"),
),
(
"conference",
models.ForeignKey(
to="conferences.Conference", on_delete=models.deletion.CASCADE,
),
),
(
"created_by",
models.ForeignKey(
related_name="created_conferenceproposalreviewer_set",
verbose_name="Created By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
(
"modified_by",
models.ForeignKey(
related_name="updated_conferenceproposalreviewer_set",
verbose_name="Modified By",
blank=True,
on_delete=models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
null=True,
),
),
(
"reviewer",
models.ForeignKey(
to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE,
),
),
],
options={},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name="conferenceproposalreviewer",
unique_together=set([("conference", "reviewer")]),
),
]
|
smartsheet/models/webhook_stats.py | bromic007/smartsheet-python-sdk | 106 | 12758933 | # pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2017 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json

from ..types import *
from ..util import serialize
from ..util import deserialize
class WebhookStats(object):
"""Smartsheet WebhookStats data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the WebhookStats model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._last_callback_attempt = Timestamp()
self._last_callback_attempt_retry_count = Number()
self._last_successful_callback = Timestamp()
if props:
deserialize(self, props)
# requests package Response object
self.request_response = None
self.__initialized = True
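# Each public attribute below is exposed as a property over a typed holder
# (Timestamp / Number); the setters write through to the holder's .value so
# serialization via serialize()/to_dict() stays consistent.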
@property
def last_callback_attempt(self):
return self._last_callback_attempt.value
@last_callback_attempt.setter
def last_callback_attempt(self, value):
self._last_callback_attempt.value = value
@property
def last_callback_attempt_retry_count(self):
return self._last_callback_attempt_retry_count.value
@last_callback_attempt_retry_count.setter
def last_callback_attempt_retry_count(self, value):
self._last_callback_attempt_retry_count.value = value
@property
def last_successful_callback(self):
return self._last_successful_callback.value
@last_successful_callback.setter
def last_successful_callback(self, value):
self._last_successful_callback.value = value
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
|