max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
cupyx/scipy/sparse/linalg/_norm.py | Onkar627/cupy | 6,180 | 12648260 | import numpy
import cupy
import cupyx.scipy.sparse
def _sparse_frobenius_norm(x):
    if cupy.issubdtype(x.dtype, cupy.complexfloating):
        sqnorm = abs(x).power(2).sum()
    else:
        sqnorm = x.power(2).sum()
    return cupy.sqrt(sqnorm)


def norm(x, ord=None, axis=None):
    """Norm of a cupyx.scipy.sparse.spmatrix

    This function is able to return one of seven different sparse matrix norms,
    depending on the value of the ``ord`` parameter.

    Args:
        x (sparse matrix) : Input sparse matrix.
        ord (non-zero int, inf, -inf, 'fro', optional) : Order of the norm (see
            table under ``Notes``). inf means numpy's `inf` object.
        axis (int, 2-tuple of ints, None, optional) : If `axis` is an
            integer, it specifies the axis of `x` along which to
            compute the vector norms. If `axis` is a 2-tuple, it specifies the
            axes that hold 2-D matrices, and the matrix norms of these matrices
            are computed. If `axis` is None then either a vector norm
            (when `x` is 1-D) or a matrix norm (when `x` is 2-D) is returned.

    Returns:
        ndarray : 0-D or 1-D array of norm(s).

    .. seealso:: :func:`scipy.sparse.linalg.norm`
    """
    if not cupyx.scipy.sparse.issparse(x):
        raise TypeError('input is not sparse. use cupy.linalg.norm')

    # Check the default case first and handle it immediately.
    if axis is None and ord in (None, 'fro', 'f'):
        return _sparse_frobenius_norm(x)

    # Some norms require functions that are not implemented for all types.
    x = x.tocsr()

    if axis is None:
        axis = (0, 1)
    elif not isinstance(axis, tuple):
        msg = "'axis' must be None, an integer or a tuple of integers"
        try:
            int_axis = int(axis)
        except TypeError:
            raise TypeError(msg)
        if axis != int_axis:
            raise TypeError(msg)
        axis = (int_axis,)

    nd = 2
    if len(axis) == 2:
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            raise NotImplementedError
            # return _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            raise NotImplementedError
            # return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            return abs(x).sum(axis=row_axis).max()
        elif ord == numpy.Inf:
            return abs(x).sum(axis=col_axis).max()
        elif ord == -1:
            return abs(x).sum(axis=row_axis).min()
        elif ord == -numpy.Inf:
            return abs(x).sum(axis=col_axis).min()
        elif ord in (None, 'f', 'fro'):
            # The axis order does not matter for this norm.
            return _sparse_frobenius_norm(x)
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif len(axis) == 1:
        a, = axis
        if not (-nd <= a < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if ord == numpy.Inf:
            return abs(x).max(axis=a).A.ravel()
        elif ord == -numpy.Inf:
            return abs(x).min(axis=a).A.ravel()
        elif ord == 0:
            # Zero norm
            return (x != 0).astype(numpy.float32).sum(axis=a).ravel().astype(
                numpy.int_)
        elif ord == 1:
            # special case for speedup
            return abs(x).sum(axis=a).ravel()
        elif ord in (2, None):
            return cupy.sqrt(abs(x).power(2).sum(axis=a)).ravel()
        else:
            try:
                ord + 1
            except TypeError:
                raise ValueError('Invalid norm order for vectors.')
            return cupy.power(abs(x).power(ord).sum(axis=a), 1 / ord).ravel()
    else:
        raise ValueError("Improper number of dimensions to norm.")
|
pyjswidgets/pyjamas/Canvas/CanvasGradientImplIE6.py | takipsizad/pyjs | 739 | 12648266 | """
* Copyright 2008 Google Inc.
* Copyright 2011 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
"""
import math
from pyjamas.Canvas.ColorStop import ColorStop
"""*
* Gradients for IE6 implementation need some extra meta info.
"""
class CanvasGradientImplIE6:
    def __init__(self, x0, y0, x1, y1):
        self.startX = x0
        self.startY = y0
        self.endX = x1
        self.endY = y1
        self.startRad = 0
        self.endRad = 0
        self.dx = x1 - x0
        self.dy = y1 - y0
        self.length = math.sqrt((self.dx * self.dx) + (self.dy * self.dy))
        if self.dy == 0:
            #self.angle = int(math.atan(self.dx) * 180 / math.pi) + 180
            self.angle = 90
        else:
            self.angle = int(math.atan(self.dx/self.dy) * 180 / math.pi) + 180
        self.colorStops = []

    def addColorStop(self, offset, color):
        newColorStop = ColorStop(offset, color)
        # keep colorStops sorted by offset: list.append() only takes a single
        # argument, so an ordered insert requires list.insert(index, item)
        for i in range(len(self.colorStops)):
            cs = self.colorStops[i]
            if offset < cs.offset:
                self.colorStops.insert(i, newColorStop)
                return
        self.colorStops.append(newColorStop)

    """*
    * Creates an equivalent copy of this Gradient object.
    *
    * @return returns an equivalent copy of this gradient object
    """
    def cloneGradient(self):
        pass
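
# A minimal usage sketch (illustrative only, not part of the original file):
# stops added out of order end up sorted by offset in self.colorStops.
#   g = CanvasGradientImplIE6(0, 0, 0, 100)
#   g.addColorStop(1.0, '#0000ff')
#   g.addColorStop(0.0, '#ff0000')   # inserted before the blue stop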
|
mmd_tools/properties/material.py | lsr123/PX4-loacl_code | 822 | 12648274 |
# -*- coding: utf-8 -*-
import bpy
from bpy.types import PropertyGroup
from bpy.props import BoolProperty, EnumProperty, FloatProperty, FloatVectorProperty, IntProperty, StringProperty
from mmd_tools.core import material
#===========================================
# Property classes
#===========================================
class MMDMaterial(PropertyGroup):
""" マテリアル
"""
name_j = StringProperty(
name='Name',
description='Japanese Name',
default='',
)
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
material_id = IntProperty(
name='Material ID',
default=-1
)
ambient_color = FloatVectorProperty(
name='Ambient',
subtype='COLOR',
size=3,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0],
)
is_double_sided = BoolProperty(
name='Double Sided',
description='',
default=True,
)
enabled_drop_shadow = BoolProperty(
name='Drop Shadow',
description='',
default=True,
)
enabled_self_shadow_map = BoolProperty(
name='Self Shadow Map',
description='',
default=True,
)
enabled_self_shadow = BoolProperty(
name='Self Shadow',
description='',
default=True,
)
enabled_toon_edge = BoolProperty(
name='Toon Edge',
description='',
default=True,
)
edge_color = FloatVectorProperty(
name='Edge Color',
subtype='COLOR',
size=4,
min=0,
max=1,
precision=3,
step=0.1,
default=[0, 0, 0, 1],
)
edge_weight = FloatProperty(
name='Edge Weight',
min=0,
max=100,
step=0.1,
default=0.5,
)
sphere_texture_type = EnumProperty(
name='Sphere Map Type',
description='',
items = [
(str(material.SPHERE_MODE_OFF), 'Off', '', 1),
(str(material.SPHERE_MODE_MULT), 'Multiply', '', 2),
(str(material.SPHERE_MODE_ADD), 'Add', '', 3),
(str(material.SPHERE_MODE_SUBTEX), 'SubTexture', '', 4),
],
)
is_shared_toon_texture = BoolProperty(
name='Use Shared Toon Texture',
description='',
default=False,
)
toon_texture = StringProperty(
name='Toon Texture',
subtype='FILE_PATH',
description='',
default='',
)
shared_toon_texture = IntProperty(
name='Shared Toon Texture',
description='',
default=0,
)
comment = StringProperty(
name='Comment',
)
|
tests/test_toggle_setting.py | steveklabnik/sublime-rust | 480 | 12648324 |
"""Tests for toggle command."""
from rust_test_common import *
class TestToggle(TestBase):

    def test_toggle(self):
        window = sublime.active_window()
        self.assertEqual(
            util.get_setting('rust_syntax_checking', True),
            True)
        window.run_command('toggle_rust_syntax_setting')
        self.assertEqual(
            util.get_setting('rust_syntax_checking', True),
            False)
        window.run_command('toggle_rust_syntax_setting')
        self.assertEqual(
            util.get_setting('rust_syntax_checking', True),
            True)
|
Python/tower_of_hanoi.py | MjCode01/DS-Algo-Point | 1,148 | 12648332 | def TowerOfHanoi(n, first, last, mid):
    if n == 1:
        print("Move disk 1 from rod", first, "to rod", last)
        return
    TowerOfHanoi(n-1, first, mid, last)
    print("Move disk", n, "from rod", first, "to rod", last)
    TowerOfHanoi(n-1, mid, last, first)

n = int(input())
TowerOfHanoi(n, 'F', 'L', 'M')  # move all n disks from the first rod (F) to the last rod (L), using the middle rod (M) as auxiliary

"""
Complexity of the code
- Time Complexity - O(2^n): the recurrence T(n) = 2*T(n-1) + 1 gives 2^n - 1 moves
- Space Complexity - O(n): only the recursion stack is stored, and it is at most n frames deep
"""
|
recipes/Python/496905_ActiveRecord_like_ORM_object_relatimapper_200/recipe-496905.py | tdiprima/code | 2,023 | 12648334 | # this is storm.py
import string, new, MySQLdb
from types import *
from MySQLdb.cursors import DictCursor
bag_belongs_to, bag_has_many = [],[]
def belongs_to(what): bag_belongs_to.append(what)
def has_many(what): bag_has_many.append(what)
class Mysqlwrapper:
def __init__(self,**kwds):
self.conn = MySQLdb.connect(cursorclass=DictCursor,**kwds)
self.cursor = self.conn.cursor()
self.escape = self.conn.escape_string
self.insert_id = self.conn.insert_id
self.commit = self.conn.commit
self.q = self.cursor.execute
def qone(self,query):
self.q(query)
return self.cursor.fetchone()
def qall(self,query):
self.q(query)
return self.cursor.fetchall()
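
# Relation declarations: belongs_to()/has_many() calls made inside a Storm
# subclass body are collected in the module-level bags above; MetaRecord drains
# those bags when the class object is created and attaches the corresponding
# relation methods to the new class.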
class MetaRecord(type):
def __new__(cls, name, bases, dct):
global bag_belongs_to, bag_has_many
if name in globals(): return globals()[name]
else:
Record = type.__new__(cls, name, bases, dct)
for i in bag_belongs_to: Record.belongs_to(i)
for i in bag_has_many: Record.has_many(i)
bag_belongs_to = []
bag_has_many = []
return Record
class Storm(dict):
__metaclass__ = MetaRecord
__CONN = None
@classmethod
def belongs_to(cls, what):
def dah(self):
belong_cls = globals().get(what,None)
if not belong_cls:
belong_cls = type(what,(Storm,),{})
return belong_cls.selectone(self[what+'_id'])
setattr(cls,what,new.instancemethod(dah,None,cls))
@classmethod
def has_many(cls, what):
def dah(self):
hasmany_cls = globals().get(what,None)
if not hasmany_cls:
hasmany_cls = type(what,(Storm,),{})
dct={}
dct[string.lower(cls.__name__)+'_id']=self['id']
return hasmany_cls.select(**dct)
setattr(cls,what,new.instancemethod(dah,None,cls))
@classmethod
def conn(cls, **kwds):
if not cls.__CONN: cls.__CONN = Mysqlwrapper(**kwds)
@classmethod
def exe(cls,s):
if not cls.__CONN: raise "Database not connected"
return cls.__CONN.qall(s)
@classmethod
def insert(cls,**kwds):
vs = [[k,cls.__CONN.escape(str(kwds[k]))] for k in kwds]
if vs:
s = "insert into %s (%s) values ('%s')" % (
string.lower(cls.__name__), ','.join([v[0] for v in vs]),
"','".join([v[1] for v in vs]))
cls.__CONN.q(s)
cls.__CONN.commit()
return cls.__CONN.insert_id()
else: raise "nothing to insert"
@classmethod
def select(cls,*args, **kwds):
if len(args)==1 and (type(args[0])==IntType or type(args[0])==LongType):
q = "select * from %s where id='%s'"%(string.lower(cls.__name__),args[0])
where = "where id='%s'"%args[0]
else:
if args: s = ",".join(args)
else: s = "*"
if kwds:
c,limit,orderby = [],'',''
for k in kwds:
if k == 'limit': limit = "limit "+str(kwds[k])
elif k == 'order': orderby = "order by "+str(kwds[k])
else: c.append(k+"='"+str(kwds[k])+"'")
where = " and ".join(c)
if where: where = "where %s"%where
where = "%s %s %s"%(where,orderby,limit)
else: where = ""
q = " ".join(['select',s,'from',string.lower(cls.__name__),where])
r = cls.__CONN.qall(q)
list = []
for i in r:
list.append(cls(i))
list[-1].__dict__['where'] = where
return list
@classmethod
def selectone(cls,*args, **kwds):
r = cls.select(*args,**kwds)
if r: return r[0]
else: return {}
@classmethod
def update(cls,cond,**kwds):
if not cond or not kwds: raise "Update What?!"
if type(cond) == IntType: w = "id='%d'" % cond
else: w = cond
vs = [[k,cls.__CONN.escape(str(kwds[k]))] for k in kwds]
if vs:
s = "UPDATE %s SET %s WHERE %s" % ( string.lower(cls.__name__),
','.join(["%s='%s'"%(v[0],v[1]) for v in vs]), w)
cls.__CONN.q(s)
cls.__CONN.commit()
@classmethod
def delete(cls,id):
if type(id) == IntType:
cls.__CONN.q("delete from %s where id='%d'"%
(string.lower(cls.__name__),id))
cls.__CONN.commit()
else: raise "Only accept integer argument"
def __init__(self,dct={}):
if not self.__class__.__CONN: raise "Database not connected"
dict.__init__(self,dct)
self.__dict__['cur_table']= string.lower(self.__class__.__name__)
self.__dict__['where']= ''
self.__dict__['sql_buff']={}
def sql(self,sql): self.__class__.__CONN.q(sql)
def save(self):
s = ""
if self.where:
f = []
for v in self.sql_buff:
f.append("%s='%s'"%(v,self.sql_buff[v]))
s = "UPDATE %s SET %s %s" % (
self.cur_table, ','.join(f), self.where)
else:
f,i=[],[]
for v in self.sql_buff:
f.append(v)
i.append(self.sql_buff[v])
if f and i:
s = "INSERT INTO %s (%s) VALUES ('%s')" % (
self.cur_table, ','.join(f), "','".join(i))
if s:
self.__class__.__CONN.q(s)
self.__class__.__CONN.commit()
else: raise "nothing to insert"
def __setattr__(self,attr,value):
if attr in self.__dict__: self.__dict__[attr]=value
else:
v = self.__class__.__CONN.escape(str(value))
self.__dict__['sql_buff'][attr] = v
self[attr] = v
def __getattr__(self,attr):
if attr in self.__dict__: return self.__dict__[attr]
try: return self[attr]
except KeyError: pass
raise AttributeError
__all__ = ['Storm', 'belongs_to', 'has_many']
#----------------- end of storm.py ----------------
Below is a session transcript showing this ORM (Storm) in use:
-------------------------------------------------------------
wang@dapper-03:~/spark/lib$ mysql -u root
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 46 to server version: 5.0.22-Debian_0ubuntu6.06-log
Type 'help;' or '\h' for help. Type '\c' to clear the buffer.
mysql> create database teststorm;
Query OK, 1 row affected (0.00 sec)
mysql> use teststorm;
Database changed
mysql> create table author(id int auto_increment primary key,name varchar(50));
Query OK, 0 rows affected (0.06 sec)
mysql> create table book(id int auto_increment primary key,author_id int,title varchar(100));
Query OK, 0 rows affected (0.01 sec)
mysql> describe author;
+-------+-------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+-------+-------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| name | varchar(50) | YES | | NULL | |
+-------+-------------+------+-----+---------+----------------+
2 rows in set (0.00 sec)
mysql> describe book;
+-----------+--------------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+-----------+--------------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| author_id | int(11) | YES | | NULL | |
| title | varchar(100) | YES | | NULL | |
+-----------+--------------+------+-----+---------+----------------+
3 rows in set (0.00 sec)
mysql> Bye
wang@dapper-03:~/spark/lib$ python
Python 2.4.3 (#2, Apr 27 2006, 14:43:58)
[GCC 4.0.3 (Ubuntu 4.0.3-1ubuntu5)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> from storm import *
>>> class Author(Storm):
... has_many('book')
...
>>> class Book(Storm):
... belongs_to('author')
...
>>> Storm.conn(user='root',db='teststorm')
>>> a = Author()
>>> a.name = 'Tolstoy'
>>> a.save()
>>> Author.insert(name='<NAME>')
0L
>>> a.name, a['name']
('Tolstoy', 'Tolstoy')
>>> o = Author.selectone(2)
>>> o
{'id': 2L, 'name': '<NAME>'}
>>> o.id, o.name, o['id'], o['name']
(2L, '<NAME>', 2L, '<NAME>')
>>> b = Book()
>>> b.author_id = 1
>>> b.title = '<NAME>'
>>> b.save()
>>> b.title = 'War and Peace'
>>> b.save()
>>> b.author_id = 2
>>> b.title = 'Great Expectations'
>>> b.save()
>>> Book.insert(author_id=2,title='A Tale of Two Cities')
0L
>>> Book.insert(author_id=2,title='<NAME>')
0L
>>> all = Book.select()
>>> all
[{'author_id': 1L, 'id': 1L, 'title': '<NAME>'}, {'author_id': 1L, 'id': 2L, 'title': 'War and Peace'},
{'author_id': 2L, 'id': 3L, 'title': 'Great Expectations'}, {'author_id': 2L, 'id': 4L, 'title':
'A Tale of Two Cities'}, {'author_id': 2L, 'id': 5L, 'title': '<NAME>'}]
>>> o = Book.selectone(4)
>>> a = o.author()
>>> a
{'id': 2L, 'name': '<NAME>'}
>>> a = Author.selectone(name='Tolstoy')
>>> a
{'id': 1L, 'name': 'Tolstoy'}
>>> b = a.book()
>>> b
[{'author_id': 1L, 'id': 1L, 'title': '<NAME>'}, {'author_id': 1L, 'id': 2L, 'title': 'War and Peace'}]
>>> b[0].title, b[1].title
('<NAME>', 'War and Peace')
>>>
wang@dapper-03:~/spark/lib$
|
examples/advanced/compute_render_to_texture.py | minuJeong/moderngl-window | 142 | 12648336 | import moderngl as mgl
from pathlib import Path
import moderngl_window as mglw
from moderngl_window import geometry
class ComputeRenderToTexture(mglw.WindowConfig):
    """Simple example rendering to a texture with a compute shader"""
    title = "Render Texture Using Compute Shader"
    resource_dir = (Path(__file__) / '../../resources').resolve()
    gl_version = 4, 3
    aspect_ratio = 1.0

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.compute_shader = self.load_compute_shader('programs/compute/render_to_texture.glsl')
        self.compute_shader['destTex'] = 0
        self.texture_program = self.load_program('programs/texture.glsl')
        self.quad_fs = geometry.quad_fs()
        self.texture = self.ctx.texture((256, 256), 4)
        self.texture.filter = mgl.NEAREST, mgl.NEAREST

    def render(self, time, frame_time):
        self.ctx.clear(0.3, 0.3, 0.3)

        w, h = self.texture.size
        gw, gh = 16, 16
        # dispatch one work group per gw x gh tile of the texture; gw/gh must
        # match the local work group size declared in the compute shader
        nx, ny, nz = int(w / gw), int(h / gh), 1

        try:
            self.compute_shader['time'] = time
        except Exception:
            pass
        # Automatically binds as a GL_R32F / r32f (read from the texture)
        self.texture.bind_to_image(0, read=False, write=True)
        self.compute_shader.run(nx, ny, nz)

        # Render texture
        self.texture.use(location=0)
        self.quad_fs.render(self.texture_program)


if __name__ == '__main__':
    mglw.run_window_config(ComputeRenderToTexture)
|
updater_enc.py | takerum/neural-collage | 580 | 12648337 |
import numpy as np
import chainer
import chainer.functions as F
from chainer import Variable
import chainercv
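
# reconstruction_loss below is a perceptual loss in the discriminator's feature
# space: it returns the negative sum, over the batch, of the cosine similarity
# between the feature vectors of the reconstructed image and the ground truth.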
def reconstruction_loss(dis, recon, gt):
with chainer.using_config('train', False):
v1 = dis.feature_vector(recon)
v2 = dis.feature_vector(gt)
denom = F.sqrt(F.batch_l2_norm_squared(v1) * F.batch_l2_norm_squared(v2))
return -F.sum(F.reshape(F.batch_matmul(v1, v2, transa=True), (v1.shape[0],)) / denom)
class UpdaterEnc(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.models = kwargs.pop('models')
if 'input_size' in kwargs:
self.input_size = kwargs.pop('input_size')
else:
self.input_size = None
self.loss_func = reconstruction_loss
super(UpdaterEnc, self).__init__(*args, **kwargs)
def get_batch(self, xp):
batch = self.get_iterator('main').next()
batchsize = len(batch)
x = []
gt = []
c = []
for j in range(batchsize):
x.append(np.asarray(batch[j][0]).astype("f"))
gt.append(np.asarray(batch[j][1]).astype("f"))
c.append(np.asarray(batch[j][2]).astype(np.int32))
x = Variable(xp.asarray(x))
gt = Variable(xp.asarray(gt))
c = Variable(xp.asarray(c))
return x, gt, c
def update_core(self):
gen = self.models['gen']
dis = self.models['dis']
enc = self.models['enc']
enc_optimizer = self.get_optimizer('opt_enc')
xp = enc.xp
# fetch batch
x, gt, c = self.get_batch(xp)
if self.input_size is not None:
_x = []
for img in x.data.get():
_x.append(chainercv.transforms.resize(img, (self.input_size, self.input_size)))
x = Variable(xp.asarray(_x))
z = enc(x, y=c)
with chainer.using_config('train', False):
recon = gen(batchsize=len(z), z=z, y=c)
loss = reconstruction_loss(dis, recon, gt)
enc.cleargrads()
loss.backward()
enc_optimizer.update()
chainer.reporter.report({'loss': loss})
chainer.reporter.report({'min_slope': xp.min(enc.prelu_out.W.data)})
chainer.reporter.report({'max_slope': xp.max(enc.prelu_out.W.data)})
chainer.reporter.report({'min_z': xp.min(z.data)})
chainer.reporter.report({'max_z': xp.max(z.data)})
|
utils/track_seq.py | yiling-chen/MBMD | 220 | 12648342 | import tensorflow as tf
import numpy as np
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2
from core.model_builder import build_man_model
from object_detection.core import box_list
from object_detection.core import box_list_ops
from PIL import Image
import scipy.io as sio
import cv2
import os
from region_to_bbox import region_to_bbox
import time
import random
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def _compile_results(gt, bboxes, dist_threshold):
l = np.size(bboxes, 0)
gt4 = np.zeros((l, 4))
new_distances = np.zeros(l)
new_ious = np.zeros(l)
n_thresholds = 50
precisions_ths = np.zeros(n_thresholds)
for i in range(l):
gt4[i, :] = region_to_bbox(gt[i, :], center=False)
new_distances[i] = _compute_distance(bboxes[i, :], gt4[i, :])
new_ious[i] = _compute_iou(bboxes[i, :], gt4[i, :])
# what's the percentage of frame in which center displacement is inferior to given threshold? (OTB metric)
precision = sum(new_distances < dist_threshold)/float(np.size(new_distances)) * 100.0
# find above result for many thresholds, then report the AUC
thresholds = np.linspace(0, 25, n_thresholds+1)
thresholds = thresholds[-n_thresholds:]
# reverse it so that higher values of precision goes at the beginning
thresholds = thresholds[::-1]
for i in range(n_thresholds):
precisions_ths[i] = sum(new_distances < thresholds[i])/float(np.size(new_distances))
# integrate over the thresholds
precision_auc = np.trapz(precisions_ths)
# per frame averaged intersection over union (OTB metric)
iou = np.mean(new_ious) * 100
return l, precision, precision_auc, iou
def _compute_distance(boxA, boxB):
a = np.array((boxA[0]+boxA[2]/2, boxA[1]+boxA[3]/2))
b = np.array((boxB[0]+boxB[2]/2, boxB[1]+boxB[3]/2))
dist = np.linalg.norm(a - b)
assert dist >= 0
assert dist != float('Inf')
return dist
def _compute_iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[0] + boxA[2], boxB[0] + boxB[2])
yB = min(boxA[1] + boxA[3], boxB[1] + boxB[3])
if xA < xB and yA < yB:
# compute the area of intersection rectangle
interArea = (xB - xA) * (yB - yA)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = boxA[2] * boxA[3]
boxBArea = boxB[2] * boxB[3]
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
else:
iou = 0
assert iou >= 0
assert iou <= 1.01
return iou
def get_configs_from_pipeline_file(config_file):
"""Reads training configuration from a pipeline_pb2.TrainEvalPipelineConfig.
Reads training config from file specified by pipeline_config_path flag.
Returns:
model_config: model_pb2.DetectionModel
train_config: train_pb2.TrainConfig
input_config: input_reader_pb2.InputReader
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(config_file, 'r') as f:
text_format.Merge(f.read(), pipeline_config)
model_config = pipeline_config.model.ssd
train_config = pipeline_config.train_config
input_config = pipeline_config.train_input_reader
eval_config = pipeline_config.eval_config
return model_config, train_config, input_config, eval_config
def show_res(im, box, win_name,score=None,save_path=None,frame_id=None):
cv2.namedWindow(win_name)
cv2.rectangle(im, (box[1], box[0]),
(box[3], box[2]), [0, 255, 0], 2)
if win_name == '2':
cv2.putText(im,str(score[0]),(20,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),1)
cv2.putText(im,str(score[1]),(20,40),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),1)
save_path = save_path + '%04d.jpg'%frame_id
#cv2.imwrite(save_path,im[:,:,-1::-1])
#cv2.waitKey(1)
cv2.imshow(win_name, im[:, :, -1::-1])
cv2.waitKey(1)
def restore_model(sess, model_scope, checkpoint_path, variables_to_restore):
# variables_to_restore = tf.global_variables()
name_to_var_dict = dict([(var.op.name.lstrip(model_scope+'/'), var) for var in variables_to_restore
if not var.op.name.endswith('Momentum')])
saver = tf.train.Saver(name_to_var_dict)
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)
saver.restore(sess, latest_checkpoint)
def crop_search_region(img, gt, win_size, scale=4, mean_rgb=128, offset=None):
# gt: [ymin, xmin, ymax, xmax]
bnd_ymin, bnd_xmin, bnd_ymax, bnd_xmax = gt
bnd_w = bnd_xmax - bnd_xmin
bnd_h = bnd_ymax - bnd_ymin
# cx, cy = gt[:2] + gt[2:] / 2
cy, cx = (bnd_ymin + bnd_ymax)/2, (bnd_xmin+bnd_xmax)/2
diag = np.sum( bnd_h** 2 + bnd_w**2) ** 0.5
origin_win_size = diag * scale
origin_win_size_h, origin_win_size_w = bnd_h * scale, bnd_w * scale
# origin_win_size_h = origin_win_size
# origin_win_size_w = origin_win_size
im_size = img.size[1::-1]
min_x = np.round(cx - origin_win_size_w / 2).astype(np.int32)
max_x = np.round(cx + origin_win_size_w / 2).astype(np.int32)
min_y = np.round(cy - origin_win_size_h / 2).astype(np.int32)
max_y = np.round(cy + origin_win_size_h / 2).astype(np.int32)
if offset is not None:
min_offset_y, max_offset_y = (bnd_ymax - max_y, bnd_ymin - min_y)
min_offset_x, max_offset_x = (bnd_xmax - max_x, bnd_xmin - min_x)
offset[0] = np.clip(offset[0] * origin_win_size_h, min_offset_y, max_offset_y)
offset[1] = np.clip(offset[1] * origin_win_size_w, min_offset_x, max_offset_x)
offset = np.int32(offset)
min_y += offset[0]
max_y += offset[0]
min_x += offset[1]
max_x += offset[1]
win_loc = np.array([min_y, min_x])
gt_x_min, gt_y_min = ((bnd_xmin-min_x)/origin_win_size_w, (bnd_ymin - min_y)/origin_win_size_h) #coordinates on window
gt_x_max, gt_y_max = [(bnd_xmax-min_x)/origin_win_size_w, (bnd_ymax - min_y)/origin_win_size_h] #relative coordinates of gt bbox to the search region
unscaled_w, unscaled_h = [max_x - min_x + 1, max_y - min_y + 1]
min_x_win, min_y_win, max_x_win, max_y_win = (0, 0, unscaled_w, unscaled_h)
min_x_im, min_y_im, max_x_im, max_y_im = (min_x, min_y, max_x+1, max_y+1)
img = img.crop([min_x_im, min_y_im, max_x_im, max_y_im])
img_array = np.array(img)
if min_x < 0:
min_x_im = 0
min_x_win = 0 - min_x
if min_y < 0:
min_y_im = 0
min_y_win = 0 - min_y
if max_x+1 > im_size[1]:
max_x_im = im_size[1]
max_x_win = unscaled_w - (max_x + 1 - im_size[1])
if max_y+1 > im_size[0]:
max_y_im = im_size[0]
max_y_win = unscaled_h- (max_y +1 - im_size[0])
unscaled_win = np.ones([unscaled_h, unscaled_w, 3], dtype=np.uint8) * np.uint8(mean_rgb)
unscaled_win[min_y_win:max_y_win, min_x_win:max_x_win] = img_array[min_y_win:max_y_win, min_x_win:max_x_win]
unscaled_win = Image.fromarray(unscaled_win)
height_scale, width_scale = np.float32(unscaled_h)/win_size, np.float32(unscaled_w)/win_size
win = unscaled_win.resize([win_size, win_size], resample=Image.BILINEAR)
# win = sp.misc.imresize(unscaled_win, [win_size, win_size])
return win, np.array([gt_y_min, gt_x_min, gt_y_max, gt_x_max]), win_loc, [height_scale, width_scale]
# return win, np.array([gt_x_min, gt_y_min, gt_x_max, gt_y_max]), diag, np.array(win_loc)
def generate_init_training_samples(img, box, win_size, src_scales=None, tar_scales=None, batch_size=20, mean_rgb=128):
if src_scales is None:
src_scales = [1.2, 3]
if tar_scales is None:
tar_scales = [3.7, 4.5]
out_images = np.zeros([batch_size, 1, win_size, win_size, 3], dtype=np.uint8)
out_gt_box = np.zeros([batch_size, 1, 4], dtype=np.float32)
init_img = img.crop(np.int32([box[1], box[0], box[3], box[2]]))
init_img = init_img.resize([128,128], resample=Image.BILINEAR)
init_img = np.array(init_img)
init_img = np.expand_dims(np.expand_dims(init_img,axis=0),axis=0)
init_img = np.tile(init_img,(batch_size,1,1,1,1))
for ind in range(batch_size):
src_scale = np.random.rand(1)[0]*(src_scales[1]-src_scales[0]) + src_scales[0]
tar_scale = np.random.rand(1)[0]*(tar_scales[1]-tar_scales[0]) + tar_scales[0]
src_offset = np.random.laplace(0, 0.2, [2])
tar_offset = np.random.laplace(0, 0.2, [2])
# src_win, src_gt, _, _ = crop_search_region(img, box, win_size, src_scale, offset=src_offset)
tar_win, tar_gt, _, _ = crop_search_region(img, box, win_size, tar_scale, offset=tar_offset)
#out_images[ind, 0] = init_img
out_images[ind, 0] = tar_win
out_gt_box[ind, 0] = tar_gt
return out_images, init_img,out_gt_box
def build_test_graph(model, model_scope, reuse=None,weights_dict=None):
input_init_gt_box = tf.constant(np.zeros((1,4)), dtype=tf.float32)
# input_init_image = tf.constant(init_img_array, dtype=tf.uint8)
input_init_image = tf.placeholder(dtype=tf.uint8, shape=[128,128,3])
input_cur_image = tf.placeholder(dtype=tf.uint8, shape=[300,300,3])
init_gt_box = tf.reshape(input_init_gt_box, shape=[1,1,4])
groundtruth_classes = tf.ones(dtype=tf.float32, shape=[1,1,1])
float_init_image = tf.to_float(input_init_image)
float_init_image = tf.expand_dims(tf.expand_dims(float_init_image, axis=0), axis=0)
preprocessed_init_image = model.preprocess(float_init_image, [128,128])
images = tf.expand_dims(input_cur_image, axis=0)
float_images = tf.to_float(images)
preprocessed_images = model.preprocess(float_images)
preprocessed_images = tf.expand_dims(preprocessed_images, axis=0)
model.provide_groundtruth(init_gt_box,
groundtruth_classes,
None)
with tf.variable_scope(model_scope, reuse=reuse):
prediction_dict = model.predict(preprocessed_init_image, preprocessed_images,istraining=False,reuse=reuse)
detections = model.postprocess(prediction_dict)
original_image_shape = tf.shape(preprocessed_images)
absolute_detection_boxlist = box_list_ops.to_absolute_coordinates(
box_list.BoxList(tf.squeeze(detections['detection_boxes'], axis=0)),
original_image_shape[2], original_image_shape[3])
return absolute_detection_boxlist.get(), detections['detection_scores'], input_cur_image, input_init_image
def build_extract_feature_graph(model, model_scope,reuse=None):
batch_size = 20
seq_len = 1
image = tf.placeholder(dtype=tf.uint8, shape=[batch_size, seq_len, 300,300,3])
float_image = tf.to_float(image)
float_image = tf.reshape(float_image,[-1,300,300,3])
preprocessed_images = model.preprocess(float_image)
preprocessed_images = tf.reshape(preprocessed_images,[batch_size,seq_len,300,300,3])
random_noise = tf.random_normal([batch_size, seq_len, 300, 300, 3], mean=0, stddev=0.1)
preprocessed_images = preprocessed_images + random_noise
with tf.variable_scope(model_scope, reuse=reuse):
output_dict = model.extract_feature(preprocessed_images)
init_image = tf.placeholder(dtype=tf.uint8, shape=[1,seq_len, 128,128,3])
float_init_image = tf.to_float(init_image)
float_init_image = tf.reshape(float_init_image,[-1,128,128,3])
preprocessed_init_images = model.preprocess(float_init_image,[128,128])
preprocessed_init_images = tf.reshape(preprocessed_init_images,[1,seq_len,128,128,3])
with tf.variable_scope(model_scope, reuse=reuse):
init_feature_maps = model.extract_init_feature(preprocessed_init_images)
return image, init_image, output_dict, init_feature_maps
def build_extract_feature_graph1(model, model_scope,reuse=None):
batch_size = 5
seq_len = 1
image = tf.placeholder(dtype=tf.uint8, shape=[batch_size, seq_len, 300,300,3])
float_image = tf.to_float(image)
float_image = tf.reshape(float_image,[-1,300,300,3])
preprocessed_images = model.preprocess(float_image)
preprocessed_images = tf.reshape(preprocessed_images,[batch_size,seq_len,300,300,3])
random_noise = tf.random_normal([batch_size, seq_len, 300, 300, 3], mean=0, stddev=0.1)
preprocessed_images = preprocessed_images + random_noise
with tf.variable_scope(model_scope, reuse=reuse):
output_dict = model.extract_feature(preprocessed_images)
init_image = tf.placeholder(dtype=tf.uint8, shape=[1,seq_len, 128,128,3])
float_init_image = tf.to_float(init_image)
float_init_image = tf.reshape(float_init_image,[-1,128,128,3])
preprocessed_init_images = model.preprocess(float_init_image,[128,128])
preprocessed_init_images = tf.reshape(preprocessed_init_images,[1,seq_len,128,128,3])
with tf.variable_scope(model_scope, reuse=reuse):
init_feature_maps = model.extract_init_feature(preprocessed_init_images)
return image, init_image, output_dict, init_feature_maps
# def build_train_boxpredictor_graph(model, model_scope,reuse=None):
# batch_size = 20
# seq_len = 1
# init_features = tf.placeholder(dtype=tf.float32, shape=[batch_size,seq_len,1,1,])
def build_train_graph(model,model_scope, lr=1e-5, reuse=None):
batch_size = 20
seq_len = 1
featureOp0 = tf.placeholder(dtype=tf.float32, shape=[batch_size,19,19,512])
featureOp1 = tf.placeholder(dtype=tf.float32, shape=[batch_size,10,10,512])
# featureOp2 = tf.placeholder(dtype=tf.float32, shape=[batch_size,5,5,256])
# featureOp3 = tf.placeholder(dtype=tf.float32, shape=[batch_size,3,3,256])
# featureOp4 = tf.placeholder(dtype=tf.float32, shape=[batch_size,2,2,256])
# featureOp5 = tf.placeholder(dtype=tf.float32, shape=[batch_size,1,1,256])
initFeatureOp = tf.placeholder(dtype=tf.float32, shape=[batch_size,1,1,512])
feature_maps = [featureOp0,featureOp1]
train_gt_box = tf.placeholder(dtype=tf.float32, shape=[batch_size,seq_len,4])
train_gt_class = tf.ones(dtype=tf.uint8, shape=[batch_size,seq_len,1])
model.provide_groundtruth(train_gt_box,train_gt_class,None)
with tf.variable_scope(model_scope,reuse=reuse):
train_prediction_dict = model.predict_box(initFeatureOp,feature_maps,istraining=True)
losses_dict = model.loss(train_prediction_dict)
total_loss = 0
# total_loss = losses_dict['classification_loss']
for loss in losses_dict.values():
total_loss += loss
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
# optimizer = tf.train.AdamOptimizer()
variables_to_restore = tf.global_variables()
all_trainable_variables = tf.trainable_variables()
trainable_variables = [var for var in all_trainable_variables if (var.op.name.startswith(model_scope + '/BoxPredictor') )]
grad_vars = optimizer.compute_gradients(total_loss, trainable_variables)
for grad, var in grad_vars:
if grad is not None:
if var.name.endswith("Conv3x3_OutPut_40/weights:0") or var.name.endswith("Conv3x3_OutPut_40/biases:0") or var.name.endswith("Conv3x3_OutPut_20/weights:0") \
or var.name.endswith("Conv3x3_OutPut_20/biases:0") or var.name.endswith("Conv1x1_OutPut_20/weights:0") or var.name.endswith("Conv1x1_OutPut_20/biases:0") \
or var.name.endswith("Conv1x1_OutPut_10/weights:0") or var.name.endswith(
"Conv1x1_OutPut_10/biases:0"):
grad *= 10.0
grad_updates = optimizer.apply_gradients(grad_vars)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
return train_tensor, variables_to_restore,featureOp0, featureOp1, initFeatureOp, train_gt_box
# def build_train_graph(model, model_scope, lr=1e-3, reuse=None): # build training graph
# batch_size = 20
# seq_len = 1
# train_gt_box =tf.placeholder(dtype=tf.float32, shape=[batch_size, seq_len, 4])
# train_image = tf.placeholder(dtype=tf.uint8, shape=[batch_size, seq_len, 300,300,3])
# train_init_image = tf.placeholder(dtype=tf.uint8, shape=[batch_size, seq_len, 128,128,3])
# train_gt_class = tf.ones(dtype=tf.uint8, shape=[batch_size, seq_len,1])
# train_float_images = tf.to_float(train_image)
# train_float_images = tf.reshape(train_float_images, [-1, 300, 300, 3])
# train_preprocessed_images = model.preprocess(train_float_images)
# train_preprocessed_images = tf.reshape(train_preprocessed_images, [batch_size, seq_len, 300,300,3])
#
# train_float_init_images = tf.to_float(train_init_image)
# train_float_init_images = tf.reshape(train_float_init_images, [-1, 128, 128, 3])
# train_preprocessed_init_images = model.preprocess(train_float_init_images,[128,128])
# train_preprocessed_init_images = tf.reshape(train_preprocessed_init_images, [batch_size, seq_len, 128,128,3])
#
# random_noise = tf.random_normal([batch_size, seq_len, 300, 300, 3], mean=0, stddev=0.1)
# train_preprocessed_images = train_preprocessed_images + random_noise
# model.provide_groundtruth(train_gt_box,
# train_gt_class,
# None)
# with tf.variable_scope(model_scope, reuse=reuse):
# train_prediction_dict = model.predict(train_preprocessed_init_images,train_preprocessed_images,istraining=True)
# losses_dict = model.loss(train_prediction_dict)
# total_loss = 0
# # total_loss = losses_dict['classification_loss']
# for loss in losses_dict.values():
# total_loss += loss
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
# # optimizer = tf.train.AdamOptimizer()
# variables_to_restore = tf.global_variables()
# all_trainable_variables = tf.trainable_variables()
# trainable_variables = [var for var in all_trainable_variables if var.op.name.startswith(model_scope + '/BoxPredictor')]
# grad_vars = optimizer.compute_gradients(total_loss, trainable_variables)
# grad_updates = optimizer.apply_gradients(grad_vars)
# update_ops.append(grad_updates)
# update_op = tf.group(*update_ops)
# with tf.control_dependencies([update_op]):
# train_tensor = tf.identity(total_loss, name='train_op')
# def _build_feed_dict(x, y,z):
# return {train_image: x, train_init_image:z,train_gt_box: y}
# return train_tensor, _build_feed_dict, variables_to_restore
def main(_):
init_training = True
config_file = 'model/ssd_mobilenet_tracking.config'
# checkpoint_dir = '../model/server13_alov'
# checkpoint_dir = '../model/ssd_mobilenet_alov2'
# checkpoint_dir = '../model/server13_alov2'
# checkpoint_dir = '../model/server14_alov2'
# checkpoint_dir = '../model/server13_alov_2.2'
# checkpoint_dir = '../model/server13_alov2.4.0'
# checkpoint_dir = '../model/ssd_mobilenet_alov2.4.1'
# checkpoint_dir = '../model/server12_alov2.4.3'
checkpoint_dir = 'model/dump'
model_config, train_config, input_config, eval_config = get_configs_from_pipeline_file(config_file)
model = build_man_model(model_config=model_config, is_training=False)
model_scope = 'model'
reuse = None
if init_training:
imageOp, initImageOp, featureDict, initFeature = build_extract_feature_graph(model, model_scope, reuse=reuse)
loss_op, variables_to_restore, featureOp0, featureOp1, initFeatureOp, train_gt_boxOp = build_train_graph(model,
model_scope,
reuse=reuse)
reuse = True
pre_box_tensor, scores_tensor, input_cur_image, input_init_image = build_test_graph(model, model_scope,
reuse=reuse)
image_root = '/home/xiaobai/dataset/OTB/'
titles = os.listdir(image_root)
titles.sort()
precisions = np.zeros(len(titles))
precisions_auc = np.zeros(len(titles))
ious = np.zeros(len(titles))
lengths = np.zeros(len(titles))
speed = np.zeros(len(titles))
for title_id in range(len(titles)):
sess = tf.Session()
sess.run(tf.global_variables_initializer())
if not init_training:
variables_to_restore = tf.global_variables()
restore_model(sess, model_scope, checkpoint_dir, variables_to_restore)
title = titles[title_id]
#title = 'BlurCar2'
image_path = image_root + title+'/img/'
gt_path = image_root +title+ '/groundtruth_rect.txt'
try:
gt = np.loadtxt(gt_path)
except:
gt = np.loadtxt(gt_path, delimiter=',')
num_frames = gt.shape[0]
frame_list = os.listdir(image_path)
frame_list = [frame for frame in frame_list if frame.endswith('jpg')]
frame_list.sort()
# init_img = Image.open(image_path + '0001.jpg')
init_img = Image.open(image_path + frame_list[0])
init_gt = gt[0]
init_gt = [init_gt[1], init_gt[0], init_gt[1]+init_gt[3], init_gt[0]+init_gt[2]] # ymin xmin ymax xmax
# init_gt = [x-20 for x in ini_gt]
init_img_array = np.array(init_img)
expand_channel = False
if init_img_array.ndim < 3:
init_img_array = np.expand_dims(init_img_array, axis=2)
init_img_array = np.repeat(init_img_array, repeats=3, axis=2)
init_img = Image.fromarray(init_img_array)
expand_channel = True
gt_boxes = np.zeros((1,4))
gt_boxes[0,0] = init_gt[0] / float(init_img.height)
gt_boxes[0,1] = init_gt[1] / float(init_img.width)
gt_boxes[0,2] = init_gt[2] / float(init_img.height)
gt_boxes[0,3] = init_gt[3] / float(init_img.width)
img1_xiaobai = np.array(init_img)
pad_x = 36.0 / 264.0 * (gt_boxes[0, 3] - gt_boxes[0, 1]) * init_img.width
pad_y = 36.0 / 264.0 * (gt_boxes[0, 2] - gt_boxes[0, 0]) * init_img.height
cx = (gt_boxes[0, 3] + gt_boxes[0, 1]) / 2.0 * init_img.width
cy = (gt_boxes[0, 2] + gt_boxes[0, 0]) / 2.0 * init_img.height
startx = gt_boxes[0, 1] * init_img.width - pad_x
starty = gt_boxes[0, 0] * init_img.height - pad_y
endx = gt_boxes[0, 3] * init_img.width + pad_x
endy = gt_boxes[0, 2] * init_img.height + pad_y
left_pad = max(0, int(-startx))
top_pad = max(0, int(-starty))
right_pad = max(0, int(endx - init_img.width + 1))
bottom_pad = max(0, int(endy - init_img.height + 1))
startx = int(startx + left_pad)
starty = int(starty + top_pad)
endx = int(endx + left_pad)
endy = int(endy + top_pad)
if top_pad or left_pad or bottom_pad or right_pad:
r = np.pad(img1_xiaobai[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant',
constant_values=128)
g = np.pad(img1_xiaobai[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant',
constant_values=128)
b = np.pad(img1_xiaobai[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)), mode='constant',
constant_values=128)
r = np.expand_dims(r, 2)
g = np.expand_dims(g, 2)
b = np.expand_dims(b, 2)
# h, w = r.shape
# r1 = np.zeros([h, w, 1], dtype=np.float32)
# r1[:, :, 0] = r
# g1 = np.zeros([h, w, 1], dtype=np.float32)
# g1[:, :, 0] = g
# b1 = np.zeros([h, w, 1], dtype=np.float32)
# b1[:, :, 0] = b
img1_xiaobai = np.concatenate((r, g, b), axis=2)
img1_xiaobai = Image.fromarray(img1_xiaobai)
# gt_boxes resize
init_img_crop = img1_xiaobai.crop(np.int32([startx, starty, endx, endy]))
# init_img_crop, _, _, _ = crop_search_region(init_img, init_gt, 300, 1.5)
#init_img_crop = init_img.crop(np.int32([init_gt[1], init_gt[0], init_gt[3], init_gt[2]]))
init_img_crop = init_img_crop.resize([128,128], resample=Image.BILINEAR)
last_gt = init_gt
# init_img_crop, init_gt_norm, win_loc, scale= crop_search_region(init_img, init_gt, 300, mean_rgb=128)
# last_gt = np.int32([init_gt_norm[0]*init_img_crop.height*scale[0]+win_loc[0], init_gt_norm[1]*init_img_crop.width*scale[1]+win_loc[1],
# init_gt_norm[2]*init_img_crop.height*scale[0] + win_loc[0], init_gt_norm[3]*init_img_crop.width*scale[1] + win_loc[1]])
# gt = np.array([74, 22, 82, 105], dtype=np.float32)
# gt = [(gt[1])/init_img.height, (gt[0])/init_img.width, (gt[3]+gt[1])/init_img.height, (gt[2]+gt[0])/init_img.width]
# init_img = init_img.resize([300,300])
init_img_array = np.array(init_img_crop)
init_img_array1 = np.expand_dims(init_img_array, axis=0)
init_img_array1 = np.expand_dims(init_img_array1, axis=0)
#time1 = time.time()
initfeatures1 = sess.run(initFeature, feed_dict={initImageOp: init_img_array1})
#time2 = time.time()
#print time2 - time1
#Train:
# if init_training:
# for i in range(20):
# train_images, train_init_images, train_gt = generate_init_training_samples(init_img, init_gt, 300)
# featuredict1 = sess.run(featureDict, feed_dict={imageOp: train_images})
# region_map0 = featuredict1['feature_maps0']
# region_map1 = featuredict1['feature_maps1']
#
# initfeatures2 = np.tile(initfeatures1, [20, 1, 1, 1])
# loss = sess.run(loss_op,
# feed_dict={featureOp0: region_map0, featureOp1: region_map1, initFeatureOp: initfeatures2,
# train_gt_boxOp: train_gt})
# if loss < 2:
# break
#
#print("Training Iter %d, Loss: %f"%(i, loss))
#region_map0_record = region_map0.copy()
#region_map1_record = region_map1.copy()
# region_map2_record = region_map2.copy()
# region_map3_record = region_map3.copy()
# region_map4_record = region_map4.copy()
# region_map5_record = region_map5.copy()
#train_gt_record = train_gt.copy()
save_path = 'result_img/' +title + '/'
if not os.path.isdir(save_path):
os.mkdir(save_path)
update_count = 0
avg_score = 0
bBoxes = np.zeros((num_frames,4))
bBoxes[0,:] = init_gt
t_start = time.time()
for i in range(2,num_frames):
frame_path = image_path + frame_list[i-1]
cur_ori_img = Image.open(frame_path)
if expand_channel:
cur_ori_img = np.array(cur_ori_img)
cur_ori_img = np.expand_dims(cur_ori_img, axis=2)
cur_ori_img = np.repeat(cur_ori_img, repeats=3, axis=2)
cur_ori_img = Image.fromarray(cur_ori_img)
cropped_img, last_gt_norm, win_loc, scale = crop_search_region(cur_ori_img, last_gt, 300, mean_rgb=128)
cur_img_array = np.array(cropped_img)
detection_box, scores = sess.run([pre_box_tensor, scores_tensor],
feed_dict={input_cur_image: cur_img_array, input_init_image: init_img_array})
#detection_box = detection_box[0]
detection_box[:,0] = detection_box[:,0] * scale[0]+ win_loc[0]
detection_box[:,1] = detection_box[:,1] * scale[1]+ win_loc[1]
detection_box[:,2] = detection_box[:,2] * scale[0]+ win_loc[0]
detection_box[:,3] = detection_box[:,3] * scale[1] + win_loc[1]
# cur_ori_img_array = np.array(cur_ori_img)
# for tmp in range(20):
# cv2.rectangle(cur_ori_img_array,(detection_box[tmp,1],detection_box[tmp,0]), (detection_box[tmp,3],detection_box[tmp,2]),(0,0,255),thickness=1)
# cv2.imshow("heh",cur_ori_img_array)
# cv2.waitKey(0)
# #
# if scores[0,0] > 0.2:
# center_box = [(detection_box[:,0] + detection_box[:,2]) / 2.0, (detection_box[:,1] + detection_box[:,3]) / 2.0]
# center_box = np.array(center_box)
# distances = np.zeros((100,))
# last_cx = (last_gt[1] + last_gt[3])/2.0
# last_cy = (last_gt[0] + last_gt[2]) / 2.0
# for tmp in range(100):
# distances[tmp] = ((center_box[0,tmp] -last_cy)**2 + (center_box[1,tmp] - last_cx)**2) ** 0.5
# distances = distances.reshape((1,100))
# distances = 1.0 / distances
# scores_ = scores + distances * 0.5
# index = np.argsort(scores_,axis=1)
# detection_box = detection_box[index[0][-1]]
# else:
# index = np.argsort(scores,axis=1)
# detection_box = detection_box[0]
# detection_box = detection_box[0]
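# Re-rank the top-k detection candidates with optical flow: pixels inside the
# previous box are warped by the precomputed flow field, and a candidate box
# survives only if enough of those warped pixels land inside it (motion
# consistency); among the survivors, the highest-scoring box is kept.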
rank = np.argsort(scores)
k = 50
candidates = rank[0,-k:]
flowfile = '/home/xiaobai/dataset/OTB_flow/%s/img/%04d.mat'%(title,i-1)
flow = sio.loadmat(flowfile)
flowvalue = flow['flow'].copy()
flow_x = flowvalue[:, :, 0]
flow_y = flowvalue[:, :, 1]
flow_x = cv2.resize(flow_x,(cur_ori_img.width, cur_ori_img.height))
flow_y = cv2.resize(flow_y,(cur_ori_img.width, cur_ori_img.height))
py1 = int(last_gt[0])
px1 = int(last_gt[1])
py2 = int(last_gt[2])
px2 = int(last_gt[3])
flow_x_region = flow_x[py1:py2, px1:px2].flatten()
flow_y_region = flow_y[py1:py2, px1:px2].flatten()
xx, yy = np.meshgrid(np.arange(px1, px2, 1), np.arange(py1, py2, 1))
xx_flat = xx.flatten()
yy_flat = yy.flatten()
xx_next = xx_flat + flow_x_region
yy_next = yy_flat + flow_y_region
#k = 20
pixel_count = np.zeros((k,))
for ii in range(k):
bb = detection_box[candidates[ii],:].copy()
flags = xx_next >= bb[1]
flags = np.logical_and(flags, xx_next <= bb[3])
flags = np.logical_and(flags, yy_next >= bb[0])
flags = np.logical_and(flags, yy_next <= bb[2])
pixel_count[ii] = np.sum(flags)
threshold = 0.6
while threshold >= 0.2:#0.2:
passed = pixel_count > ((py2-py1) * (px2-px1) * threshold)
if np.sum(passed) > 0:
candidates_left = candidates[passed]
max_idx = candidates_left[np.argmax(scores[0, candidates_left])]
break
else:
threshold -= 0.1
if np.sum(passed) == 0:
max_idx = 0
detection_box = detection_box[max_idx]
show_res(cur_img_array, detection_box, '1')
#print('Score: %f'%scores[0,max_idx])
avg_score = avg_score + 1.0/(i-1) * (scores[0,max_idx]-avg_score)
if scores[0,max_idx] > 0.5:#scores[0,max_idx] > 0.5: #or (i > 10 and avg_score < 0.3 and scores[0,max_idx] > 0.4):
# detection_box = [detection_box[0]*scale[0], detection_box[1]*scale[1], detection_box[2]*scale[0], detection_box[3]*scale[1]]
# last_gt = np.int32([detection_box[0] + win_loc[0], detection_box[1] + win_loc[1],
# detection_box[2] + win_loc[0], detection_box[3] + win_loc[1]])
y1 = detection_box[0]
x1 = detection_box[1]
y2 = detection_box[2]
x2 = detection_box[3]
if x1 < 0:
x1 = 0
x2 = x1 + (last_gt[3] - last_gt[1])
if y1 < 0:
y1 = 0
y2 = y1 + (last_gt[2] - last_gt[0])
if x2 >= cur_ori_img.width:
x2 = cur_ori_img.width - 1
x1 = x2 - (last_gt[3] - last_gt[1])
if y2 >= cur_ori_img.height:
y2 = cur_ori_img.height - 1
y1 = y2 - (last_gt[2] - last_gt[0])
last_gt = np.int32([y1, x1, y2, x2])
#last_gt = np.int32(detection_box)
cur_ori_img_array = np.array(cur_ori_img)
show_res(cur_ori_img_array, np.array(last_gt, dtype=np.int32), '2', score=[scores[0,max_idx],avg_score], save_path=save_path,
frame_id=i)
# for i in range(1):
# # if avg_score < 0.75 or i < 10:
# # train_images, train_init_images1, train_gt = generate_init_training_samples(cur_ori_img, last_gt, 300)
# # featuredict1 = sess.run(featureDict, feed_dict={imageOp: train_images})
# # else:
# train_images, train_init_images1, train_gt = generate_init_training_samples(cur_ori_img, last_gt, 300,batch_size=5)
# featuredict1 = sess.run(featureDict1, feed_dict={imageOp1: train_images})
#
# region_map0 = featuredict1['feature_maps0']
# region_map1 = featuredict1['feature_maps1']
#
# region_map0_record = np.concatenate((region_map0_record,region_map0), axis=0)
# region_map1_record = np.concatenate((region_map1_record,region_map1), axis=0)
# train_gt_record = np.concatenate((train_gt_record,train_gt),axis=0)
else:
# cx = (detection_box[1]*scale[1] + detection_box[3]*scale[1]) / 2.0 + win_loc[1]
# cy = (detection_box[0]*scale[0] + detection_box[2]*scale[0]) / 2.0 + win_loc[0]
cx = (detection_box[1] + detection_box[3]) / 2.0
cy = (detection_box[0] + detection_box[2]) /2.0
x1 = cx - (last_gt[3] - last_gt[1]) / 2.0
x2 = cx + (last_gt[3] - last_gt[1]) / 2.0
y1 = cy - (last_gt[2] - last_gt[0]) / 2.0
y2 = cy + (last_gt[2] - last_gt[0]) / 2.0
if x1 < 0:
x1 = 0
x2 = x1 + (last_gt[3] - last_gt[1])
if y1 < 0:
y1 = 0
y2 = y1 + (last_gt[2] - last_gt[0])
if x2 >= cur_ori_img.width:
x2 = cur_ori_img.width - 1
x1 = x2 - (last_gt[3] - last_gt[1])
if y2 >= cur_ori_img.height:
y2 = cur_ori_img.height - 1
y1 = y2 - (last_gt[2] - last_gt[0])
last_gt = np.int32([y1,x1,y2,x2])
cur_ori_img_array = np.array(cur_ori_img)
show_res(cur_ori_img_array, np.array(last_gt, dtype=np.int32), '2', score=[scores[0,max_idx],avg_score], save_path=save_path,
frame_id=i)
#update_count += 1
bBoxes[i-1,:] = last_gt
bBoxes[:,2] = bBoxes[:,2] - bBoxes[:,0]
bBoxes[:,3] = bBoxes[:,3] - bBoxes[:,1]
bBoxes_ = np.zeros((num_frames,4))
bBoxes_[:,0] = bBoxes[:,1]
bBoxes_[:,1] = bBoxes[:,0]
bBoxes_[:,2] = bBoxes[:,3]
bBoxes_[:,3] = bBoxes[:,2]
t_elapsed = time.time() - t_start
speed_i = num_frames / t_elapsed
speed[title_id] = speed_i
lengths[title_id], precisions[title_id], precisions_auc[title_id], ious[title_id] = _compile_results(gt, bBoxes_,20)
print str(title_id) + ' -- ' + titles[title_id] + \
' -- Precision: ' + "%.2f" % precisions[title_id] + \
' -- Precisions AUC: ' + "%.2f" % precisions_auc[title_id] + \
' -- IOU: ' + "%.2f" % ious[title_id]
print
sess.close()
tot_frames = np.sum(lengths)
mean_precision = np.sum(precisions * lengths) / tot_frames
mean_precision_auc = np.sum(precisions_auc * lengths) / tot_frames
mean_iou = np.sum(ious * lengths) / tot_frames
mean_speed = np.sum(speed * lengths) / tot_frames
print '-- Overall stats (averaged per frame) on ' + str(len(titles)) + ' videos (' + str(tot_frames) + ' frames) --'
print ' -- Precision ' + "(20 px)" + ': ' + "%.2f" % mean_precision + \
' -- Precisions AUC: ' + "%.2f" % mean_precision_auc + \
' -- IOU: ' + "%.2f" % mean_iou + \
' -- Speed: ' + "%.2f" % mean_speed + ' --'
print
if __name__ == '__main__':
tf.app.run()
|
netbox/ipam/migrations/0050_iprange.py | TheFlyingCorpse/netbox | 4,994 | 12648393 |
# Generated by Django 3.2.5 on 2021-07-16 14:15
import django.core.serializers.json
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
import ipam.fields
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('extras', '0061_extras_change_logging'),
('tenancy', '0001_squashed_0012'),
('ipam', '0049_prefix_mark_utilized'),
]
operations = [
migrations.CreateModel(
name='IPRange',
fields=[
('created', models.DateField(auto_now_add=True, null=True)),
('last_updated', models.DateTimeField(auto_now=True, null=True)),
('custom_field_data', models.JSONField(blank=True, default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder)),
('id', models.BigAutoField(primary_key=True, serialize=False)),
('start_address', ipam.fields.IPAddressField()),
('end_address', ipam.fields.IPAddressField()),
('size', models.PositiveIntegerField(editable=False)),
('status', models.CharField(default='active', max_length=50)),
('description', models.CharField(blank=True, max_length=200)),
('role', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ip_ranges', to='ipam.role')),
('tags', taggit.managers.TaggableManager(through='extras.TaggedItem', to='extras.Tag')),
('tenant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ip_ranges', to='tenancy.tenant')),
('vrf', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ip_ranges', to='ipam.vrf')),
],
options={
'verbose_name': 'IP range',
'verbose_name_plural': 'IP ranges',
'ordering': (django.db.models.expressions.OrderBy(django.db.models.expressions.F('vrf'), nulls_first=True), 'start_address', 'pk'),
},
),
]
|
src/radish/parser/transformer.py | radish-bdd/radish2 | 182 | 12648401 | """
radish
~~~~~~
the root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import itertools
import textwrap
from pathlib import Path
from lark import Transformer
from radish.models import (
Background,
ConstantTag,
DefaultRule,
Feature,
PreconditionTag,
Rule,
Scenario,
ScenarioLoop,
ScenarioOutline,
Step,
Tag,
)
from radish.parser.errors import (
RadishFirstStepMustUseFirstLevelKeyword,
RadishScenarioOutlineExamplesInconsistentCellCount,
RadishStepDataTableInconsistentCellCount,
)
class RadishGherkinTransformer(Transformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.featurefile_contents = None
self.feature_id = None
self.__step_id = None
self.__scenario_id = None
self.__step_keyword_ctx = None
def prepare(
self,
language_spec,
featurefile_path: Path,
featurefile_contents: str,
feature_id: int,
):
"""Prepare the Transformer for the next transformation"""
self.language_spec = language_spec
self.featurefile_path = featurefile_path
self.featurefile_contents = featurefile_contents.splitlines(True)
self.feature_id = feature_id
self.__step_id = 1
self.__scenario_id = 1
def start(self, subtree):
"""Transform the root element for the radish AST"""
if len(subtree) > 0:
return subtree[0]
return None
def step_doc_string(self, subtree):
"""Transform the ``step_doc_string``-subtree for the radish AST"""
startline = subtree[0].line
endline = subtree[-1].line - 1
lines = "".join(self.featurefile_contents[startline:endline])
return textwrap.dedent(lines)
def _table_cell(self, subtree):
"""Transform a Table Cell"""
raw_cell_value = subtree[0].strip()
# remove VBAR escape sequences
cell_value = raw_cell_value.replace(r"\|", "|")
return cell_value
def _table_row(self, subtree):
"""Transform a Table Row"""
return list(subtree)
#: Transform the ``step_data_table_cell``-subtree for the radish AST
step_data_table_cell = _table_cell
#: Transform the ``step_data_table_row``-subtree for the radish AST
step_data_table_row = _table_row
def step_data_table(self, subtree):
"""Transform the ``step_data_table``-subtree for the radish AST"""
# check if all rows have the same amount of cells
table = list(subtree)
if len({len(row) for row in table}) > 1:
raise RadishStepDataTableInconsistentCellCount()
return table
def step_arguments(self, subtree):
"""Transform the ``step_arguments``-subtree for the radish AST"""
if len(subtree) == 0:
doc_string = None
data_table = None
elif len(subtree) == 2:
doc_string, data_table = subtree
elif isinstance(subtree[0], str):
doc_string = subtree[0]
data_table = None
else:
doc_string = None
data_table = subtree[0]
return doc_string, data_table
def step(self, subtree):
"""Transform the ``step``-subtree for the radish AST"""
keyword, text, (doc_string, data_table) = subtree
keyword_line = keyword.line
keyword = keyword.strip()
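# Second-level keywords such as 'And'/'But' reuse the semantics of the most
# recent first-level keyword (Given/When/Then), which is tracked in
# self.__step_keyword_ctx; the very first Step must therefore use a
# first-level keyword.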
if self.__step_keyword_ctx is None:
if keyword not in self.language_spec.first_level_step_keywords:
raise RadishFirstStepMustUseFirstLevelKeyword()
self.__step_keyword_ctx = keyword
else:
if keyword in self.language_spec.first_level_step_keywords:
if keyword != self.__step_keyword_ctx:
self.__step_keyword_ctx = keyword
english_keyword = next(
key
for key, value in self.language_spec.keywords.items()
if value == self.__step_keyword_ctx
)
step = Step(
self.__step_id,
english_keyword,
keyword,
text,
doc_string,
data_table,
self.featurefile_path,
keyword_line,
)
# increment step id for the next step
self.__step_id += 1
return step
def scenario(self, subtree):
"""Transform the ``scenario``-subtree for the radish AST"""
tags = list(itertools.takewhile(lambda t: isinstance(t, Tag), subtree))
keyword = subtree[len(tags)]
short_description, *steps = subtree[len(tags) + 1 :]
scenario = Scenario(
self.__scenario_id,
keyword,
short_description,
tags,
self.featurefile_path,
short_description.line,
steps,
)
# increment scenario id and reset step id for the next scenario
self.__scenario_id += 1
self.__step_id = 1
self.__step_keyword_ctx = None
return scenario
#: Transform the ``example_cell``-subtree for the radish AST
example_cell = _table_cell
#: Transform the ``example_row``-subtree for the radish AST
example_row = _table_row
def examples(self, subtree):
"""Transform the ``examples``-subtree for the radish AST"""
# check if all rows have the same amount of cells
if len({len(row) for row in subtree}) > 1:
raise RadishScenarioOutlineExamplesInconsistentCellCount()
header, *rows = subtree
return [dict(zip(header, row)) for row in rows]
def scenario_outline(self, subtree):
"""Transform the ``scenario_outline``-subtree for the radish AST"""
        # consume Scenario Outline Tags
tags = list(itertools.takewhile(lambda t: isinstance(t, Tag), subtree))
keyword = subtree[len(tags)]
short_description = subtree[len(tags) + 1]
steps = list(
itertools.takewhile(lambda s: isinstance(s, Step), subtree[len(tags) + 2 :])
)
examples_table = subtree[len(tags) + 2 + len(steps) :][0]
scenario_outline = ScenarioOutline(
self.__scenario_id,
keyword,
short_description,
tags,
self.featurefile_path,
short_description.line,
steps,
examples_table,
)
# increment scenario id and reset step id for the next scenario
self.__scenario_id += 1 + len(examples_table)
self.__step_id = 1
self.__step_keyword_ctx = None
return scenario_outline
def iterations(self, subtree):
"""Transform the ``scenario_loop``-subtree for the radish AST"""
return int(subtree[0])
def scenario_loop(self, subtree):
"""Transform the ``scenario_outline``-subtree for the radish AST"""
        # consume Scenario Loop Tags
tags = list(itertools.takewhile(lambda t: isinstance(t, Tag), subtree))
keyword = subtree[len(tags)]
short_description = subtree[len(tags) + 1]
steps = list(
itertools.takewhile(lambda s: isinstance(s, Step), subtree[len(tags) + 2 :])
)
iterations = subtree[len(tags) + 2 + len(steps)]
scenario_loop = ScenarioLoop(
self.__scenario_id,
keyword,
short_description,
tags,
self.featurefile_path,
short_description.line,
steps,
iterations,
)
# increment scenario id and reset step id for the next scenario
self.__scenario_id += 1 + iterations
self.__step_id = 1
self.__step_keyword_ctx = None
return scenario_loop
def background(self, subtree):
"""Transform the ``background``-subtree for the radish AST"""
keyword = subtree.pop(0)
if len(subtree) == 0:
short_description = None
steps = []
elif isinstance(subtree[0], Step):
short_description = None
steps = subtree
else:
short_description, *steps = subtree
background = Background(
keyword,
short_description,
self.featurefile_path,
short_description.line if short_description else 0,
steps,
)
return background
def rule(self, subtree):
"""Transform the ``rule``-subtree for the radish AST"""
keyword = subtree.pop(0)
short_description = subtree[0]
if len(subtree) > 1:
scenarios = subtree[1:]
else:
scenarios = []
rule = Rule(
keyword,
short_description,
self.featurefile_path,
short_description.line,
scenarios,
)
# let the Scenarios know to which Rule they belong
for scenario in scenarios:
scenario.set_rule(rule)
return rule
def description(self, description_lines):
"""Transform the ``description``-subtree for the radish AST"""
        return [str(line) for line in description_lines]
def feature_body(self, subtree):
"""Transform the ``feature_body``-subtree for the radish AST"""
description, *scenarios = subtree
background, scenarios = self._expand_background_and_scenarios(scenarios)
        # create a DefaultRule for Scenarios that don't belong to any Rule
scenarios_for_default_rule = list(
itertools.takewhile(lambda s: not isinstance(s, Rule), scenarios)
)
rules = scenarios[len(scenarios_for_default_rule) :]
if scenarios_for_default_rule:
default_rule = DefaultRule(
scenarios_for_default_rule[0].path,
scenarios_for_default_rule[0].line,
scenarios_for_default_rule,
)
# let the Scenarios in the DefaultRule know to which Rule they belong
for scenario in scenarios_for_default_rule:
scenario.set_rule(default_rule)
rules = [default_rule] + rules
return description, background, rules
def feature(self, subtree):
"""Transform the ``feature``-subtree for the radish AST"""
# consume Feature Tags
tags = list(itertools.takewhile(lambda t: isinstance(t, Tag), subtree))
keyword = subtree[len(tags)]
short_description = subtree[len(tags) + 1]
if len(subtree) > len(tags) + 2:
description, background, rules = subtree[len(tags) + 2 :][0]
else:
description = None
background = None
rules = []
feature = Feature(
self.feature_id,
keyword,
short_description,
description,
tags,
self.featurefile_path,
short_description.line,
background,
rules,
self.language_spec,
)
# let the Background know to which Feature it belongs to
if background:
background.set_feature(feature)
# let the Rules know to which Feature they belong
for rule in rules:
rule.set_feature(feature)
# add Background to all Rules
for rule in rules:
rule.set_background(background)
return feature
def tag(self, subtree):
"""Transform the ``tag``-subtree for the radish AST"""
return subtree[0]
feature_tag = tag
scenario_tag = tag
def std_tag(self, subtree):
"""Transform the ``tag``-subtree for the radish AST"""
tag_name = subtree[0]
tag = Tag(str(tag_name).strip(), self.featurefile_path, tag_name.line)
return tag
def precondition_tag(self, subtree):
"""Transform the ``precondition_tag``-subtree for the radish AST"""
feature_filename, scenario_short_description = subtree
tag = PreconditionTag(
str(feature_filename),
str(scenario_short_description),
self.featurefile_path,
feature_filename.line,
)
return tag
def constant_tag(self, subtree):
"""Transform the ``constant_tag``-subtree for the radish AST"""
key, value = subtree
tag = ConstantTag(str(key), str(value), self.featurefile_path, key.line)
return tag
def _expand_background_and_scenarios(self, scenarios):
"""Expand the given list of Scenarios into Background and Scenarios if applicable"""
background = None
if scenarios:
if isinstance(scenarios, Background):
background = scenarios
scenarios = []
elif isinstance(scenarios, Scenario):
pass
elif isinstance(scenarios[0], Background):
background = scenarios.pop(0)
return background, scenarios
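# Usage sketch (hedged): the transformer is intended to be driven by a Lark
# parse tree of a Gherkin feature file. The `lark_parser` below is a
# placeholder for whatever parser radish builds from its grammar; only the
# `prepare`/`transform` calls mirror the methods defined above.
#
#   transformer = RadishGherkinTransformer()
#   transformer.prepare(language_spec, Path("login.feature"),
#                       featurefile_contents, feature_id=1)
#   tree = lark_parser.parse(featurefile_contents)   # assumed Lark parser
#   feature = transformer.transform(tree)            # yields a Feature AST node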
|
downloads-generation/models_class1_pan_variants/exclude_data_from_training.py | ignatovmg/mhcflurry | 113 | 12648406 | """
Extract allele/peptide pairs to exclude from training data.
"""
import sys
import os
import argparse
import pandas
from mhcflurry.common import normalize_allele_name
def normalize_allele_name_or_return_unknown(s):
return normalize_allele_name(
s, raise_on_error=False, default_value="UNKNOWN")
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument("data", metavar="CSV", help="Training data")
parser.add_argument(
"--remove-filename",
action="append",
default=[],
metavar="NAME",
help="Data to drop",
required=True)
parser.add_argument(
"--remove-kind",
action="append",
default=[],
metavar="KIND",
help="Format of data to drop. For published data, use the PMID.",
choices=[
"30377561" # Koşaloğlu-Yalçın, ..., Peters. Oncoimmunology 2018 [PMID 30377561]
],
required=True)
parser.add_argument("--out", metavar="CSV", help="Result data path")
parser.add_argument(
"--out-removed", metavar="CSV", help="Write removed data to given path")
pandas.set_option('display.max_columns', 500)
LOADERS = {}
def load_30377561(filename):
# Koşaloğlu-Yalçın, ..., Peters. Oncoimmunology 2018 [PMID 30377561]
dfs = pandas.read_excel(filename, sheet_name=None)
df1 = dfs['Supp Table 5 positive & random']
result_df = []
result_df.append(df1.rename(
columns={
"mt.pep": "peptide",
"hla": "allele",
})[["allele", "peptide"]])
result_df.append(df1.rename(
columns={
"wt.pep": "peptide",
"hla": "allele",
})[["allele", "peptide"]])
df2 = dfs["Supp Table 4 viral epitopes"]
result_df.append(
df2.rename(
columns={
"Epitope": "peptide", "Restriction": "allele",
})[["allele", "peptide"]])
result_df = pandas.concat(result_df, ignore_index=True)
return result_df
LOADERS["30377561"] = load_30377561
def go(args):
df = pandas.read_csv(args.data)
print("Read training data of length %d: " % len(df))
print(df)
df["allele_peptide"] = df.allele + "~" + df.peptide
if len(args.remove_kind) != len(args.remove_filename):
parser.error(
"Number of arguments mismatch: --remove-kind [%d] != "
"--remove-filename [%d]" % (
len(args.remove_kind),
len(args.remove_filename)))
removed = []
for (i, (kind, path)) in enumerate(
zip(args.remove_kind, args.remove_filename)):
print(
"Processing file %d / %d: %s %s" % (
i + 1, len(args.remove_kind), kind, path))
to_remove = LOADERS[kind](path)
print("Remove data contains %d entries" % len(to_remove))
to_remove["normalized_allele"] = to_remove.allele.map(
normalize_allele_name_or_return_unknown)
remove_allele_peptides = set(
to_remove.normalized_allele + "~" + to_remove.peptide)
remove_mask = df.allele_peptide.isin(remove_allele_peptides)
print("Will remove %d entries." % remove_mask.sum())
removed.append(df.loc[remove_mask].copy())
df = df.loc[~remove_mask].copy()
print("New training data size: %d" % len(df))
print("Done processing.")
removed_df = pandas.concat(removed)
print("Removed %d entries in total:" % len(removed_df))
print(removed_df)
if args.out_removed:
removed_df.to_csv(args.out_removed, index=False)
print("Wrote: ", args.out_removed)
if args.out:
df.to_csv(args.out, index=False)
print("Wrote: ", args.out)
if __name__ == "__main__":
go(parser.parse_args(sys.argv[1:]))
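# Example invocation (hedged sketch; the file names are placeholders, the
# options mirror the argparse definitions above):
#   python exclude_data_from_training.py train_data.csv \
#       --remove-kind 30377561 --remove-filename pmid30377561_supplement.xlsx \
#       --out train_data.filtered.csv --out-removed removed_entries.csv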
|
templates/create_formula/languages/python2/src/main.py | antoniofilhozup/ritchie-formulas | 107 | 12648424 |
#!/usr/bin/python2
import os
from formula import formula
input1 = os.environ.get("RIT_INPUT_TEXT")
input2 = os.environ.get("RIT_INPUT_BOOLEAN")
input3 = os.environ.get("RIT_INPUT_LIST")
input4 = os.environ.get("RIT_INPUT_PASSWORD")
formula.Run(input1, input2, input3, input4)
|
boto3_type_annotations/boto3_type_annotations/amplify/client.py | cowboygneox/boto3_type_annotations | 119 | 12648495 | <gh_stars>100-1000
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def create_app(self, name: str, repository: str, platform: str, oauthToken: str, description: str = None, iamServiceRoleArn: str = None, environmentVariables: Dict = None, enableBranchAutoBuild: bool = None, enableBasicAuth: bool = None, basicAuthCredentials: str = None, customRules: List = None, tags: Dict = None, buildSpec: str = None) -> Dict:
pass
def create_branch(self, appId: str, branchName: str, description: str = None, stage: str = None, framework: str = None, enableNotification: bool = None, enableAutoBuild: bool = None, environmentVariables: Dict = None, basicAuthCredentials: str = None, enableBasicAuth: bool = None, tags: Dict = None, buildSpec: str = None, ttl: str = None) -> Dict:
pass
def create_domain_association(self, appId: str, domainName: str, subDomainSettings: List, enableAutoSubDomain: bool = None) -> Dict:
pass
def delete_app(self, appId: str) -> Dict:
pass
def delete_branch(self, appId: str, branchName: str) -> Dict:
pass
def delete_domain_association(self, appId: str, domainName: str) -> Dict:
pass
def delete_job(self, appId: str, branchName: str, jobId: str) -> Dict:
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_app(self, appId: str) -> Dict:
pass
def get_branch(self, appId: str, branchName: str) -> Dict:
pass
def get_domain_association(self, appId: str, domainName: str) -> Dict:
pass
def get_job(self, appId: str, branchName: str, jobId: str) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def list_apps(self, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def list_branches(self, appId: str, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def list_domain_associations(self, appId: str, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def list_jobs(self, appId: str, branchName: str, nextToken: str = None, maxResults: int = None) -> Dict:
pass
def start_job(self, appId: str, branchName: str, jobType: str, jobId: str = None, jobReason: str = None, commitId: str = None, commitMessage: str = None, commitTime: datetime = None) -> Dict:
pass
def stop_job(self, appId: str, branchName: str, jobId: str) -> Dict:
pass
def update_app(self, appId: str, name: str = None, description: str = None, platform: str = None, iamServiceRoleArn: str = None, environmentVariables: Dict = None, enableBranchAutoBuild: bool = None, enableBasicAuth: bool = None, basicAuthCredentials: str = None, customRules: List = None, buildSpec: str = None) -> Dict:
pass
def update_branch(self, appId: str, branchName: str, description: str = None, framework: str = None, stage: str = None, enableNotification: bool = None, enableAutoBuild: bool = None, environmentVariables: Dict = None, basicAuthCredentials: str = None, enableBasicAuth: bool = None, buildSpec: str = None, ttl: str = None) -> Dict:
pass
def update_domain_association(self, appId: str, domainName: str, subDomainSettings: List, enableAutoSubDomain: bool = None) -> Dict:
pass
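# Usage sketch (hedged): these stubs only annotate the boto3 Amplify client,
# which at runtime is created through boto3 itself. The parameter choices
# below follow the stub signatures above and are illustrative only.
#   import boto3
#   client = boto3.client('amplify')
#   client.create_app(name='my-app',
#                     repository='https://github.com/me/my-app',
#                     platform='WEB', oauthToken='<token>')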
|
deeppy/expr/nnet/dropout.py | purushothamgowthu/deeppy | 1,170 | 12648560 | import cudarray as ca
from ...base import PhaseMixin
from ..base import UnaryElementWise
class Dropout(UnaryElementWise, PhaseMixin):
def __init__(self, dropout=0.5):
self.dropout = dropout
self._tmp_mask = None
self.phase = 'train'
def __call__(self, x):
if self.dropout == 0.0:
return x
return super(Dropout, self).__call__(x)
def setup(self):
super(Dropout, self).setup()
self.mask_shape = self.shape
self._tmp_mask = ca.zeros(self.mask_shape, dtype=ca.int_)
def fprop(self):
if self.phase == 'train':
ca.less(self.dropout, ca.random.uniform(size=self.mask_shape),
self._tmp_mask)
ca.multiply(self.x.array, self._tmp_mask, self.array)
self.array *= 1.0/(1.0-self.dropout)
elif self.phase == 'test':
self.array = self.x.array
else:
raise ValueError('Invalid phase: %s' % self.phase)
def bprop(self):
ca.multiply(self.grad_array, self._tmp_mask, self.x.grad_array)
class SpatialDropout(Dropout):
def setup(self):
super(SpatialDropout, self).setup()
self.mask_shape = self.shape[:2] + (1, 1)
self._tmp_mask = ca.zeros(self.mask_shape, dtype=ca.int_)
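# Usage sketch (hedged; `x_expr` is assumed to be an existing deeppy
# expression node that the dropout layer is applied to):
#   dropout = Dropout(dropout=0.5)
#   y = dropout(x_expr)        # returns x_expr unchanged when dropout == 0.0
#   dropout.phase = 'train'    # fprop samples a mask and rescales by 1/(1 - p)
#   dropout.phase = 'test'     # fprop passes activations through unchanged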
|
test/test_jit_cuda_fuser_profiling.py | jsun94/nimble | 206 | 12648583 |
import sys
sys.argv.append("--ge_config=profiling")
import os
os.environ['PYTORCH_CUDA_FUSER_DISABLE_FALLBACK'] = '1'
os.environ['PYTORCH_CUDA_FUSER_DISABLE_FMA'] = '1'
os.environ['PYTORCH_CUDA_FUSER_JIT_OPT_LEVEL'] = '0'
from test_jit_cuda_fuser import *
if __name__ == '__main__':
run_tests()
|
tests/flow/test_keyspace_accesses.py | 10088/RedisGraph | 313 | 12648616 | from common import *
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../..')
from demo import QueryInfo
graph = None
redis_con = None
GRAPH_ID = "G"
NEW_GRAPH_ID = "G2"
class testKeyspaceAccesses(FlowTestsBase):
def __init__(self):
self.env = Env(decodeResponses=True)
global graph
global redis_con
redis_con = self.env.getConnection()
graph = Graph(redis_con, GRAPH_ID)
def test00_test_data_valid_after_rename(self):
global graph
node0 = Node(node_id=0, label="L", properties={'name':'x', 'age':1})
graph.add_node(node0)
graph.flush()
redis_con.rename(GRAPH_ID, NEW_GRAPH_ID)
graph = Graph(redis_con, NEW_GRAPH_ID)
node1 = Node(node_id=1, label="L", properties={'name':'x', 'age':1})
graph.add_node(node1)
graph.flush()
query = "MATCH (n) return n"
expected_results = [[node0], [node1]]
query_info = QueryInfo(query = query, description="Tests data is valid after renaming", expected_result = expected_results)
self._assert_resultset_and_expected_mutually_included(graph.query(query), query_info)
# Graph queries should fail gracefully on accessing non-graph keys.
def test01_graph_access_on_invalid_key(self):
redis_con.set("integer_key", 5)
graph = Graph(redis_con, "integer_key")
try:
query = """MATCH (n) RETURN noneExistingFunc(n.age) AS cast"""
graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("WRONGTYPE" in str(e))
pass
# Fail gracefully on attempting a graph deletion of an empty key.
def test02_graph_delete_on_empty_key(self):
graph = Graph(redis_con, "nonexistent_key")
try:
graph.delete()
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting an error.
assert("empty key" in str(e))
pass
|
examples/plots/plot_camera_projection.py | alek5k/pytransform3d | 304 | 12648628 |
"""
=================
Camera Projection
=================
We can see the camera coordinate frame and a grid of points in the camera
coordinate system which will be projected on the sensor. From the coordinates
on the sensor we can compute the corresponding pixels.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from pytransform3d.transformations import plot_transform
from pytransform3d.camera import make_world_grid, cam2sensor, sensor2img
focal_length = 0.2
sensor_size = (0.2, 0.15)
image_size = (640, 480)
plt.figure(figsize=(12, 5))
ax = plt.subplot(121, projection="3d")
ax.set_title("Grid in 3D camera coordinate system")
ax.set_xlim((-1, 1))
ax.set_ylim((-1, 1))
ax.set_zlim((0, 2))
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
cam_grid = make_world_grid(n_points_per_line=11) - np.array([0, 0, -2, 0])
img_grid = cam_grid * focal_length
c = np.arange(len(cam_grid))
ax.scatter(cam_grid[:, 0], cam_grid[:, 1], cam_grid[:, 2], c=c)
ax.scatter(img_grid[:, 0], img_grid[:, 1], img_grid[:, 2], c=c)
plot_transform(ax)
sensor_grid = cam2sensor(cam_grid, focal_length)
img_grid = sensor2img(sensor_grid, sensor_size, image_size)
ax = plt.subplot(122, aspect="equal")
ax.set_title("Grid in 2D image coordinate system")
ax.scatter(img_grid[:, 0], img_grid[:, 1], c=c)
ax.set_xlim((0, image_size[0]))
ax.set_ylim((0, image_size[1]))
plt.show()
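# Informal note on the math used above (a sketch, not pytransform3d's docs):
# cam2sensor applies the pinhole model, mapping a camera-frame point (X, Y, Z)
# to roughly (focal_length * X / Z, focal_length * Y / Z) on the sensor, so a
# point at (0.5, 0.5, 2.0) lands near (0.05, 0.05) for focal_length = 0.2;
# sensor2img then rescales sensor_size coordinates to image_size pixels.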
|
opendatatools/aqi/aqi_agent.py | solider245/OpenData | 1,179 | 12648648 |
# encoding: UTF-8
from opendatatools.common import get_current_day
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from opendatatools.common import RestAgent
from opendatatools.aqi.constant import city_code_map
class AQIAgent(RestAgent):
def __init__(self):
RestAgent.__init__(self)
def handle_visit_limit(self):
url = ""
def get_daily_aqi(self, date):
url = "http://datacenter.mep.gov.cn/websjzx/report/list.vm"
page_no = 0
aqi_result = list()
while True:
page_no = page_no + 1
            # 1. Crawl the data page by page
data = {
'pageNum': page_no,
'V_DATE': date,
'xmlname': 1512478367400,
'roleType': 'CFCD2084',
}
rsp = self.do_request(url, data, self.proxies)
if rsp is None:
return None
data = list()
soup = BeautifulSoup(rsp, "html5lib")
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'report_main' in div['class']:
rows = div.table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 9:
city = cols[3].text
aqi = cols[4].text
indicator = cols[5].text
date = cols[6].text
code = cols[7].text
level = cols[8].text
data.append ({
"date" : date,
"city" : city,
"aqi" : aqi,
"code" : code,
"level" : level,
"indicator" : indicator,
})
if len(data) == 0:
                break
aqi_result.extend(data)
df = pd.DataFrame(aqi_result)
return df
def get_hour_aqi(self, time):
url = "http://datacenter.mep.gov.cn/websjzx/report/list.vm"
page_no = 0
aqi_result = list()
while True:
page_no = page_no + 1
            # 1. Crawl the data page by page
data = {
'pageNum': page_no,
'xmlname': 1512382906122,
'roleType': 'CFCD2084',
'V_DATE': time,
'E_DATE' : time,
}
rsp = self.do_request(url, data, self.proxies)
if rsp is None:
return None
data = list()
soup = BeautifulSoup(rsp, "html5lib")
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'report_main' in div['class']:
rows = div.table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 8:
city = cols[2].text
aqi = cols[3].text
indicator = cols[4].text
time = cols[5].text
code = cols[6].text
level = cols[7].text
data.append ({
"time" : time,
"city" : city,
"aqi" : aqi,
"code" : code,
"level" : level,
"indicator" : indicator,
})
if len(data) == 0:
                break
aqi_result.extend(data)
df = pd.DataFrame(aqi_result)
return df
def get_daily_aqi_onecity(self, city):
url = 'http://datacenter.mep.gov.cn/websjzx/report/list.vm'
if city not in city_code_map:
print("this city is not ready !" + city)
return None
city_code = city_code_map[city]
aqi_result = list()
page_no = 0
while True:
page_no = page_no + 1
            # 1. Crawl the data page by page
data = {
'pageNum': page_no,
'citycodes': city_code,
'citytime': "2000-01-01",
'xmlname': "1513844769596kqzllb"
}
rsp = self.do_request(url, data, self.proxies)
            # 2. Parse the returned data and extract the required content from the HTML
data = list()
soup = BeautifulSoup(rsp, "html5lib")
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'report_main' in div['class']:
rows = div.table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 7:
date = cols[1].text
aqi = cols[3].text
level = cols[5].text
indicator = cols[4].text
data.append({
"date" : date,
"aqi" : aqi,
"level" : level,
"indicator" : indicator,
})
if len(data) == 0:
                break
aqi_result.extend(data)
df = pd.DataFrame(aqi_result)
return df
def get_recent_daily_aqi_onecity(self, city):
url = 'http://datacenter.mep.gov.cn/websjzx/report!list.vm'
if city not in city_code_map:
print("this city is not ready !" + city)
return None
city_code = city_code_map[city]
data = {
'citycodes': city_code,
'citytime': get_current_day(),
'xmlname': "1513844769596kqzllb"
}
rsp = self.do_request(url, data, self.proxies)
        # 2. Parse the returned data and extract the required content from the HTML
data = list()
soup = BeautifulSoup(rsp, "html5lib")
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'report_main' in div['class']:
rows = div.table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 7:
date = cols[1].text
aqi = cols[3].text
level = cols[5].text
indicator = cols[4].text
data.append({
"date" : date,
"aqi" : aqi,
"level" : level,
"indicator" : indicator,
})
df = pd.DataFrame(data)
return df
def get_hour_aqi_onecity(self, city, date):
url = 'http://datacenter.mep.gov.cn/websjzx/report/list.vm'
if city not in city_code_map:
print("this city is not ready !" + city)
return None
city_code = city_code_map[city]
aqi_result = list()
page_no = 0
while True:
page_no = page_no + 1
            # 1. Crawl the data page by page
data = {
'pageNum': page_no,
                'citycodes': city_code,
'citytime': date,
'xmlname': "1511257916552",
"queryflag": "close",
"customquery": "false",
"isdesignpatterns": "false",
}
rsp = self.do_request(url, data, self.proxies)
            # 2. Parse the returned data and extract the required content from the HTML
data = list()
soup = BeautifulSoup(rsp, "html5lib")
divs = soup.find_all('div')
for div in divs:
if div.has_attr('class') and 'report_main' in div['class']:
rows = div.table.findAll('tr')
for row in rows:
cols = row.findAll('td')
if len(cols) == 7:
time = cols[2].text
aqi = cols[4].text
city = cols[3].text
level = cols[5].text
indicator = cols[6].text
data.append({
"time" : time,
"aqi" : aqi,
"city" : city,
"level" : level,
"indicator" : indicator,
})
aqi_result.extend(data)
if len(data) < 10:
                break
df = pd.DataFrame(aqi_result)
return df
if __name__ == '__main__':
aqi = AQIAgent()
result = aqi.get_hour_aqi_onecity('北京市','2018-05-26')
print(result) |
python/runtime/local/submitter.py | hebafer/sqlflow | 140 | 12648650 |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import subprocess
import sys
from runtime import db
from runtime.dbapi import table_writer
from runtime.feature.derivation import (get_ordered_field_descs,
infer_feature_columns)
from runtime.model.db import read_metadata_from_db
from runtime.model.model import EstimatorType, Model
from runtime.step.create_result_table import (create_evaluate_table,
create_explain_table,
create_predict_table)
from runtime.step.tensorflow.evaluate import evaluate_step as tf_evaluate
from runtime.step.tensorflow.explain import explain_step as tf_explain
from runtime.step.tensorflow.explain import print_image_as_base64_html
from runtime.step.tensorflow.predict import predict_step as tf_pred
from runtime.step.tensorflow.train import train_step as tf_train
from runtime.step.xgboost.evaluate import evaluate as xgboost_evaluate
from runtime.step.xgboost.explain import explain as xgboost_explain
from runtime.step.xgboost.predict import predict as xgboost_pred
from runtime.step.xgboost.train import train as xgboost_train
def submit_local_train(datasource,
original_sql,
select,
validation_select,
estimator_string,
model_image,
feature_column_map,
label_column,
model_params,
train_params,
validation_params,
save,
load,
user=""):
"""This function run train task locally.
Args:
datasource: string
Like: odps://access_id:[email protected]/api?
curr_project=test_ci&scheme=http
select: string
The SQL statement for selecting data for train
validation_select: string
            The SQL statement for selecting data for validation
estimator_string: string
TensorFlow estimator name, Keras class name, or XGBoost
model_image: string
Docker image used to train this model,
default: sqlflow/sqlflow:step
feature_column_map: string
A map of Python feature column IR.
label_column: string
Feature column instance describing the label.
model_params: dict
            Params for training, corresponding to the WITH clause
train_params: dict
Extra train params, will be passed to runtime.tensorflow.train
or runtime.xgboost.train. Optional fields:
- disk_cache: Use dmatrix disk cache if True, default: False.
- batch_size: Split data to batches and train, default: 1.
- epoch: Epochs to train, default: 1.
validation_params: dict
Params for validation.
save: string
Model name to be saved.
load: string
The pre-trained model name to load
user: string
Not used for local submitter, used in runtime.pai only.
"""
if estimator_string.lower().startswith("xgboost"):
train_func = xgboost_train
else:
train_func = tf_train
with db.connect_with_data_source(datasource) as conn:
feature_column_map, label_column = infer_feature_columns(
conn, select, feature_column_map, label_column, n=1000)
return train_func(original_sql=original_sql,
model_image=model_image,
estimator_string=estimator_string,
datasource=datasource,
select=select,
validation_select=validation_select,
model_params=model_params,
train_params=train_params,
validation_params=validation_params,
feature_column_map=feature_column_map,
label_column=label_column,
save=save,
load=load)
def submit_local_pred(datasource,
original_sql,
select,
model,
label_name,
pred_params,
result_table,
user=""):
model = Model.load_from_db(datasource, model)
if model.get_type() == EstimatorType.XGBOOST:
pred_func = xgboost_pred
else:
pred_func = tf_pred
if model.get_meta("label") is None:
train_label_desc = None
else:
train_label_desc = model.get_meta("label").get_field_desc()[0]
if pred_params is None:
extra_result_cols = []
else:
extra_result_cols = pred_params.get("predict.extra_outputs", "")
extra_result_cols = [
c.strip() for c in extra_result_cols.split(",") if c.strip()
]
with db.connect_with_data_source(datasource) as conn:
result_column_names, train_label_idx = create_predict_table(
conn, select, result_table, train_label_desc, label_name,
extra_result_cols)
pred_func(datasource=datasource,
select=select,
result_table=result_table,
result_column_names=result_column_names,
train_label_idx=train_label_idx,
model=model,
extra_result_cols=extra_result_cols)
def submit_local_evaluate(datasource,
original_sql,
select,
label_name,
model,
model_params,
result_table,
user=""):
model = Model.load_from_db(datasource, model)
if model.get_type() == EstimatorType.XGBOOST:
evaluate_func = xgboost_evaluate
validation_metrics = model_params.get("validation.metrics",
"accuracy_score")
else:
evaluate_func = tf_evaluate
validation_metrics = model_params.get("validation.metrics", "Accuracy")
conn = db.connect_with_data_source(datasource)
validation_metrics = [m.strip() for m in validation_metrics.split(",")]
result_column_names = create_evaluate_table(conn, result_table,
validation_metrics)
conn.close()
evaluate_func(datasource=datasource,
select=select,
result_table=result_table,
model=model,
label_name=label_name,
model_params=model_params,
result_column_names=result_column_names)
def submit_local_explain(datasource,
original_sql,
select,
model,
model_params,
result_table,
explainer="TreeExplainer",
user=""):
model = Model.load_from_db(datasource, model)
if model.get_type() == EstimatorType.XGBOOST:
explain_func = xgboost_explain
else:
explain_func = tf_explain
if result_table:
feature_columns = model.get_meta("features")
estimator_string = model.get_meta("class_name")
field_descs = get_ordered_field_descs(feature_columns)
feature_column_names = [fd.name for fd in field_descs]
with db.connect_with_data_source(datasource) as conn:
create_explain_table(conn, model.get_type(), explainer,
estimator_string, result_table,
feature_column_names)
explain_func(datasource=datasource,
select=select,
explainer=explainer,
model_params=model_params,
result_table=result_table,
model=model)
if not result_table:
print_image_as_base64_html("summary.png")
SQLFLOW_TO_RUN_CONTEXT_KEY_SELECT = "SQLFLOW_TO_RUN_SELECT"
SQLFLOW_TO_RUN_CONTEXT_KEY_INTO = "SQLFLOW_TO_RUN_INTO"
SQLFLOW_TO_RUN_CONTEXT_KEY_IMAGE = "SQLFLOW_TO_RUN_IMAGE"
def submit_local_run(datasource, select, image_name, params, into):
if not params:
raise ValueError("params should not be None or empty.")
subprocess_env = os.environ.copy()
update_env = {
SQLFLOW_TO_RUN_CONTEXT_KEY_SELECT: select,
SQLFLOW_TO_RUN_CONTEXT_KEY_INTO: into,
SQLFLOW_TO_RUN_CONTEXT_KEY_IMAGE: image_name
}
subprocess_env.update(update_env)
program_file_path = pathlib.Path(params[0])
    if not program_file_path.is_file():
raise ValueError("{} is not a file".format(params[0]))
sub_process = None
file_ext = program_file_path.suffix
if not file_ext:
args = [program_file_path]
args.extend(params[1:])
sub_process = subprocess.run(args=args,
env=subprocess_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
elif file_ext.lower() == ".py":
args = ["python", "-m", program_file_path.stem]
args.extend(params[1:])
sub_process = subprocess.run(args=args,
env=subprocess_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
raise RuntimeError(
"The other executable except Python program is not supported yet")
if sub_process:
print(sub_process.stdout.decode("utf-8"))
if sub_process.returncode != 0:
print(sub_process.stderr.decode("utf-8"), file=sys.stderr)
raise RuntimeError("Executing {} failed.".format(params[0]))
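# Hedged sketch of how a TO RUN program could read the context exported by
# submit_local_run; the environment variable names come from the constants
# above, everything else is illustrative:
#   import os
#   select_stmt = os.environ.get("SQLFLOW_TO_RUN_SELECT", "")
#   result_table = os.environ.get("SQLFLOW_TO_RUN_INTO", "")
#   step_image = os.environ.get("SQLFLOW_TO_RUN_IMAGE", "")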
def submit_local_show_train(datasource, model_name):
meta = read_metadata_from_db(datasource, model_name)
original_sql = meta.get("original_sql")
if not original_sql:
raise ValueError("cannot find the train SQL statement")
result_set = [(model_name, original_sql)]
header = ["Model", "Train Statement"]
writer = table_writer.ProtobufWriter(result_set, header)
for line in writer.dump_strings():
print(line)
|
indy_node/test/state_proof/test_state_proof_for_missing_data.py | Rob-S/indy-node | 627 | 12648697 | import pytest
from indy_common.constants import GET_ATTR, GET_NYM, GET_SCHEMA, GET_CLAIM_DEF, CLAIM_DEF_FROM, CLAIM_DEF_SCHEMA_REF, \
CLAIM_DEF_SIGNATURE_TYPE, SCHEMA_NAME, SCHEMA_VERSION, SCHEMA_ATTR_NAMES, JSON_LD_CONTEXT, RICH_SCHEMA, \
RICH_SCHEMA_ENCODING, RICH_SCHEMA_MAPPING, RICH_SCHEMA_CRED_DEF, RS_CRED_DEF_TYPE_VALUE, \
RS_ENCODING_TYPE_VALUE, RS_MAPPING_TYPE_VALUE, RS_SCHEMA_TYPE_VALUE, RS_CONTEXT_TYPE_VALUE, RS_ID, \
GET_RICH_SCHEMA_OBJECT_BY_ID, GET_RICH_SCHEMA_OBJECT_BY_METADATA, RS_NAME, RS_VERSION, RS_TYPE, \
RICH_SCHEMA_PRES_DEF, RS_PRES_DEF_TYPE_VALUE
from indy_node.test.rich_schema.templates import W3C_BASE_CONTEXT, RICH_SCHEMA_EX1
from indy_node.test.state_proof.helper import check_valid_proof, \
sdk_submit_operation_and_get_result
from plenum.common.constants import TARGET_NYM, TXN_TYPE, RAW, DATA
# fixtures, do not remove
from indy_node.test.attrib_txn.test_nym_attrib import \
sdk_added_raw_attribute, attributeName, attributeValue, attributeData
from plenum.common.util import randomString
def check_no_data_and_valid_proof(result):
assert result.get(DATA) is None
check_valid_proof(result)
def test_state_proof_returned_for_missing_attr(looper, nodeSetWithOneNodeResponding,
attributeName,
sdk_pool_handle,
sdk_wallet_endorser):
"""
Tests that state proof is returned in the reply for GET_ATTR transactions
"""
_, dest = sdk_wallet_endorser
get_attr_operation = {
TARGET_NYM: dest,
TXN_TYPE: GET_ATTR,
RAW: attributeName
}
result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_endorser, get_attr_operation)
check_no_data_and_valid_proof(result)
def test_state_proof_returned_for_missing_nym(looper, nodeSetWithOneNodeResponding,
sdk_pool_handle,
sdk_wallet_endorser,
sdk_user_wallet_a):
"""
Tests that state proof is returned in the reply for GET_NYM transactions
"""
# Make not existing id
_, dest = sdk_user_wallet_a
dest = dest[:-3]
dest += "fff"
get_nym_operation = {
TARGET_NYM: dest,
TXN_TYPE: GET_NYM
}
result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_endorser, get_nym_operation)
check_no_data_and_valid_proof(result)
def test_state_proof_returned_for_missing_schema(looper, nodeSetWithOneNodeResponding,
sdk_pool_handle,
sdk_wallet_endorser):
"""
Tests that state proof is returned in the reply for GET_SCHEMA transactions
"""
_, dest = sdk_wallet_endorser
schema_name = "test_schema"
schema_version = "1.0"
get_schema_operation = {
TARGET_NYM: dest,
TXN_TYPE: GET_SCHEMA,
DATA: {
SCHEMA_NAME: schema_name,
SCHEMA_VERSION: schema_version,
}
}
result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_endorser,
get_schema_operation)
assert SCHEMA_ATTR_NAMES not in result[DATA]
check_valid_proof(result)
def test_state_proof_returned_for_missing_claim_def(looper, nodeSetWithOneNodeResponding,
sdk_pool_handle,
sdk_wallet_endorser):
"""
Tests that state proof is returned in the reply for GET_CLAIM_DEF
transactions
"""
_, dest = sdk_wallet_endorser
get_claim_def_operation = {
CLAIM_DEF_FROM: dest,
TXN_TYPE: GET_CLAIM_DEF,
CLAIM_DEF_SCHEMA_REF: 12,
CLAIM_DEF_SIGNATURE_TYPE: 'CL'
}
result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_endorser,
get_claim_def_operation)
check_no_data_and_valid_proof(result)
@pytest.mark.skip
# TODO fix this test so it does not rely on Indy-SDK,
# or, fix this test once Rich Schema objects are part of Indy-SDK
def test_state_proof_returned_for_missing_get_rich_schema_obj_by_id(looper,
nodeSetWithOneNodeResponding,
sdk_wallet_endorser,
sdk_pool_handle,
sdk_wallet_client):
"""
Tests that state proof is returned in the reply for GET_RICH_SCHEMA_OBJECT_BY_ID.
Use different submitter and reader!
"""
rs_id = randomString()
get_rich_schema_by_id_operation = {
TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_ID,
RS_ID: rs_id,
}
result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_client,
get_rich_schema_by_id_operation)
check_no_data_and_valid_proof(result)
@pytest.mark.skip
# TODO fix this test so it does not rely on Indy-SDK,
# or, fix this test once Rich Schema objects are part of Indy-SDK
@pytest.mark.parametrize('rs_type',
[RS_CONTEXT_TYPE_VALUE, RS_SCHEMA_TYPE_VALUE, RS_ENCODING_TYPE_VALUE, RS_MAPPING_TYPE_VALUE,
RS_CRED_DEF_TYPE_VALUE, RS_PRES_DEF_TYPE_VALUE])
def test_state_proof_returned_for_missing_get_rich_schema_obj_by_metadata(looper,
nodeSetWithOneNodeResponding,
sdk_wallet_endorser,
sdk_pool_handle,
sdk_wallet_client,
rs_type):
"""
Tests that state proof is returned in the reply for GET_RICH_SCHEMA_OBJECT_BY_ID.
Use different submitter and reader!
"""
rs_name = randomString()
rs_version = '1.0'
get_rich_schema_by_metadata_operation = {
TXN_TYPE: GET_RICH_SCHEMA_OBJECT_BY_METADATA,
RS_NAME: rs_name,
RS_VERSION: rs_version,
RS_TYPE: rs_type
}
result = sdk_submit_operation_and_get_result(looper, sdk_pool_handle,
sdk_wallet_client,
get_rich_schema_by_metadata_operation)
check_no_data_and_valid_proof(result)
|
Scripts/sims4communitylib/utils/objects/common_object_ownership_utils.py | ColonolNutty/Sims4CommunityLibrary | 118 | 12648711 |
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Union
from objects.components.ownable_component import OwnableComponent
from objects.game_object import GameObject
from server_commands.argument_helpers import OptionalTargetParam
from sims.sim_info import SimInfo
from sims4.commands import Command, CommandType, CheatOutput
from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
from sims4communitylib.modinfo import ModInfo
from sims4communitylib.utils.sims.common_sim_name_utils import CommonSimNameUtils
from sims4communitylib.utils.sims.common_sim_utils import CommonSimUtils
class CommonObjectOwnershipUtils:
"""Utilities for manipulating the ownership of Objects.
"""
@staticmethod
def set_owning_household_id(game_object: GameObject, household_id: int) -> bool:
"""set_owning_household_id(game_object, household_id)
Set the Household that owns the Object.
:param game_object: An instance of an Object.
:type game_object: GameObject
:param household_id: The decimal identifier of a Household.
:type household_id: int
:return: True, if the Household was successfully set as the owner. False, if not.
:rtype: bool
"""
if game_object is None or household_id == -1:
return False
game_object.set_household_owner_id(household_id)
return True
@staticmethod
def get_owning_household_id(game_object: GameObject) -> int:
"""get_owning_household_id(game_object)
Retrieve the decimal identifier of the Household that owns the Object.
:param game_object: An instance of an Object.
:type game_object: GameObject
:return: The decimal identifier of the Household that owns the object.
:rtype: int
"""
if game_object is None:
return -1
return game_object.get_household_owner_id()
@staticmethod
def set_owning_sim(game_object: GameObject, sim_info: SimInfo, make_sim_sole_owner: bool=True) -> bool:
"""set_owning_sim(game_object, sim_info, make_sim_sole_owner=True)
Change the ownership of an Object to become owned by the household of a Sim and optional by the Sim themselves.
:param game_object: An instance of an Object.
:type game_object: GameObject
:param sim_info: An instance of a Sim.
:type sim_info: SimInfo
:param make_sim_sole_owner: If True, the Sim will become the sole owner in their household of the Object (In addition to the household owning it). If False, only the household will own the Object. Default is True.
:type make_sim_sole_owner: bool, optional
:return: True, if ownership was transferred successfully. False, if not.
:rtype: bool
"""
if game_object is None or sim_info is None:
return False
sim = CommonSimUtils.get_sim_instance(sim_info)
if sim is None:
return False
game_object.update_ownership(sim, make_sim_owner=make_sim_sole_owner)
return True
@staticmethod
def get_owning_sim(game_object: GameObject) -> Union[SimInfo, None]:
"""get_owning_sim(game_object)
Retrieve the Sim that owns an Object, if a Sim owns the Object.
:param game_object: An instance of an Object.
:type game_object: GameObject
:return: The SimInfo of the Sim that owns the specified Object or None if no Sim owns the Object.
:rtype: Union[SimInfo, None]
"""
if game_object is None:
return None
ownable_component: OwnableComponent = CommonObjectOwnershipUtils.get_ownable_component(game_object)
if ownable_component is None:
return None
return CommonSimUtils.get_sim_info(ownable_component.get_sim_owner_id())
@staticmethod
def get_ownable_component(game_object: GameObject) -> Union[OwnableComponent, None]:
"""get_ownable_component(game_object)
Retrieve the Ownable Component of an Object if it has one.
:param game_object: An instance of an Object.
:type game_object: GameObject
:return: The OwnableComponent of the Object or None if no OwnableComponent was found.
:rtype: Union[OwnableComponent, None]
"""
if game_object is None:
return None
if not hasattr(game_object, 'ownable_component'):
return None
return game_object.ownable_component
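    # Usage sketch (hedged; `game_object` and `sim_info` are assumed to come
    # from CommonObjectUtils/CommonSimUtils lookups, as in the command below):
    #   if CommonObjectOwnershipUtils.set_owning_sim(game_object, sim_info):
    #       owner = CommonObjectOwnershipUtils.get_owning_sim(game_object)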
@Command('s4clib.change_ownership', command_type=CommandType.Live)
def _common_change_ownership(object_id: str='20359', opt_sim: OptionalTargetParam=None, _connection: int=None):
from server_commands.argument_helpers import get_optional_target
output = CheatOutput(_connection)
sim_info = CommonSimUtils.get_sim_info(get_optional_target(opt_sim, _connection))
if sim_info is None:
output('Failed, no Sim was specified or the specified Sim was not found!')
return
# noinspection PyBroadException
try:
object_id = int(object_id)
except Exception:
output('ERROR: object_id must be a number.')
return
if object_id < 0:
output('ERROR: object_id must be a positive number.')
return
output('Attempting to change the ownership of object with id \'{}\'.'.format(object_id))
from sims4communitylib.utils.objects.common_object_utils import CommonObjectUtils
game_object = CommonObjectUtils.get_game_object(object_id)
if game_object is None:
output('ERROR: No object was found with id \'{}\''.format(object_id))
return
output('Object found, attempting to change the ownership of it to {}. {}'.format(CommonSimNameUtils.get_full_name(sim_info), game_object))
try:
if CommonObjectOwnershipUtils.set_owning_sim(game_object, sim_info, make_sim_sole_owner=True):
output('Successfully changed the ownership of the object.')
else:
output('ERROR: Failed to change the ownership of the object.')
except Exception as ex:
output('ERROR: A problem occurred while attempting to change the ownership of the object. {}'.format(object_id))
CommonExceptionHandler.log_exception(ModInfo.get_identity(), 'Error occurred attempting to change the ownership of object. {}'.format(object_id), exception=ex)
output('Done changing the ownership of the object.')
|
zippy/benchmarks/src/micro/list-indexing.py | lucapele/pele-c | 319 | 12648828 |
# zwei 12/03/13
# subscript a list by index
import time
def index_list(ll, num):
ll_len = len(ll)
item = 0
for t in range(num):
for i in range(ll_len):
item = (item + ll[i]) % 7
return item
def measure():
print("Start timing...")
start = time.time()
ll = [x*2 for x in range(1000)]
last_item = index_list(ll, 1000000) #1000000
print("Last item ", last_item)
duration = "%.3f\n" % (time.time() - start)
print("list-indexing: " + duration)
#warm up
for run in range(1200):
index_list([1,2,3,4,5,6,7,8,9,10], 10000)
measure() |
ISMLnextGen/postTest.py | Ravenclaw-OIer/ISML_auto_voter | 128 | 12648841 | from requests import post
headers={'ipNum':'5'}
payload={'0':'1.1.1.1:8080',
'1':'2.2.2.2:8080',
'2':'2.2.2.2:8080',
'3':'2.2.2.2:8080',
'4':'2.2.2.2:8080',}
response=post(url='http://127.0.0.1:8999/main',headers=headers,json=payload)
pass |
sleekxmpp/plugins/xep_0059/__init__.py | E-Tahta/sleekxmpp | 499 | 12648848 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 <NAME>, <NAME>
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.plugins.base import register_plugin
from sleekxmpp.plugins.xep_0059.stanza import Set
from sleekxmpp.plugins.xep_0059.rsm import ResultIterator, XEP_0059
register_plugin(XEP_0059)
# Retain some backwards compatibility
xep_0059 = XEP_0059
|
tests/test_expression.py | talhaHavadar/dissect.cstruct | 227 | 12648896 |
import pytest
from dissect.cstruct.expression import Expression
testdata = [
('1 * 0', 0),
('1 * 1', 1),
('7 * 8', 56),
('7*8', 56),
('7 *8', 56),
(' 7 * 8 ', 56),
('0 / 1', 0),
('1 / 1', 1),
('2 / 2', 1),
('3 / 2', 1),
('4 / 2', 2),
('1 % 1', 0),
('1 % 2', 1),
('5 % 3', 2),
('0 + 0', 0),
('1 + 0', 1),
('1 + 3', 4),
('0 - 0', 0),
('1 - 0', 1),
('0 - 1', -1),
('1 - 3', -2),
('3 - 1', 2),
('0x0 >> 0', 0x0),
('0x1 >> 0', 0x1),
('0x1 >> 1', 0x0),
('0xf0 >> 4', 0xf),
('0x0 << 4', 0),
('0x1 << 0', 1),
('0xf << 4', 0xf0),
('0 & 0', 0),
('1 & 0', 0),
('1 & 1', 1),
('1 & 2', 0),
('1 ^ 1', 0),
('1 ^ 0', 1),
('1 ^ 3', 2),
('0 | 0', 0),
('0 | 1', 1),
('1 | 1', 1),
('1 | 2', 3),
# This type of expression is not supported by the parser and will fail.
# ('4 * 1 + 1', 5),
('-42', -42),
('42 + (-42)', 0),
('A + 5', 13),
('21 - B', 8),
('A + B', 21),
]
class Consts(object):
consts = {
'A': 8,
'B': 13,
}
def id_fn(val):
if isinstance(val, (str,)):
return val
@pytest.mark.parametrize('expression, answer',
testdata,
ids=id_fn)
def test_expression(expression, answer):
parser = Expression(Consts(), expression)
assert parser.evaluate() == answer
|
algorithms/utils/spaces/discretized.py | magicly/sample-factory | 320 | 12648991 |
from gym.spaces import Discrete
class Discretized(Discrete):
def __init__(self, n, min_action, max_action):
super().__init__(n)
self.min_action = min_action
self.max_action = max_action
def to_continuous(self, discrete_action):
step = (self.max_action - self.min_action) / (self.n - 1) # min=-1, max=1, n=11, step=0.2
action = self.min_action + discrete_action * step
return action
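# Worked example (illustrative): Discretized(11, -1.0, 1.0) gives
# step = (1.0 - (-1.0)) / (11 - 1) = 0.2, so to_continuous(0) == -1.0,
# to_continuous(5) == 0.0 and to_continuous(10) == 1.0.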
|
src/settings_csv.py | hodgerpodger/staketaxcsv | 140 | 12649019 | import os
# Environment variables (required for each respective report)
ALGO_HIST_INDEXER_NODE = os.environ.get("ALGO_HIST_INDEXER_NODE", "https://indexer.algoexplorerapi.io")
ALGO_INDEXER_NODE = os.environ.get("ALGO_INDEXER_NODE", "https://algoindexer.algoexplorerapi.io")
ALGO_NFDOMAINS = os.environ.get("ALGO_NFDOMAINS", "https://api.nf.domains")
ATOM_NODE = os.environ.get("ATOM_NODE", "")
COVALENT_NODE = os.environ.get("COVALENT_NODE", "https://api.covalenthq.com")
DVPN_LCD_NODE = os.environ.get("DVPN_LCD_NODE", "https://lcd.sentinel.co")
DVPN_RPC_NODE = os.environ.get("DVPN_RPC_NODE", "https://rpc.sentinel.co")
FET_NODE = os.environ.get("FET_NODE", "https://rest-fetchhub.fetch.ai")
HUAHUA_NODE = os.environ.get("HUAHUA_NODE", "")
JUNO_NODE = os.environ.get("JUNO_NODE", "")
BTSG_NODE = os.environ.get("BTSG_NODE", "https://lcd.explorebitsong.com")
STARS_NODE = os.environ.get("STARS_NODE", "")
SOL_NODE = os.environ.get("SOL_NODE", "")
TERRA_LCD_NODE = os.environ.get("TERRA_LCD_NODE", "")
LUNA2_LCD_NODE = os.environ.get("LUNA2_LCD_NODE", "https://phoenix-lcd.terra.dev")
# Optional environment variables
COVALENT_API_KEY = os.environ.get("COVALENT_API_KEY", "")
# #############################################################################
TICKER_ALGO = "ALGO"
TICKER_ATOM = "ATOM"
TICKER_DVPN = "DVPN"
TICKER_FET = "FET"
TICKER_HUAHUA = "HUAHUA"
TICKER_IOTEX = "IOTX"
TICKER_JUNO = "JUNO"
TICKER_BTSG = "BTSG"
TICKER_STARS = "STARS"
TICKER_LUNA1 = "LUNA1"
TICKER_LUNA2 = "LUNA2"
TICKER_OSMO = "OSMO"
TICKER_SOL = "SOL"
DONATION_WALLETS = set([
os.environ.get("DONATION_WALLET_ALGO", ""),
os.environ.get("DONATION_WALLET_ATOM", ""),
os.environ.get("DONATION_WALLET_FET", ""),
os.environ.get("DONATION_WALLET_HUAHUA", ""),
os.environ.get("DONATION_WALLET_IOTX", ""),
os.environ.get("DONATION_WALLET_JUNO", ""),
os.environ.get("DONATION_WALLET_BTSG", ""),
os.environ.get("DONATION_WALLET_STARS", ""),
os.environ.get("DONATION_WALLET_LUNA", ""),
os.environ.get("DONATION_WALLET_OSMO", ""),
os.environ.get("DONATION_WALLET_SOL", ""),
])
MESSAGE_ADDRESS_NOT_FOUND = "Wallet address not found"
MESSAGE_STAKING_ADDRESS_FOUND = "Staking address found. Please input the main wallet address instead."
REPORTS_DIR = os.path.dirname(os.path.realpath(__file__)) + "/_reports"
|
KG/DuEE_baseline/bin/finetune/sequence_label.py | pkulzb/Research | 1,319 | 12649043 |
# coding: utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""sequence labeling model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import os
import time
import json
import argparse
import numpy as np
import multiprocessing
import paddle
import logging
import paddle.fluid as fluid
from six.moves import xrange
from model.ernie import ErnieModel
log = logging.getLogger(__name__)
def create_model(args, pyreader_name, ernie_config, is_prediction=False):
"""func"""
pyreader = fluid.layers.py_reader(
capacity=50,
shapes=[[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1],
[-1, args.max_seq_len, 1], [-1, args.max_seq_len, 1], [-1]],
dtypes=[
'int64', 'int64', 'int64', 'int64', 'float32', 'int64', 'int64'
],
lod_levels=[0, 0, 0, 0, 0, 0, 0],
name=pyreader_name,
use_double_buffer=True)
(src_ids, sent_ids, pos_ids, task_ids, input_mask, labels,
seq_lens) = fluid.layers.read_file(pyreader)
ernie = ErnieModel(
src_ids=src_ids,
position_ids=pos_ids,
sentence_ids=sent_ids,
task_ids=task_ids,
input_mask=input_mask,
config=ernie_config,
use_fp16=args.use_fp16)
enc_out = ernie.get_sequence_output()
emission = fluid.layers.fc(
input=enc_out,
size=args.num_labels,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-0.1, high=0.1),
regularizer=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=1e-4)),
num_flatten_dims=2)
crf_cost = fluid.layers.linear_chain_crf(
input=emission,
label=labels,
param_attr=fluid.ParamAttr(
name='crfw', learning_rate=args.crf_learning_rate),
length=seq_lens)
loss = fluid.layers.mean(x=crf_cost)
crf_decode = fluid.layers.crf_decoding(
input=emission,
param_attr=fluid.ParamAttr(name='crfw'),
length=seq_lens)
lod_labels = fluid.layers.squeeze(labels, axes=[-1])
num_chunk_types = (
        (args.num_labels - 1) // (len(args.chunk_scheme) - 1))  # IOB configuration
(_, _, _, num_infer, num_label, num_correct) = fluid.layers.chunk_eval(
input=crf_decode,
label=lod_labels,
chunk_scheme=args.chunk_scheme,
num_chunk_types=num_chunk_types,
seq_length=seq_lens)
"""
enc_out = fluid.layers.dropout(x=enc_out,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
logits = fluid.layers.fc(
input=enc_out,
size=args.num_labels,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name="cls_seq_label_out_w",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(name="cls_seq_label_out_b",
initializer=fluid.initializer.Constant(0.)))
infers = fluid.layers.argmax(logits, axis=2)
ret_infers = fluid.layers.reshape(x=infers, shape=[-1, 1])
lod_labels = fluid.layers.sequence_unpad(labels, seq_lens)
lod_infers = fluid.layers.sequence_unpad(infers, seq_lens)
num_chunk_types = (
        (args.num_labels - 1) // (len(args.chunk_scheme) - 1)) # IOB configuration
(_, _, _, num_infer, num_label,
num_correct) = fluid.layers.chunk_eval(input=lod_infers,
label=lod_labels,
chunk_scheme=args.chunk_scheme,
num_chunk_types=num_chunk_types)
labels = fluid.layers.flatten(labels, axis=2)
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=fluid.layers.flatten(logits, axis=2),
label=labels,
return_softmax=True)
input_mask = fluid.layers.flatten(input_mask, axis=2)
ce_loss = ce_loss * input_mask
loss = fluid.layers.mean(x=ce_loss)
"""
graph_vars = {
"inputs": src_ids,
"loss": loss,
"seqlen": seq_lens,
"crf_decode": crf_decode,
"num_infer": num_infer,
"num_label": num_label,
"num_correct": num_correct,
}
for k, v in graph_vars.items():
v.persistable = True
return pyreader, graph_vars
def calculate_f1(num_label, num_infer, num_correct):
"""calculate_f1"""
if num_infer == 0:
precision = 0.0
else:
precision = num_correct * 1.0 / num_infer
if num_label == 0:
recall = 0.0
else:
recall = num_correct * 1.0 / num_label
if num_correct == 0:
f1 = 0.0
else:
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
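# Worked example (illustrative): with num_label=10, num_infer=8 and
# num_correct=6, precision = 6/8 = 0.75, recall = 6/10 = 0.6 and
# f1 = 2 * 0.75 * 0.6 / (0.75 + 0.6) ~= 0.667.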
def evaluate(exe, program, pyreader, graph_vars, tag_num, dev_count=1):
"""func"""
fetch_list = [
graph_vars["num_infer"].name, graph_vars["num_label"].name,
graph_vars["num_correct"].name
]
total_label, total_infer, total_correct = 0.0, 0.0, 0.0
time_begin = time.time()
pyreader.start()
while True:
try:
np_num_infer, np_num_label, np_num_correct = exe.run(
program=program, fetch_list=fetch_list)
total_infer += np.sum(np_num_infer)
total_label += np.sum(np_num_label)
total_correct += np.sum(np_num_correct)
except fluid.core.EOFException:
pyreader.reset()
break
precision, recall, f1 = calculate_f1(total_label, total_infer,
total_correct)
return precision, recall, f1
def parse_crf_ret(np_inputs, crf_decodes, np_lens):
"""parse_crf_ret"""
np_inputs = np.squeeze(np_inputs)
out = []
for index in range(len(np_lens)):
src_ids = [_id for _id in np_inputs[index][1:np_lens[index] - 1]]
tag_ids = [_id for _id in crf_decodes[index][1:np_lens[index] - 1]]
out.append((list(src_ids), list(tag_ids)))
return out
def predict(exe, test_program, test_pyreader, graph_vars, dev_count=1):
"""func"""
fetch_list = [
graph_vars["inputs"].name,
graph_vars["crf_decode"].name,
graph_vars["seqlen"].name,
]
test_pyreader.start()
res = []
while True:
try:
inputs, crf_decodes, np_lens = exe.run(program=test_program,
fetch_list=fetch_list)
#r = chunk_predict(inputs, probs, np_lens, dev_count)
res += parse_crf_ret(inputs, crf_decodes, np_lens)
except fluid.core.EOFException:
test_pyreader.reset()
break
return res
|
iot_hunter/dynamic_analysis/DynamicAnalyzer.py | byamao1/HaboMalHunter | 727 | 12649049 |
# Tencent is pleased to support the open source community by making IoTHunter available.
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import subprocess
import time
import logging
import os
import re
import json
import shutil
import hashlib
import VMControl
import ConfigManager
class DynamicBehaviors(object):
"""Dynamic Behaviors Class"""
def __init__(self):
self.filename = ''
self.md5_hash = ''
self.file_log = {
'read': [],
'write': [],
'open': [],
'unlink':[]
}
self.socket_log = {
'connect': [],
'recvfrom': [],
'sendto': [],
'bind': []
}
self.tcp_log = []
self.http_log = []
self.udp_log = []
self.irc_log = []
self.dns_log = []
self.file_read_data = {}
self.recvfrom_data = {}
self.plugins_result = {}
self.proc_log = {
'execve': [],
'clone': []
}
self.packets = []
def to_report(self):
report = {
'md5_hash': self.md5_hash,
'filename': self.filename,
'file_log': self.file_log,
'socket_log': self.socket_log,
'file_read_data': self.file_read_data,
'recvfrom_data': self.recvfrom_data,
'tcp_info': self.tcp_log,
'udp_info': self.udp_log,
'http_info': self.http_log,
'irc_info': self.irc_log,
'dns_info': self.dns_log,
'plugin_info': self.plugins_result,
'proc_info': self.proc_log,
'packets_info':self.packets
}
return json.dumps(report, indent=4)
class DynamicAnalyzer:
"""Dynamic Analyzer for Iot Malware
"""
def __init__(self):
self.strace_log = []
self.analyze_timeout = 10
def init_vm(self):
"""init vm controller configuration. """
self.vm_control = VMControl.VMController()
self.vm_control.init_config()
vmc = ConfigManager.ConfigManager()
self.strace_log_max_lines = vmc.get('analyzer', 'max_strace_lines')
self.strace_log_path = vmc.get('analyzer', 'strace_log_path')
self.tshark_path = vmc.get('analyzer', 'tshark_path')
self.tcpdump_log_path = vmc.get('analyzer', 'host_log_tcpdump')
def set_log_path(self, logpath):
"""set log path"""
self.log_path = logpath
def parse_strace_log(self, log_path):
"""parse strace log."""
line_count = 0
self.strace_log_path = log_path
with open(self.strace_log_path, 'r') as log_file:
for line in log_file.readlines():
self.strace_log.append(line)
line_count = line_count + 1
if line_count >= 20000:
break
def parse_proc_log(self, behavior_obj):
for line in self.strace_log:
if 'execve(' in line:
behavior_obj.proc_log['execve'].append(
line[line.find('execve'):-1])
if 'clone(' in line:
behavior_obj.proc_log['clone'].append(
line[line.find('clone'):-1])
def parse_file_log(self, behavior_obj):
"""Parse file related log from strace."""
for line in self.strace_log:
# if 'read(' in line:
# behavior_obj.file_log['read'].append(
# line[line.find('read'):-1])
if 'openat(' in line:
behavior_obj.file_log['open'].append(
line[line.find('openat'):-1])
if 'unlink(' in line:
behavior_obj.file_log['unlink'].append(
line[line.find('unlink'):-1])
# if 'write(' in line:
# behavior_obj.file_log['write'].append(
# line[line.find('write'):-1])
    def format_recvfrom_str(self, line, recvfrom_data):
        """format recvfrom function string."""
        read_data_pattern = re.compile(r'recvfrom\(.+,.+,.+,.+,.+\)')
        read_func_find = read_data_pattern.search(line)
        if read_func_find:
            read_func_str = read_func_find.group(0)
            fd = read_func_str.split(',')[0][9:]
            read_byte = read_func_str.split(',')[1][2:-1]
            if fd != '' and read_byte != ' ':
                if recvfrom_data.has_key(fd):
                    recvfrom_data[fd] = recvfrom_data[fd] + read_byte
                else:
                    recvfrom_data[fd] = read_byte
def format_read_str(self, line, file_read_data):
"""format read function args."""
read_data_pattern = re.compile(r'read\(.+,.+,.+\)')
read_func_find = read_data_pattern.search(line)
if read_func_find:
read_func_str = read_func_find.group(0)
fd = read_func_str.split(',')[0][5:]
read_byte = read_func_str.split(',')[1][2:-1]
if fd != '' and read_byte != ' ':
if file_read_data.has_key(fd):
file_read_data[fd] = file_read_data[fd] + read_byte
else:
file_read_data[fd] = read_byte
def parse_file_read_data(self, behavior_obj):
"""parse file data from strace."""
for line in self.strace_log:
if 'read(' in line:
self.format_read_str(line, behavior_obj.file_read_data)
def parse_recvfrom_data(self, behavior_obj):
"""parse recvfrom data ."""
for line in self.strace_log:
if 'recvfrom(' in line:
self.format_recvfrom_str(line, behavior_obj.recvfrom_data)
def parse_socket_log(self, behavior_obj):
"""parse socket related log from starce."""
for line in self.strace_log:
if 'connect(' in line:
behavior_obj.socket_log['connect'].append(
self.parse_ip_port(line))
if 'bind(' in line:
behavior_obj.socket_log['bind'].append(
self.parse_ip_port(line))
if 'sendto(' in line:
behavior_obj.socket_log['sendto'].append(
{'port':self.parse_ip_port(line)['port'],'addr':self.parse_ip_port(line)['addr'],'info':line[line.find('sendto'):-1]})
if 'recvfrom(' in line:
behavior_obj.socket_log['recvfrom'].append(
{'port':self.parse_ip_port(line)['port'],'addr':self.parse_ip_port(line)['addr'],'info':line[line.find('recvfrom'):-1]})
def parse_ip_port(self, log_str):
"""parse ip port from socket log."""
connect_info = {
'port': '',
'addr': ''
}
port_pattern = re.compile(r'sin_port=htons\(\d+\)')
addr_pattern = re.compile(r'inet_addr\(".+"\)')
port_result = port_pattern.search(log_str)
if port_result:
connect_info['port'] = port_result.group(0)[15:-1]
addr_result = addr_pattern.search(log_str)
if addr_result:
connect_info['addr'] = addr_result.group(0)[11:-2]
return connect_info
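    # Example of what parse_ip_port extracts (hypothetical strace line):
    #   'connect(3, {sa_family=AF_INET, sin_port=htons(8080),
    #    sin_addr=inet_addr("1.2.3.4")}, 16) = 0'
    #   -> {'port': '8080', 'addr': '1.2.3.4'}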
def fetch_strace_log(self, guest_vm):
"""fetch strace log from guset os."""
if os.path.isfile(self.strace_log_path):
os.remove(self.strace_log_path)
self.vm_control.vm_copyfrom(
guest_vm.name,
guest_vm.vm_log_path,
guest_vm.host_log_path,
guest_vm.user, guest_vm.password
)
def fetch_tcpdump_log(self, guest_vm):
"""fetch tcpdump log from guest os."""
if os.path.isfile(self.tcpdump_log_path):
os.remove(self.tcpdump_log_path)
self.vm_control.vm_copyfrom(
guest_vm.name,
guest_vm.vm_log_tcpdump,
guest_vm.host_log_tcpdump,
guest_vm.user, guest_vm.password
)
    def get_analyze_file_md5(self, filepath):
        """calculate md5 of the analyzed file, return '' on failure."""
        try:
            m = hashlib.md5(open(filepath, 'rb').read())
            return m.hexdigest()
        except Exception:
            logging.error("get file md5 error.", exc_info=True)
            return ''
def analyze_file(self, filepath):
"""main analyze function. """
self.strace_log = []
guest_vm = VMControl.GuestVM()
guest_vm.init_config()
self.init_vm()
# calculate md5
self.md5_hash = self.get_analyze_file_md5(filepath)
# get guest analyzer path
file_root = os.path.dirname(__file__)
guest_analyzer_path = os.path.join(file_root, 'GuestAnalyzer.py')
file_name = os.path.split(filepath)[1]
self.file_name = file_name
if self.vm_control.start_vm(guest_vm.name) == False:
logging.error('Start Guest VM Failed')
self.vm_control.vm_copyto(
guest_vm.name, filepath, guest_vm.runpath, guest_vm.user, guest_vm.password)
self.vm_control.vm_copyto(guest_vm.name, guest_analyzer_path,
guest_vm.runpath, guest_vm.user, guest_vm.password)
self.vm_control.vm_guest_run(guest_vm.name, '/bin/chmod', ' +x %s/%s' % (
guest_vm.runpath, file_name), guest_vm.user, guest_vm.password)
self.vm_control.vm_guest_run(
guest_vm.name, '/usr/bin/python',
'%s/GuestAnalyzer.py %s/%s' % (guest_vm.guest_analyzer_path,
guest_vm.guest_analyzer_path, file_name),
guest_vm.user, guest_vm.password)
time.sleep(10)
self.fetch_strace_log(guest_vm)
self.fetch_tcpdump_log(guest_vm)
self.vm_control.control_vm(guest_vm.name, 'poweroff')
time.sleep(5)
self.vm_control.vm_snap_control(guest_vm.name, 'restore', 'analysis')
def do_log_parse(self, behaviors):
"""main log parse function."""
behaviors.md5_hash = self.md5_hash
behaviors.file_name = self.file_name
# parse strace log
if os.path.isfile(self.strace_log_path):
self.parse_strace_log(self.strace_log_path)
os.remove(self.strace_log_path)
self.parse_socket_log(behaviors)
self.parse_file_log(behaviors)
self.parse_proc_log(behaviors)
self.parse_recvfrom_data(behaviors)
self.parse_file_read_data(behaviors)
# parse tcpdump info
if os.path.isfile(self.tcpdump_log_path):
self.parse_tcpdump_log(behaviors)
os.remove(self.tcpdump_log_path)
def parse_tcpdump_log(self, behaviors):
"""parse tcpdump pcap file. """
if os.path.isfile(self.tcpdump_log_path):
behaviors.tcp_log = self.tcp_info(self.tcpdump_log_path)
behaviors.http_log = self.http_info(self.tcpdump_log_path)
behaviors.udp_log = self.udp_info(self.tcpdump_log_path)
behaviors.dns_log = self.dns_info(self.tcpdump_log_path)
behaviors.irc_log = self.irc_info(self.tcpdump_log_path)
behaviors.packets = self.packets_info(self.tcpdump_log_path)
def packets_info(self, tcpdumpfile):
cmd = [self.tshark_path, '-n', '-ta', '-r', tcpdumpfile]
cmd_output = self.check_output_safe(cmd)
packet_list = []
for line in cmd_output.splitlines():
packet_list.append(line.strip().replace('\xe2\x86\x92 ',' '))
return packet_list
def check_output_safe(self, cmd):
output = ""
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
logging.error("CalledProcessError: %s", str(e))
output = e.output
return output
def filter_packets_by_protocal(self, tcpdumpfile, protocal):
"""use tshark to analyze tcpdump pcap file"""
if os.path.isfile(tcpdumpfile):
cmd = [self.tshark_path, '-Tjson', '-n', '-ta', '-r', tcpdumpfile, protocal]
cmd_output = self.check_output_safe(cmd)
json_data = json.loads(cmd_output)
packet_list = []
for line in json_data:
packet_data = {}
if 'ip' in line['_source']['layers'].keys():
packet_data['ip.src'] = line['_source']['layers']['ip']['ip.src']
packet_data['ip.dst'] = line['_source']['layers']['ip']['ip.dst']
if protocal == 'irc':
irc_info = line['_source']['layers']['irc']
if 'irc.response' in irc_info.keys():
packet_data['irc.response'] = irc_info['irc.response']
if protocal == 'http':
http_info = line['_source']['layers']['http']
if 'http.host' in http_info.keys():
packet_data['http.host'] = http_info['http.host']
if 'http.request' in http_info.keys():
packet_data['http.request'] = http_info['http.request.full_uri']
if protocal == 'dns':
packet_data.clear()
if 'dns' in line['_source']['layers'].keys():
dns_info = line['_source']['layers']['dns']
if 'Queries' in dns_info.keys():
for dns_query in dns_info['Queries'].values():
packet_data['dns_query'] = dns_query['dns.qry.name']
packet_list.append(packet_data)
return packet_list
def tcp_info(self, tcpdumpfile):
"""get tcp info"""
return self.filter_packets_by_protocal(tcpdumpfile, 'tcp')
def udp_info(self, tcpdumpfile):
"""get udp info"""
return self.filter_packets_by_protocal(tcpdumpfile, 'udp')
def irc_info(self, tcpdumpfile):
return self.filter_packets_by_protocal(tcpdumpfile, 'irc')
def http_info(self, tcpdumpfile):
return self.filter_packets_by_protocal(tcpdumpfile, 'http')
def dns_info(self, tcpdumpfile):
dns_packet = self.filter_packets_by_protocal(tcpdumpfile, 'dns')
dns_query_list = []
for line in dns_packet:
if line['dns_query'] in dns_query_list:
continue
dns_query_list.append(line['dns_query'])
return dns_query_list
if __name__ == '__main__':
# test code here
pass
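    # Minimal usage sketch (commented out; the sample path is an assumption and
    # the VM/tshark setup comes from the ConfigManager configuration files):
    #
    #   analyzer = DynamicAnalyzer()
    #   analyzer.analyze_file('/path/to/sample.elf')
    #   behaviors = DynamicBehaviors()
    #   analyzer.do_log_parse(behaviors)
    #   print behaviors.to_report()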
|
plugin/ERCC_Analysis/code/ercc_seq_utils.py | konradotto/TS | 125 | 12649053 | <filename>plugin/ERCC_Analysis/code/ercc_seq_utils.py
# Copyright (C) 2012 Ion Torrent Systems, Inc. All Rights Reserved
# Sequence and read streaming utilities
import sys
import time
import pysam
# decorators section
def _use_and_del(d, k, v):
"""
    Use a value from a dict and remove it.
"""
if k in d:
v = d[k]
del d[k]
return v
def open_read_stream(fn):
"""
Take a string or file argument and pass it to the function as an open file.
"""
def wrapped(f, *args, **kargs):
if type(f) is file: return fn(f, *args, **kargs)
elif type(f) is str: return fn(open(f, 'r', 16384), *args, **kargs)
else: raise Exception(str(f) + ' is not a string or file')
return wrapped
def value_stream(fn):
"""
Take an iterator that iterates over lines (e.g., from a file) and
    return the lines stripped and split into values using delimiter.
Skip blank lines and lines that start with characters in the
skip_lines string.
"""
def wrapped(f, *args, **kargs):
skip_header = _use_and_del(kargs, 'skip_header', True)
skip_blank = _use_and_del(kargs, 'skip_blank', True)
skip_lines = _use_and_del(kargs, 'skip_lines', '#')
delimiter = _use_and_del(kargs, 'delimiter', None)
lines = fn(f, *args, **kargs)
if skip_header:
header = lines.next()
for line in lines:
values = line.strip().split(delimiter)
if skip_blank and len(values) == 0: continue
if values[0][0] in skip_lines: continue
yield values
return wrapped
def metered_stream(fn):
"""
Display a progress meter to standard out for a stream.
"""
def wrapped(*args, **kargs):
meter = _use_and_del(kargs, 'meter', True)
freq = _use_and_del(kargs, 'meter_freq', 10000)
stream = fn(*args, **kargs)
# Hijack the stream and keep count of the items
if meter:
count = 0
start = time.time()
for record in stream:
yield record
if meter:
count += 1
if count % freq == 0:
tick = time.time()
sys.stderr.write('\rProcessed %d lines %.2f lines/sec' % (
count, count / (tick - start)))
sys.stderr.flush()
if meter:
tick = time.time()
sys.stderr.write('\rProcessed %d lines %.2f lines/sec\n' % (
count, count / (tick - start)))
sys.stderr.flush()
return wrapped
# end of decorators section
@open_read_stream
@value_stream
def file_values(f, *args, **kargs): return f
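# Hypothetical usage of the stacked decorators above, assuming 'counts.tsv' is
# a tab-delimited file with a header row:
#
#   for fields in file_values('counts.tsv', delimiter='\t'):
#       name, count = fields[0], int(fields[1])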
@metered_stream
def sam_stream(sam, meter=True, skip_unmapped=False, skip_reverse=False):
"""
    Stream (read, reference_name) pairs from a SAM/BAM file via pysam.
"""
stream = pysam.Samfile(sam)
for read in stream:
        if skip_unmapped and read.is_unmapped: continue # skip unmapped reads
if skip_reverse and read.is_reverse: continue # skip reverse reads
if read.rname != -1:
rname = stream.getrname(read.tid) # make rname visible to the caller
else:
rname = ''
yield read, rname
return
|
dl_lib/modeling/meta_arch/__init__.py | AndysonYs/DynamicRouting | 122 | 12649093 | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# import all the meta_arch, so they will be registered
from .semantic_seg import SemanticSegmentor
from .dynamic4seg import DynamicNet4Seg |
nativedroid/nativedroid/analyses/annotation_based_analysis.py | CherishAZ/Argus-SAF | 152 | 12649141 | import copy
from cStringIO import StringIO
from nativedroid.analyses.resolver.annotation import *
from nativedroid.analyses.resolver.armel_resolver import ArmelResolver
from nativedroid.analyses.resolver.jni.jni_helper import *
from nativedroid.analyses.resolver.model.__android_log_print import *
from nativedroid.protobuf.jnsaf_grpc_pb2 import *
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2018, The Argus-SAF Project"
__license__ = "Apache v2.0"
nativedroid_logger = logging.getLogger('AnnotationBasedAnalysis')
nativedroid_logger.setLevel(logging.INFO)
annotation_location = {
'from_reflection_call': '~',
'from_native': '~',
'from_class': '~'
}
class AnnotationBasedAnalysis(angr.Analysis):
"""
This class performs taint analysis based upon angr's annotation technique.
"""
def __init__(self, analysis_center, jni_method_addr, jni_method_arguments, is_native_pure, native_pure_info=None):
"""
init
:param AnalysisCenter analysis_center:
:param str jni_method_addr: address of jni method
        :param str jni_method_arguments: arguments of the jni method
        :param bool is_native_pure: whether the method is pure native (android_main type or direct type)
        :param Object native_pure_info: initial SimState and native pure argument
"""
        if self.project.arch.name == 'ARMEL':
            self._resolver = ArmelResolver(self.project, analysis_center)
        else:
            raise ValueError('Unsupported architecture: %s' % self.project.arch.name)
self._hook_system_calls()
self._analysis_center = analysis_center
self._jni_method_signature = analysis_center.get_signature()
self._jni_method_addr = jni_method_addr
if is_native_pure:
self._state = self._resolver.prepare_native_pure_state(native_pure_info)
self._arguments_summary = None
else:
self._state, self._arguments_summary = self._resolver.prepare_initial_state(jni_method_arguments)
        # Both branches built the CFG with identical parameters, so the extra
        # check on is_native_pure is not needed here.
        self.cfg = self.project.analyses.CFGAccurate(fail_fast=True, starts=[self._jni_method_addr],
                                                     initial_state=self._state, context_sensitivity_level=1,
                                                     keep_state=True, normalize=True, call_depth=5)
def _hook_system_calls(self):
if '__android_log_print' in self.project.loader.main_object.imports:
self.project.hook_symbol('__android_log_print', AndroidLogPrint(), replace=True)
def count_cfg_instructions(self):
"""
        Count the total number of instructions in the CFG.
        :return: Total instruction count
:rtype: int
"""
total_instructions = 0
for func_addr, func in self.cfg.kb.functions.iteritems():
func_instructions = 0
# print func.name
for block in func.blocks:
block_instructions = len(block.instruction_addrs)
# print block, block_instructions
func_instructions += block_instructions
total_instructions += func_instructions
# print('Total INS: %d' % total_instructions)
return total_instructions
def _collect_taint_sources(self):
"""
        Collect taint source annotations from the CFG.
        :return: A set of source annotations found in the method arguments and return value.
        :rtype: set
"""
sources_annotation = set()
if self._arguments_summary:
for _, arg_summary in self._arguments_summary.iteritems():
for annotation in arg_summary.annotations:
if isinstance(annotation, JobjectAnnotation):
worklist = list(annotation.fields_info)
while worklist:
field_info = worklist[0]
worklist = worklist[1:]
if isinstance(field_info, JobjectAnnotation):
if field_info.taint_info['is_taint'] and \
field_info.taint_info['taint_type'][0] == '_SOURCE_' and \
'_ARGUMENT_' not in field_info.taint_info['taint_type'][1]:
sources_annotation.add(annotation)
else:
worklist.extend(field_info.fields_info)
if not self._jni_method_signature.endswith(")V"):
for node in self.cfg.nodes():
if not node.is_simprocedure and \
node.block.vex.jumpkind == 'Ijk_Ret' and \
node.function_address == self._jni_method_addr:
for final_state in node.final_states:
return_value = final_state.regs.r0
for annotation in return_value.annotations:
if isinstance(annotation, JobjectAnnotation):
if annotation.taint_info['is_taint'] and \
annotation.taint_info['taint_type'][0] == '_SOURCE_' and \
'_ARGUMENT_' not in annotation.taint_info['taint_type'][1]:
sources_annotation.add(annotation)
return sources_annotation
def _collect_taint_sinks(self):
"""
        Collect taint sink annotations from the CFG.
        :return: A set of sink annotations for tainted values reaching sink calls.
        :rtype: set
"""
sink_nodes = {}
sink_annotations = set()
for node in self.cfg.nodes():
if node.is_simprocedure and node.name.startswith('Call'):
for final_state in node.final_states:
regs = [final_state.regs.r0,
final_state.regs.r1,
final_state.regs.r2,
final_state.regs.r3,
final_state.regs.r4,
final_state.regs.r5,
final_state.regs.r6,
final_state.regs.r7,
final_state.regs.r8,
final_state.regs.r9,
final_state.regs.r10]
for reg in regs:
# node_return_value = final_state.regs.r0
for annotation in reg.annotations:
if isinstance(annotation, JobjectAnnotation):
if annotation.taint_info['is_taint'] and \
annotation.taint_info['taint_type'][0] == '_SINK_':
sink_annotations.add(annotation)
fn = self.cfg.project.kb.functions.get(node.addr)
if fn:
ssm = self._analysis_center.get_source_sink_manager()
if ssm.is_sink(fn.name):
sink_tag = ssm.get_sink_tags(fn.name)
sink_nodes[node] = sink_tag
for sink, (positions, tags) in sink_nodes.iteritems():
input_state = sink.input_state
final_states = sink.final_states
args = self._resolver.get_taint_args(input_state, final_states, positions, tags)
if args:
nativedroid_logger.debug('tainted: %s, belong_obj: %s' % (args, sink.final_states[0].regs.r0))
for arg in args:
for annotation in arg.annotations:
sink_annotation = copy.deepcopy(annotation)
sink_annotation.taint_info['taint_type'][0] = '_SINK_'
if annotation.taint_info['is_taint'] and \
annotation.taint_info['taint_type'] == ['_SOURCE_', '_API_']:
sink_annotation.taint_info['taint_type'][1] = '_SOURCE_'
sink_annotations.add(sink_annotation)
annotations = set()
for annotation in sink_annotations:
if annotation.taint_info['is_taint'] and annotation.taint_info['taint_type'][1] == '_SOURCE_':
nativedroid_logger.info('Found taint in function %s.', self._jni_method_signature)
jnsaf_client = self._analysis_center.get_jnsaf_client()
if jnsaf_client:
request = RegisterTaintRequest(
apk_digest=jnsaf_client.apk_digest,
signature=self._analysis_center.get_signature(),
source_kind=annotation.taint_info['source_kind'],
sink_kind=annotation.taint_info['sink_kind'])
response = jnsaf_client.RegisterTaint(request)
if response.status:
nativedroid_logger.info('Registered %s as taint.', self._jni_method_signature)
else:
annotations.add(annotation)
return annotations
def gen_taint_analysis_report(self, sources, sinks):
"""
Generate the taint analysis report
:param sources: Sources annotation
:param sinks: Sinks annotation
:return: taint analysis report
"""
report_file = StringIO()
if sinks:
report_file.write(self._jni_method_signature)
report_file.write(' -> _SINK_ ')
args = set([])
for sink_annotation in sinks:
if sink_annotation.array_info['is_element']:
if sink_annotation.array_info['base_annotation'].source.startswith('arg'):
arg_index = \
re.split('arg|_', sink_annotation.array_info['base_annotation'].source)[1]
sink_location = arg_index
args.add(str(sink_location))
else:
taint_field_name = ''
anno = sink_annotation
while anno:
if anno.field_info['is_field']:
taint_field_name = '.' + anno.field_info['field_name'] + taint_field_name
if anno.taint_info['is_taint'] and anno.source and anno.source.startswith('arg'):
args.add(anno.source.split('arg')[-1] + taint_field_name)
break
anno = anno.field_info['base_annotation']
report_file.write('|'.join(args))
report_file.write('\n')
if sources:
report_file.write(self._jni_method_signature)
report_file.write(' -> _SOURCE_ ')
for source_annotation in sources:
if isinstance(source_annotation, JobjectAnnotation) and source_annotation.source.startswith('arg'):
source_location = source_annotation.source
taint_field_name = ''
worklist = list(source_annotation.fields_info)
while worklist:
field_info = worklist[0]
worklist = worklist[1:]
if field_info.taint_info['is_taint'] and \
'_ARGUMENT_' not in field_info.taint_info['taint_type'][1]:
taint_field_name += '.' + field_info.field_info['field_name']
break
elif isinstance(field_info, JobjectAnnotation):
taint_field_name += '.' + field_info.field_info['field_name']
worklist.extend(field_info.fields_info)
if taint_field_name:
report_file.write(source_location.split('arg')[-1] + taint_field_name)
return report_file.getvalue().strip()
def gen_saf_summary_report(self):
"""
Generate SAF summary report
:return: summary report
"""
args_safsu = dict()
rets_safsu = list()
if self._arguments_summary:
for arg_index, arg_summary in self._arguments_summary.iteritems():
arg_safsu = dict()
for annotation in arg_summary.annotations:
if isinstance(annotation, JobjectAnnotation) and annotation.fields_info:
for field_info in annotation.fields_info:
field_name = field_info.field_info['field_name']
field_type = field_info.obj_type.replace('/', '.')
field_locations = list()
if field_info.source in annotation_location:
field_location = annotation_location[field_info.source]
field_locations.append(field_location)
elif field_info.source.startswith('arg'):
field_location = field_info.heap
field_locations.append(field_location)
arg_safsu[field_name] = (field_type, field_locations)
args_safsu[arg_index] = arg_safsu
return_nodes = list()
for node in self.cfg.nodes():
if not node.is_simprocedure:
if node.block.vex.jumpkind == 'Ijk_Ret' and node.function_address == self._jni_method_addr:
return_nodes.append(node)
if not self._jni_method_signature.endswith(")V"):
for return_node in return_nodes:
for final_state in return_node.final_states:
return_value = final_state.regs.r0
for annotation in return_value.annotations:
if isinstance(annotation, JstringAnnotation):
# ret_type = annotation.primitive_type.split('L')[-1].replace('/', '.')
ret_type = 'java.lang.String'
ret_location = annotation_location[annotation.source]
ret_value = annotation.value
if ret_value is not None:
ret_safsu = ' ret = "' + ret_value + '"@' + ret_location
else:
ret_safsu = ' ret = ' + ret_type + '@' + ret_location
rets_safsu.append(ret_safsu)
elif isinstance(annotation, JobjectAnnotation):
if annotation.heap:
ret_value = annotation.heap
ret_safsu = ' ret = ' + ret_value
rets_safsu.append(ret_safsu)
else:
ret_type = annotation.obj_type.replace('/', '.')
ret_location = annotation_location[annotation.source]
ret_safsu = ' ret = ' + ret_type + '@' + ret_location
rets_safsu.append(ret_safsu)
report_file = StringIO()
report_file.write('`' + self._jni_method_signature + '`:' + '\n')
if args_safsu:
for arg_index, fields_safsu in args_safsu.iteritems():
arg_index = 'arg:' + str(re.split('arg|_', arg_index)[1])
for field_name, field_su in fields_safsu.iteritems():
field_type = field_su[0]
field_locations = field_su[1]
if field_locations[0] == '~':
field_safsu = arg_index + '.' + field_name + ' = ' + field_type + '@' + field_locations[0]
else:
field_safsu = arg_index + '.' + field_name + ' = ' + field_locations[0]
report_file.write(' ' + field_safsu.strip() + '\n')
if rets_safsu:
for ret_safsu in rets_safsu:
report_file.write(ret_safsu + '\n')
report_file.write(';\n')
return report_file.getvalue().strip()
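    # Illustrative layout of the summary text built above (the signature and
    # field names are made up; locations follow the `type@location` form):
    #
    #   `Lcom/example/Foo;.nativeBar:(Ljava/lang/String;)Ljava/lang/String;`:
    #     arg:1.field_name = java.lang.String@~
    #     ret = java.lang.String@~
    #   ;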
def run(self):
"""
run the analysis.
:return:
"""
sources = self._collect_taint_sources()
sinks = self._collect_taint_sinks()
return sources, sinks
|
whatsapp-bot-venv/Lib/site-packages/twilio/rest/preview/deployed_devices/__init__.py | RedaMastouri/ConversationalPythonicChatBot | 1,362 | 12649152 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.version import Version
from twilio.rest.preview.deployed_devices.fleet import FleetList
class DeployedDevices(Version):
def __init__(self, domain):
"""
Initialize the DeployedDevices version of Preview
:returns: DeployedDevices version of Preview
:rtype: twilio.rest.preview.deployed_devices.DeployedDevices.DeployedDevices
"""
super(DeployedDevices, self).__init__(domain)
self.version = 'DeployedDevices'
self._fleets = None
@property
def fleets(self):
"""
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
if self._fleets is None:
self._fleets = FleetList(self)
return self._fleets
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.DeployedDevices>'
|
scale.app/scripts/lizard_metrics.py | f4rsh/SCALe | 239 | 12649153 | #!/usr/bin/env python
# Copyright (c) 2007-2018 Carnegie Mellon University.
# All Rights Reserved. See COPYRIGHT file for details.
import lizard
import scale
import argparse
import os
import sys
# Get path arg from cmd line
def getArgs():
parser = argparse.ArgumentParser(description="Gathers metrics via Lizard")
parser.add_argument(
"-p", "--pathName", help="Path in which to begin gathering metrics")
args = parser.parse_args()
if args.pathName is None:
print "Must specify path in which directory" \
+ " to parse is located using -p"
sys.exit(1)
if not os.path.exists(args.pathName):
print "Target directory " + args.pathName + " does not exist!"
sys.exit(1)
return args.pathName
def convertPath(pathName):
pathList = pathName.split('/')
if pathList[len(pathList) - 1]:
return pathList[len(pathList) - 1]
else:
return pathList[len(pathList) - 2]
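# For example (hypothetical paths): convertPath('/home/user/project/') and
# convertPath('/home/user/project') both return 'project'.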
def getNewFileName(newPath, oldFile):
fileList = oldFile.split('/')
pathBegin = None
for cur in range(len(fileList)):
if fileList[cur] == newPath:
pathBegin = cur
newList = fileList[pathBegin:]
finalPath = '/' + '/'.join(newList)
return finalPath
def printMetrics(srcFiles, pathName):
totSlocFolder = 0
numFiles = 0
newPath = convertPath(pathName)
lizard.analyze_file.processors.insert(0, lizard.CPreExtension)
# Insert file metrics as well as metrics for file's functions
for currFile in srcFiles:
totSloc = 0
totParams = 0
totTokens = 0
totComplexity = 0
fileMetrics = lizard.analyze_file(currFile)
numFuncs = len(fileMetrics.function_list)
fileName = getNewFileName(newPath, fileMetrics.filename)
for func in fileMetrics.function_list:
fields = [func.name, func.length, func.nloc, fileName,
"", func.cyclomatic_complexity, "",
func.parameter_count, "", "", "", func.token_count,
"", func.start_line, func.end_line]
scale.Write_Fields(map(lambda x: str(x), fields))
totSloc += func.nloc
totParams += func.parameter_count
totTokens += func.token_count
totComplexity += func.cyclomatic_complexity
if numFuncs != 0:
avgSloc = round((float(totSloc) / numFuncs), 2)
avgParams = round((float(totParams) / numFuncs), 2)
avgTokens = round((float(totTokens) / numFuncs), 2)
avgComplexity = round((float(totComplexity) / numFuncs), 2)
fields = [fileName, "", fileMetrics.nloc, "",
numFuncs, "", avgComplexity, "", avgSloc,
avgParams, "", "", avgTokens, "", ""]
else:
fields = [fileName, "", fileMetrics.nloc, "",
0, "", 0, "", 0, 0, "", "", 0, "", ""]
scale.Write_Fields(map(lambda x: str(x), fields))
totSlocFolder += fileMetrics.nloc
numFiles += 1
if numFiles != 0:
fields = [newPath, "", totSlocFolder,
"", "", "", "", "", "", "", "",
float(totSlocFolder) / numFiles,
"", "", "", ""]
scale.Write_Fields(map(lambda x: str(x), fields))
def main():
pathName = getArgs()
srcFiles = lizard.get_all_source_files([pathName], [], [])
printMetrics(srcFiles, pathName)
if __name__ == "__main__":
main()
|
libcity/pipeline/__init__.py | moghadas76/test_bigcity | 221 | 12649217 | from libcity.pipeline.pipeline import run_model, hyper_parameter, objective_function
__all__ = [
"run_model",
"hyper_parameter",
"objective_function"
]
|
third_party/infra_libs/time_functions/zulu.py | webrtc-lizp/infra-luci-client-py | 2,151 | 12649221 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides functions for parsing and outputting Zulu time."""
import datetime
import pytz
from infra_libs.time_functions import timestamp
def parse_zulu_time(string):
"""Parses a Zulu time string, returning None if unparseable."""
# Ugh https://bugs.python.org/issue19475.
zulu_format = "%Y-%m-%dT%H:%M:%S"
if '.' in string:
zulu_format += ".%f"
zulu_format += "Z"
try:
return datetime.datetime.strptime(string, zulu_format)
except ValueError:
return None
def parse_zulu_ts(string):
"""Parses Zulu time and converts into a timestamp or None."""
zuluparse = parse_zulu_time(string)
if zuluparse is None:
return None
return timestamp.utctimestamp(zuluparse)
def to_zulu_string(dt):
"""Returns a Zulu time string from a datetime.
Assumes naive datetime objects are in UTC.
Ensures the output always has a floating-point number of seconds.
"""
# Assume non-tz-aware datetimes are in UTC.
if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
dt = dt.replace(tzinfo=pytz.UTC)
# Convert datetime into UTC.
isodate = dt.astimezone(pytz.UTC).isoformat().split('+')[0]
# Add fractional seconds if not present.
if '.' not in isodate:
isodate += '.0'
return isodate + 'Z'
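# Round-trip sketch with illustrative values:
#
#   dt = datetime.datetime(2015, 6, 1, 12, 30, 0)
#   to_zulu_string(dt)                               # -> '2015-06-01T12:30:00.0Z'
#   parse_zulu_time('2015-06-01T12:30:00.0Z') == dt  # -> True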
|
hydra-configs-torchvision/hydra_configs/torchvision/__init__.py | LaudateCorpus1/hydra-torch | 149 | 12649232 | <filename>hydra-configs-torchvision/hydra_configs/torchvision/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Generated by configen, do not edit.
# See https://github.com/facebookresearch/hydra/tree/main/tools/configen
# fmt: off
# isort:skip_file
# flake8: noqa
from packaging import version
from pkg_resources import get_distribution
import warnings
import torchvision
CONFIGS_VERSION = get_distribution('hydra-configs-torchvision').version
# checks if major.minor versions are matched. patch version is always different
if version.parse(torchvision.__version__).release[:2] != version.parse(CONFIGS_VERSION).release[:2]:
warnings.warn(f'Your config and library versions are mismatched. \n HYDRA-CONFIGS-TORCHVISION VERSION: {CONFIGS_VERSION}, \n TORCHVISION VERSION: {torchvision.__version__}. \n Please install the matching configs for reliable functionality.')
|
var/spack/repos/builtin/packages/librmm/package.py | kkauder/spack | 2,360 | 12649236 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Librmm(CMakePackage):
"""RMM: RAPIDS Memory Manager. Achieving optimal
performance in GPU-centric workflows frequently requires
customizing how host and device memory are allocated."""
homepage = "https://github.com/rapidsai/rmm"
url = "https://github.com/rapidsai/rmm/archive/v0.15.0.tar.gz"
version('0.15.0', sha256='599f97b95d169a90d11296814763f7e151a8a1e060ba10bc6c8f4684a5cd7972')
depends_on('[email protected]:')
|
selim_sef/dataset/dense_transform.py | ktncktnc/SpaceNet_Off_Nadir_Solutions | 164 | 12649239 | <filename>selim_sef/dataset/dense_transform.py
import math
import random
import cv2
cv2.setNumThreads(0)
import numpy as np
import torch
from numpy.core.multiarray import ndarray
_DEFAULT_ALPHASTD = 0.1
_DEFAULT_EIGVAL = torch.Tensor([0.2175, 0.0188, 0.0045])
_DEFAULT_EIGVEC = torch.Tensor([[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]])
_DEFAULT_BCS = [0.2, 0.2, 0.2]
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, sample):
sample["img"] = self.normalize(sample["img"], self.mean, self.std)
if "imgs" in sample:
sample["imgs"] = [self.normalize(img, self.mean, self.std) for img in sample["imgs"]]
return sample
def normalize(self, tensor, mean, std):
if not (torch.is_tensor(tensor) and tensor.ndimension() == 3):
raise TypeError('tensor is not a torch image.')
for t, m, s in zip(tensor, mean, std):
t.sub_(m).div_(s)
return tensor
class HFlip(object):
def __call__(self, sample):
if random.random() < 0.5:
try:
sample["img"] = cv2.flip(sample["img"], 1)
except Exception as e:
print(sample["img_name"])
raise e
if sample["mask"] is not None:
sample["mask"] = cv2.flip(sample["mask"], 1)
return sample
class VFlip(object):
def __call__(self, sample):
if random.random() < 0.5:
sample["img"] = cv2.flip(sample["img"], 0)
if sample["mask"] is not None:
sample["mask"] = cv2.flip(sample["mask"], 0)
return sample
def rot90(img, factor):
img = np.rot90(img, factor)
return np.ascontiguousarray(img)
class Rotate90(object):
def __call__(self, sample):
if random.random() < 0.5:
factor = random.randint(0, 4)
sample["img"] = rot90(sample["img"], factor)
if sample["mask"] is not None:
sample["mask"] = rot90(sample["mask"], factor)
return sample
class Pad(object):
def __init__(self, block=32, mode='reflect'):
super().__init__()
self.block = block
self.mode = mode
def __call__(self, sample):
sample["img"] = pad(sample["img"], self.block, type='reflect')
if sample["mask"] is not None and sample["mask"] != []:
sample["mask"] = pad(sample["mask"], self.block, type='reflect')
return sample
def pad(image, block, type='reflect', **kwargs):
params = {}
if type == 'zero':
params = {'constant_values': 0}
type = 'constant'
x0, x1, y0, y1 = 0, 0, 0, 0
if (image.shape[1] % block) != 0:
x0 = int((block - image.shape[1] % block) / 2)
x1 = (block - image.shape[1] % block) - x0
if (image.shape[0] % block) != 0:
y0 = int((block - image.shape[0] % block) / 2)
y1 = (block - image.shape[0] % block) - y0
if len(image.shape) > 1:
return np.pad(image, ((y0, y1), (x0, x1), (0, 0)), type, **params, **kwargs)
else:
return np.pad(image, ((y0, y1), (x0, x1)), type, **params, **kwargs)
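# Example with illustrative shapes: pad(img, 32) on a (500, 300, 3) image
# reflects the borders out to (512, 320, 3) so both spatial dimensions become
# multiples of 32.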
class ToTensor(object):
def __call__(self, sample):
sample["img"] = torch.from_numpy(sample["img"].transpose((2, 0, 1))).float()
sample["angle"] = torch.from_numpy(sample["angle"].transpose((2, 0, 1))).float()
if isinstance(sample["mask"], ndarray):
sample["mask"] = torch.from_numpy(sample["mask"].transpose((2, 0, 1))).float()
return sample
class ColorJitterImage(object):
def __init__(self):
self.transform = ColorJitter()
def __call__(self, sample):
if random.random() < 0.5:
sample["img"] = self.transform(sample['img'])
return sample
class LightingImage(object):
def __init__(self):
self.transform = Lighting()
def __call__(self, sample):
if random.random() < 0.5:
sample["img"] = self.transform(sample['img'])
return sample
class RandomCropAndScale(object):
def __init__(self, height, width, scale_range=(0.5, 2.0), rescale_prob=0.5, prob=1.):
self.prob = prob
self.height = height
self.width = width
self.scale_range = scale_range
self.rescale_prob = rescale_prob
def __call__(self, sample):
if random.random() > self.prob:
return sample
scale = random.uniform(self.scale_range[0], self.scale_range[1])
if random.random() > self.rescale_prob:
scale = 1.
random_state = np.random.randint(0, 10000)
sample["img"] = random_crop(sample['img'], self.height, self.width, scale, np.random.RandomState(random_state))
if sample["mask"] is not None and sample["mask"] != []:
sample["mask"] = random_crop(sample['mask'], self.height, self.width, scale,
np.random.RandomState(random_state), mode=cv2.INTER_NEAREST)
return sample
def random_crop(img, height, width, scale, random_state, mode=None):
if random_state is None:
random_state = np.random.RandomState(1234)
crop_height = height
crop_width = width
img_height, img_width = img.shape[:2]
max_height = int(min(crop_height * scale, img_height))
max_width = int(min(crop_width * scale, img_width))
adjusted_scale = scale * min(max_width / (crop_width * scale), max_height / (crop_height * scale))
crop_width = int(adjusted_scale * width)
crop_height = int(adjusted_scale * height)
start_y = random_state.randint(0, max(img_height - crop_height, 1))
start_x = random_state.randint(0, max(img_width - crop_width, 1))
crop = img[start_y:start_y + crop_height, start_x:start_x + crop_width]
if mode is None:
if 1 / adjusted_scale < 1.:
mode = cv2.INTER_AREA
else:
mode = cv2.INTER_CUBIC
if scale != 1.:
img = cv2.resize(crop, (width, height), interpolation=mode)
else:
img = crop
return img
def shift_scale_rotate(img, angle, scale, dx, dy, borderMode=cv2.BORDER_CONSTANT):
height, width = img.shape[:2]
cc = math.cos(angle / 180 * math.pi) * scale
ss = math.sin(angle / 180 * math.pi) * scale
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx * width, height / 2 + dy * height])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
img = cv2.warpPerspective(img, mat, (width, height),
flags=cv2.INTER_NEAREST,
borderMode=borderMode)
return img
class RandomRotate(object):
def __init__(self, angle=15, prob=0.3):
self.prob = prob
self.angle = angle
def __call__(self, sample):
if random.random() > self.prob:
return sample
angle = random.uniform(-self.angle, self.angle)
if angle == 0:
return sample
sample["img"] = shift_scale_rotate(sample['img'], angle=angle, scale=1, dx=0, dy=0)
if sample["mask"] is not None and sample["mask"] != []:
sample["mask"] = shift_scale_rotate(sample['mask'], angle=angle, scale=1, dx=0, dy=0)
return sample
def _grayscale(img):
alpha = torch.Tensor([0.299, 0.587, 0.114])
return (alpha.view(3, 1, 1) * img).sum(0, keepdim=True)
def _blend(img1, img2, alpha):
return img1 * alpha + (1 - alpha) * img2
class Lighting(object):
def __init__(self, alphastd=_DEFAULT_ALPHASTD, eigval=_DEFAULT_EIGVAL, eigvec=_DEFAULT_EIGVEC):
self._alphastd = alphastd
self._eigval = eigval
self._eigvec = eigvec
def __call__(self, img):
if self._alphastd == 0.:
return img
alpha = torch.normal(torch.zeros(3), self._alphastd)
rgb = (self._eigvec * alpha * self._eigval).sum(dim=1)
return img + rgb.view(3, 1, 1)
class Saturation(object):
def __init__(self, var):
self._var = var
def __call__(self, img):
gs = _grayscale(img)
alpha = torch.FloatTensor(1).uniform_(-self._var, self._var) + 1.0
return _blend(img, gs, alpha)
class Brightness(object):
def __init__(self, var):
self._var = var
def __call__(self, img):
gs = torch.zeros(img.size())
alpha = torch.FloatTensor(1).uniform_(-self._var, self._var) + 1.0
return _blend(img, gs, alpha)
class Contrast(object):
def __init__(self, var):
self._var = var
def __call__(self, img):
gs = _grayscale(img)
gs = torch.FloatTensor(1, 1, 1).fill_(gs.mean())
alpha = torch.FloatTensor(1).uniform_(-self._var, self._var) + 1.0
return _blend(img, gs, alpha)
class ColorJitter(object):
def __init__(self, saturation=_DEFAULT_BCS[0], brightness=_DEFAULT_BCS[1], contrast=_DEFAULT_BCS[2]):
self._transforms = []
if saturation is not None:
self._transforms.append(Saturation(saturation))
if brightness is not None:
self._transforms.append(Brightness(brightness))
if contrast is not None:
self._transforms.append(Contrast(contrast))
def __call__(self, img):
if len(self._transforms) == 0:
return img
for t in random.sample(self._transforms, len(self._transforms)):
img[:3, ...] = t(img[:3,...])
return img
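# A possible way to chain the transforms above (hypothetical pipeline; `sample`
# is a dict with 'img', 'mask' and 'angle' arrays, `mean`/`std` are per-channel
# statistics):
#
#   pipeline = [RandomCropAndScale(512, 512), HFlip(), VFlip(), Pad(32),
#               ToTensor(), Normalize(mean, std)]
#   for t in pipeline:
#       sample = t(sample)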
|
cadence/tests/test_query_workflow.py | simkimsia/temporal-python-sdk | 141 | 12649247 | import time
import pytest
from cadence.exceptions import QueryFailureException
from cadence.workerfactory import WorkerFactory
from cadence.workflow import workflow_method, signal_method, Workflow, WorkflowClient, query_method
TASK_LIST = "TestQueryWorkflow"
DOMAIN = "sample"
class GreetingException(Exception):
pass
class TestQueryWorkflow:
@query_method()
async def get_message(self) -> str:
raise NotImplementedError
@query_method()
async def get_message_fail(self) -> str:
raise NotImplementedError
@signal_method()
async def put_message(self, message):
raise NotImplementedError
@workflow_method(task_list=TASK_LIST)
async def get_greetings(self) -> list:
raise NotImplementedError
class TestQueryWorkflowImpl(TestQueryWorkflow):
def __init__(self):
self.message = ""
async def get_message(self) -> str:
return self.message
async def get_message_fail(self) -> str:
raise GreetingException("error from query")
async def put_message(self, message):
self.message = message
async def get_greetings(self) -> list:
self.message = "initial-message"
await Workflow.await_till(lambda: self.message == "done")
def test_query_workflow():
factory = WorkerFactory("localhost", 7933, DOMAIN)
worker = factory.new_worker(TASK_LIST)
worker.register_workflow_implementation_type(TestQueryWorkflowImpl)
factory.start()
client = WorkflowClient.new_client(domain=DOMAIN)
workflow: TestQueryWorkflow = client.new_workflow_stub(TestQueryWorkflow)
workflow_ec = WorkflowClient.start(workflow.get_greetings)
assert workflow.get_message() == "initial-message"
workflow.put_message("second-message")
assert workflow.get_message() == "second-message"
with pytest.raises(QueryFailureException) as exc_info:
workflow.get_message_fail()
ex = exc_info.value
assert isinstance(ex.__cause__, GreetingException)
workflow.put_message("done")
client.wait_for_close(workflow_ec)
assert workflow.get_message() == "done"
print("Stopping workers")
worker.stop()
|
tests/test_robots.py | PLPeeters/reppy | 137 | 12649252 | <filename>tests/test_robots.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
'''These are unit tests that are derived from the rfc at
http://www.robotstxt.org/norobots-rfc.txt'''
import codecs
import unittest
import mock
from requests.exceptions import SSLError
from reppy import robots
from .util import requests_fixtures
class RobotsTest(unittest.TestCase):
'''Tests about our Robots class.'''
def test_expired(self):
'''Returns true if expired.'''
with mock.patch.object(robots.time, 'time', return_value=10):
robot = robots.Robots.parse('http://example.com/robots.txt', '', expires=5)
self.assertTrue(robot.expired)
def test_not_expired(self):
'''Returns false if not expired.'''
with mock.patch.object(robots.time, 'time', return_value=10):
robot = robots.Robots.parse('http://example.com/robots.txt', '', expires=15)
self.assertFalse(robot.expired)
def test_ttl(self):
'''Returns the time remaining until expiration.'''
with mock.patch.object(robots.time, 'time', return_value=10):
robot = robots.Robots.parse('http://example.com/robots.txt', '', expires=15)
self.assertEqual(robot.ttl, 5)
def test_no_leading_user_agent(self):
'''Treats missing User-Agent as default user agent'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
Disallow: /path
Allow: /path/exception
Crawl-delay: 7
''')
self.assertNotEqual(robot.agent('agent'), None)
self.assertTrue(robot.allowed('/path/exception', 'agent'))
self.assertFalse(robot.allowed('/path', 'agent'))
self.assertTrue(robot.allowed('/', 'agent'))
self.assertEquals(robot.agent('agent').delay, 7)
def test_malformed_crawl_delay(self):
'''Handles a malformed delay.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: *
Crawl-delay: word
''')
self.assertEqual(robot.agent('agent').delay, None)
def test_honors_default_agents(self):
'''Honors the default user agent when that's all that's available.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: *
Disallow: /tmp
User-agent: other-agent
Allow: /tmp
''')
self.assertFalse(robot.allowed('/tmp', 'agent'))
self.assertTrue(robot.allowed('/path', 'agent'))
def test_honors_specific_agent(self):
'''Honors the specific user agent if a match is found.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: *
Disallow: /tmp
User-agent: agent
Allow: /tmp
''')
self.assertTrue(robot.allowed('/tmp', 'agent'))
self.assertTrue(robot.allowed('/path', 'agent'))
def test_grouping(self):
'''Multiple consecutive User-Agent lines are allowed.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: one
User-agent: two
Disallow: /tmp
''')
self.assertFalse(robot.allowed('/tmp', 'one'))
self.assertFalse(robot.allowed('/tmp', 'two'))
def test_grouping_unknown_keys(self):
'''
When we encounter unknown keys, we should disregard any grouping that may have
happened between user agent rules.
This is an example from the wild. Despite `Noindex` not being a valid directive,
we'll not consider the '*' and 'ia_archiver' rules together.
'''
rules = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: *
Disallow: /content/2/
User-agent: *
Noindex: /gb.html
Noindex: /content/2/
User-agent: ia_archiver
Disallow: /
''')
self.assertTrue(rules.allowed('/foo', 'agent'))
self.assertTrue(not rules.allowed('/bar', 'ia_archiver'))
def test_separates_agents(self):
'''Hands back an appropriate agent.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: one
Crawl-delay: 1
User-agent: two
Crawl-delay: 2
''')
self.assertNotEqual(
robot.agent('one').delay,
robot.agent('two').delay)
def test_exposes_sitemaps(self):
'''Finds and exposes sitemaps.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
Sitemap: http://a.com/sitemap.xml
Sitemap: http://b.com/sitemap.xml
''')
self.assertEqual(robot.sitemaps, [
'http://a.com/sitemap.xml', 'http://b.com/sitemap.xml'
])
def test_case_insensitivity(self):
'''Make sure user agent matches are case insensitive'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-agent: Agent
Disallow: /path
''')
self.assertFalse(robot.allowed('/path', 'agent'))
self.assertFalse(robot.allowed('/path', 'aGeNt'))
def test_empty(self):
'''Makes sure we can parse an empty robots.txt'''
robot = robots.Robots.parse('http://example.com/robots.txt', '')
self.assertEqual(robot.sitemaps, [])
self.assertTrue(robot.allowed('/', 'agent'))
def test_comments(self):
'''Robust against comments.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-Agent: * # comment saying it's the default agent
Allow: /
''')
self.assertNotEqual(robot.agent('agent'), None)
def test_accepts_full_url(self):
'''Can accept a url string.'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-Agent: agent
Disallow: /
''')
self.assertFalse(robot.allowed('http://example.com/path', 'agent'))
def test_skip_malformed_line(self):
'''If there is no colon in a line, then we must skip it'''
robot = robots.Robots.parse('http://example.com/robots.txt', '''
User-Agent: agent
Disallow /no/colon/in/this/line
''')
self.assertTrue(robot.allowed('/no/colon/in/this/line', 'agent'))
def test_fetch_status_200(self):
'''A 200 parses things normally.'''
with requests_fixtures('test_fetch_status_200'):
robot = robots.Robots.fetch('http://localhost:8080/robots.txt')
self.assertFalse(robot.allowed('/path', 'agent'))
def test_fetch_status_401(self):
'''A 401 gives us an AllowNone Robots.'''
with requests_fixtures('test_fetch_status_401'):
robot = robots.Robots.fetch('http://localhost:8080/robots.txt')
self.assertIsInstance(robot, robots.AllowNone)
def test_fetch_status_403(self):
'''A 403 gives us an AllowNone Robots.'''
with requests_fixtures('test_fetch_status_403'):
robot = robots.Robots.fetch('http://localhost:8080/robots.txt')
self.assertIsInstance(robot, robots.AllowNone)
def test_fetch_status_4XX(self):
'''A 4XX gives us an AllowAll Robots.'''
with requests_fixtures('test_fetch_status_4XX'):
robot = robots.Robots.fetch('http://localhost:8080/robots.txt')
self.assertIsInstance(robot, robots.AllowAll)
def test_fetch_status_5XX(self):
'''A server error raises an exception.'''
with requests_fixtures('test_fetch_status_5XX'):
with self.assertRaises(robots.exceptions.BadStatusCode):
robots.Robots.fetch('http://localhost:8080/robots.txt')
def test_content_too_big(self):
'''Raises an exception if the content is too big.'''
with requests_fixtures('test_content_too_big'):
with self.assertRaises(robots.exceptions.ReppyException):
robots.Robots.fetch('http://localhost:8080/robots.txt', max_size=5)
def test_ssl_exception(self):
'''Raises a ReppyException on SSL errors.'''
with mock.patch.object(robots.requests, 'get', side_effect=SSLError('Kaboom')):
with self.assertRaises(robots.exceptions.SSLException):
robots.Robots.fetch('https://localhost:8080/robots.txt')
def test_connection_exception(self):
'''Raises a ReppyException on connection errors.'''
with self.assertRaises(robots.exceptions.ConnectionException):
robots.Robots.fetch('http://localhost:8080/robots.txt')
def test_malformed_url(self):
'''Raises a ReppyException on malformed URLs.'''
with self.assertRaises(robots.exceptions.MalformedUrl):
robots.Robots.fetch('gobbledygook')
def test_excessive_redirects(self):
'''Raises a ReppyException on too many redirects.'''
with requests_fixtures('test_excessive_redirects'):
with self.assertRaises(robots.exceptions.ExcessiveRedirects):
robots.Robots.fetch('http://localhost:8080/robots.txt')
def test_robots_url_http(self):
'''Works with a http URL.'''
url = 'http://[email protected]:80/path;params?query#fragment'
expected = 'http://example.com/robots.txt'
self.assertEqual(robots.Robots.robots_url(url), expected)
def test_robots_url_https(self):
'''Works with a https URL.'''
url = 'https://[email protected]:443/path;params?query#fragment'
expected = 'https://example.com/robots.txt'
self.assertEqual(robots.Robots.robots_url(url), expected)
def test_robots_url_non_default_port(self):
'''Works with a URL with a non-default port.'''
url = 'http://[email protected]:8080/path;params?query#fragment'
expected = 'http://example.com:8080/robots.txt'
self.assertEqual(robots.Robots.robots_url(url), expected)
def test_robots_url_invalid_port(self):
'''Raises exception when given an invalid port.'''
url = 'http://:::cnn.com/'
with self.assertRaises(ValueError):
robots.Robots.robots_url(url)
def test_utf8_bom(self):
'''If there's a utf-8 BOM, we should parse it as such'''
robot = robots.Robots.parse('http://example.com/robots.txt',
codecs.BOM_UTF8 + b'''
User-Agent: agent
Allow: /path
User-Agent: other
Disallow: /path
''')
self.assertTrue(robot.allowed('http://example.com/path', 'agent'))
self.assertFalse(robot.allowed('http://example.com/path', 'other'))
def test_str_function(self):
'''
If there is valid UTF-8, str() should return a representation of the
directives.
This came out of a UnicodeDecodeError happening in Python 2, when we
were unduly decoding the bytes (via UTF-8) to unicode, then implictly
converting back to bytes via UTF-8.
'''
robot = robots.Robots.parse('http://example.com/robots.txt',
codecs.BOM_UTF8 + b'''
User-Agent: \xc3\xa4gent
Allow: /swedish-chef
''')
s = str(robot)
self.assertTrue('ägent' in s)
def test_utf16_bom(self):
'''If there's a utf-16 BOM, we should parse it as such'''
robot = robots.Robots.parse('http://example.com/robots.txt',
codecs.BOM_UTF16 + b'''
User-Agent: agent
Allow: /path
User-Agent: other
Disallow: /path
''')
self.assertTrue(robot.allowed('http://example.com/path', 'agent'))
self.assertFalse(robot.allowed('http://example.com/path', 'other'))
def test_rfc_example(self):
'''Tests the example provided by the RFC.'''
robot = robots.Robots.parse('http://www.fict.org', '''
# /robots.txt for http://www.fict.org/
# comments to <EMAIL>
User-agent: unhipbot
Disallow: /
User-agent: webcrawler
User-agent: excite
Disallow:
User-agent: *
Disallow: /org/plans.html
Allow: /org/
Allow: /serv
Allow: /~mak
Disallow: /
''')
# The unhip bot
self.assertFalse(robot.allowed('/', 'unhipbot'))
self.assertFalse(robot.allowed('/index.html', 'unhipbot'))
self.assertTrue(robot.allowed('/robots.txt', 'unhipbot'))
self.assertFalse(robot.allowed('/server.html', 'unhipbot'))
self.assertFalse(robot.allowed('/services/fast.html', 'unhipbot'))
self.assertFalse(robot.allowed('/services/slow.html', 'unhipbot'))
self.assertFalse(robot.allowed('/orgo.gif', 'unhipbot'))
self.assertFalse(robot.allowed('/org/about.html', 'unhipbot'))
self.assertFalse(robot.allowed('/org/plans.html', 'unhipbot'))
self.assertFalse(robot.allowed('/%7Ejim/jim.html', 'unhipbot'))
self.assertFalse(robot.allowed('/%7Emak/mak.html', 'unhipbot'))
# The webcrawler agent
self.assertTrue(robot.allowed('/', 'webcrawler'))
self.assertTrue(robot.allowed('/index.html', 'webcrawler'))
self.assertTrue(robot.allowed('/robots.txt', 'webcrawler'))
self.assertTrue(robot.allowed('/server.html', 'webcrawler'))
self.assertTrue(robot.allowed('/services/fast.html', 'webcrawler'))
self.assertTrue(robot.allowed('/services/slow.html', 'webcrawler'))
self.assertTrue(robot.allowed('/orgo.gif', 'webcrawler'))
self.assertTrue(robot.allowed('/org/about.html', 'webcrawler'))
self.assertTrue(robot.allowed('/org/plans.html', 'webcrawler'))
self.assertTrue(robot.allowed('/%7Ejim/jim.html', 'webcrawler'))
self.assertTrue(robot.allowed('/%7Emak/mak.html', 'webcrawler'))
# The excite agent
self.assertTrue(robot.allowed('/', 'excite'))
self.assertTrue(robot.allowed('/index.html', 'excite'))
self.assertTrue(robot.allowed('/robots.txt', 'excite'))
self.assertTrue(robot.allowed('/server.html', 'excite'))
self.assertTrue(robot.allowed('/services/fast.html', 'excite'))
self.assertTrue(robot.allowed('/services/slow.html', 'excite'))
self.assertTrue(robot.allowed('/orgo.gif', 'excite'))
self.assertTrue(robot.allowed('/org/about.html', 'excite'))
self.assertTrue(robot.allowed('/org/plans.html', 'excite'))
self.assertTrue(robot.allowed('/%7Ejim/jim.html', 'excite'))
self.assertTrue(robot.allowed('/%7Emak/mak.html', 'excite'))
# All others
self.assertFalse(robot.allowed('/', 'anything'))
self.assertFalse(robot.allowed('/index.html', 'anything'))
self.assertTrue(robot.allowed('/robots.txt', 'anything'))
self.assertTrue(robot.allowed('/server.html', 'anything'))
self.assertTrue(robot.allowed('/services/fast.html', 'anything'))
self.assertTrue(robot.allowed('/services/slow.html', 'anything'))
self.assertFalse(robot.allowed('/orgo.gif', 'anything'))
self.assertTrue(robot.allowed('/org/about.html', 'anything'))
self.assertFalse(robot.allowed('/org/plans.html', 'anything'))
self.assertFalse(robot.allowed('/%7Ejim/jim.html', 'anything'))
self.assertTrue(robot.allowed('/%7Emak/mak.html', 'anything'))
def test_after_response_hook(self):
'''Calls after_response_hook when response is received'''
state = {"called": False}
def hook(response):
state["called"] = True
self.assertEquals(response.status_code, 200)
with requests_fixtures('test_after_response_hook'):
robots.Robots.fetch(
'http://example.com/robots.txt', after_response_hook=hook)
self.assertTrue(state["called"])
def test_after_response_hook_on_error(self):
'''Calls after_response_hook when error occurs during fetch'''
state = {"called": False}
expected_url = 'http://localhost:8080/robots.txt'
def hook(response):
state["called"] = True
self.assertIsInstance(
response, robots.exceptions.ConnectionException)
self.assertEquals(response.url, expected_url)
with self.assertRaises(robots.exceptions.ConnectionException):
robots.Robots.fetch(expected_url, after_response_hook=hook)
self.assertTrue(state["called"])
def test_after_parse_hook(self):
'''Calls after_parse_hook after parsing robots.txt'''
state = {"called": False}
def hook(robots):
state["called"] = True
self.assertFalse(robots.allowed('/disallowed', 'me'))
with requests_fixtures('test_after_parse_hook'):
robots.Robots.fetch(
'http://example.com/robots.txt', after_parse_hook=hook)
self.assertTrue(state["called"])
class AllowNoneTest(unittest.TestCase):
'''Tests about the AllowNone Robots class.'''
def test_allow(self):
'''Allows nothing.'''
robot = robots.AllowNone('http://example.com/robots.txt')
self.assertFalse(robot.allowed('/', 'agent'))
def test_allow_robots_txt(self):
'''Allows robots.txt.'''
robot = robots.AllowNone('http://example.com/robots.txt')
self.assertTrue(robot.allowed('/robots.txt', 'agent'))
class AllowAllTest(unittest.TestCase):
'''Tests about the AllowAll Robots class.'''
def test_allow(self):
'''Allows nothing.'''
robot = robots.AllowAll('http://example.com/robots.txt')
self.assertTrue(robot.allowed('/', 'agent'))
|
osbrain/tests/conftest.py | RezaBehzadpour/osbrain | 176 | 12649253 | <gh_stars>100-1000
import pytest
from osbrain import run_agent
from osbrain import run_logger
from osbrain import run_nameserver
from osbrain.helper import sync_agent_logger
@pytest.fixture(scope='function')
def nsproxy(request):
ns = run_nameserver()
yield ns
ns.shutdown()
@pytest.fixture(scope='function')
def agent_logger(request):
ns = run_nameserver()
agent = run_agent('a0')
logger = run_logger('logger')
agent.set_logger(logger)
sync_agent_logger(agent=agent, logger=logger)
yield agent, logger
ns.shutdown()
|
python/helpers/pydev/pydev_tests_python/resources/_debugger_case_frame_eval.py | tgodzik/intellij-community | 695 | 12649261 | '''
Things this test checks:
- frame.f_trace is None when there are only regular breakpoints.
- The no-op tracing function is set by default (otherwise when set tracing functions have no effect).
- When stepping in, frame.f_trace must be set by the frame eval.
- When stepping over/return, the frame.f_trace must not be set on intermediate callers.
TODO:
- When frame.f_trace is set to the default tracing function, it'll become None again in frame
eval mode if not stepping (if breakpoints weren't changed).
- The tracing function in the frames that deal with unhandled exceptions must be set when dealing
with unhandled exceptions.
- The tracing function in the frames that deal with unhandled exceptions must NOT be set when
NOT dealing with unhandled exceptions.
- If handled exceptions should be dealt with, the proper tracing should be set in frame.f_trace.
'''
import sys
from _pydevd_frame_eval import pydevd_frame_tracing
def check_with_no_trace():
if False:
print('break on check_with_trace')
frame = sys._getframe()
if frame.f_trace is not None:
raise AssertionError('Expected %s to be None' % (frame.f_trace,))
if sys.gettrace() is not pydevd_frame_tracing.dummy_tracing_holder.dummy_trace_func:
raise AssertionError('Expected %s to be dummy_trace_func' % (sys.gettrace(),))
def check_step_in_then_step_return():
frame = sys._getframe()
f_trace = frame.f_trace
if f_trace.__class__.__name__ != 'SafeCallWrapper':
raise AssertionError('Expected %s to be SafeCallWrapper' % (f_trace.__class__.__name__,))
check_with_no_trace()
def check_revert_to_dummy():
check_with_no_trace()
if __name__ == '__main__':
# Check how frame eval works.
if sys.version_info[0:2] < (3, 6):
raise AssertionError('Only available for Python 3.6 onwards. Found: %s' % (sys.version_info[0:1],))
check_with_no_trace() # break on global (step over)
check_step_in_then_step_return()
import pydevd_tracing
import pydevd
# This is what a remote attach would do (should revert to the frame eval mode).
pydevd_tracing.SetTrace(pydevd.get_global_debugger().trace_dispatch)
check_revert_to_dummy()
print('TEST SUCEEDED!')
|
ecp5/trellis_import.py | antmicro/nextpnr | 865 | 12649273 | #!/usr/bin/env python3
import argparse
import json
import sys
from os import path
location_types = dict()
type_at_location = dict()
tiletype_names = dict()
gfx_wire_ids = dict()
gfx_wire_names = list()
parser = argparse.ArgumentParser(description="import ECP5 routing and bels from Project Trellis")
parser.add_argument("device", type=str, help="target device")
parser.add_argument("-p", "--constids", type=str, help="path to constids.inc")
parser.add_argument("-g", "--gfxh", type=str, help="path to gfx.h")
parser.add_argument("-L", "--libdir", type=str, action="append", help="extra Python library path")
args = parser.parse_args()
sys.path += args.libdir
import pytrellis
import database
import pip_classes
import timing_dbs
with open(args.gfxh) as f:
state = 0
for line in f:
if state == 0 and line.startswith("enum GfxTileWireId"):
state = 1
elif state == 1 and line.startswith("};"):
state = 0
elif state == 1 and (line.startswith("{") or line.strip() == ""):
pass
elif state == 1:
idx = len(gfx_wire_ids)
name = line.strip().rstrip(",")
gfx_wire_ids[name] = idx
gfx_wire_names.append(name)
def gfx_wire_alias(old, new):
assert old in gfx_wire_ids
assert new not in gfx_wire_ids
gfx_wire_ids[new] = gfx_wire_ids[old]
def wire_type(name):
longname = name
name = name.split('/')
if name[0].startswith("X") and name[1].startswith("Y"):
name = name[2:]
if name[0].endswith("_SLICE"):
return "WIRE_TYPE_SLICE"
if name[0].endswith("_DQS"):
return "WIRE_TYPE_DQS"
if name[0].endswith("_IOLOGIC"):
return "WIRE_TYPE_IOLOGIC"
if name[0].endswith("_SIOLOGIC"):
return "WIRE_TYPE_SIOLOGIC"
if name[0].endswith("_PIO"):
return "WIRE_TYPE_PIO"
if name[0].endswith("_DDRDLL"):
return "WIRE_TYPE_DDRDLL"
if name[0].endswith("_CCLK"):
return "WIRE_TYPE_CCLK"
if name[0].endswith("_EXTREF"):
return "WIRE_TYPE_EXTREF"
if name[0].endswith("_DCU"):
return "WIRE_TYPE_DCU"
if name[0].endswith("_EBR"):
return "WIRE_TYPE_EBR"
if name[0].endswith("_MULT18"):
return "WIRE_TYPE_MULT18"
if name[0].endswith("_ALU54"):
return "WIRE_TYPE_ALU54"
if name[0].endswith("_PLL"):
return "WIRE_TYPE_PLL"
if name[0].endswith("_SED"):
return "WIRE_TYPE_SED"
if name[0].endswith("_OSC"):
return "WIRE_TYPE_OSC"
if name[0].endswith("_JTAG"):
return "WIRE_TYPE_JTAG"
if name[0].endswith("_GSR"):
return "WIRE_TYPE_GSR"
if name[0].endswith("_DTR"):
return "WIRE_TYPE_DTR"
if name[0].endswith("_PCSCLKDIV0"):
return "WIRE_TYPE_PCSCLKDIV"
if name[0].endswith("_PCSCLKDIV1"):
return "WIRE_TYPE_PCSCLKDIV"
if name[0].startswith("H00"):
return "WIRE_TYPE_H00"
if name[0].startswith("H01"):
return "WIRE_TYPE_H01"
if name[0].startswith("HFI"):
return "WIRE_TYPE_H01"
if name[0].startswith("HL7"):
return "WIRE_TYPE_H01"
if name[0].startswith("H02"):
return "WIRE_TYPE_H02"
if name[0].startswith("H06"):
return "WIRE_TYPE_H06"
if name[0].startswith("V00"):
return "WIRE_TYPE_V00"
if name[0].startswith("V01"):
return "WIRE_TYPE_V01"
if name[0].startswith("V02"):
return "WIRE_TYPE_V02"
if name[0].startswith("V06"):
return "WIRE_TYPE_V06"
if name[0].startswith("G_HPBX"):
return "WIRE_TYPE_G_HPBX"
if name[0].startswith("G_VPTX"):
return "WIRE_TYPE_G_VPTX"
if name[0].startswith("L_HPBX"):
return "WIRE_TYPE_L_HPBX"
if name[0].startswith("R_HPBX"):
return "WIRE_TYPE_R_HPBX"
return "WIRE_TYPE_NONE"
def is_global(loc):
return loc.x == -2 and loc.y == -2
# Get the index for a tiletype
def get_tiletype_index(name):
if name in tiletype_names:
return tiletype_names[name]
idx = len(tiletype_names)
tiletype_names[name] = idx
return idx
constids = dict()
class BinaryBlobAssembler:
def l(self, name, ltype = None, export = False):
if ltype is None:
print("label %s" % (name,))
else:
print("label %s %s" % (name, ltype))
def r(self, name, comment):
if comment is None:
print("ref %s" % (name,))
else:
print("ref %s %s" % (name, comment))
def r_slice(self, name, length, comment):
if comment is None:
print("ref %s" % (name,))
else:
print("ref %s %s" % (name, comment))
print ("u32 %d" % (length, ))
def s(self, s, comment):
assert "|" not in s
print("str |%s| %s" % (s, comment))
def u8(self, v, comment):
if comment is None:
print("u8 %d" % (v,))
else:
print("u8 %d %s" % (v, comment))
def u16(self, v, comment):
if comment is None:
print("u16 %d" % (v,))
else:
print("u16 %d %s" % (v, comment))
def u32(self, v, comment):
if comment is None:
print("u32 %d" % (v,))
else:
print("u32 %d %s" % (v, comment))
def pre(self, s):
print("pre %s" % s)
def post(self, s):
print("post %s" % s)
def push(self, name):
print("push %s" % name)
def pop(self):
print("pop")
def get_bel_index(ddrg, loc, name):
loctype = ddrg.locationTypes[ddrg.typeAtLocation[loc]]
idx = 0
for bel in loctype.bels:
if ddrg.to_str(bel.name) == name:
return idx
idx += 1
assert loc.y == max_row # Only missing IO should be special pins at bottom of device
return None
packages = {}
pindata = []
def process_pio_db(ddrg, device):
piofile = path.join(database.get_db_root(), "ECP5", dev_names[device], "iodb.json")
with open(piofile, 'r') as f:
piodb = json.load(f)
for pkgname, pkgdata in sorted(piodb["packages"].items()):
pins = []
for name, pinloc in sorted(pkgdata.items()):
x = pinloc["col"]
y = pinloc["row"]
loc = pytrellis.Location(x, y)
pio = "PIO" + pinloc["pio"]
bel_idx = get_bel_index(ddrg, loc, pio)
if bel_idx is not None:
pins.append((name, loc, bel_idx))
packages[pkgname] = pins
for metaitem in piodb["pio_metadata"]:
x = metaitem["col"]
y = metaitem["row"]
loc = pytrellis.Location(x, y)
pio = "PIO" + metaitem["pio"]
bank = metaitem["bank"]
if "function" in metaitem:
pinfunc = metaitem["function"]
else:
pinfunc = None
dqs = -1
if "dqs" in metaitem:
tdqs = metaitem["dqs"]
if tdqs[0] == "L":
dqs = 0
elif tdqs[0] == "R":
dqs = 2048
suffix_size = 0
while tdqs[-(suffix_size+1)].isdigit():
suffix_size += 1
dqs |= int(tdqs[-suffix_size:])
bel_idx = get_bel_index(ddrg, loc, pio)
if bel_idx is not None:
pindata.append((loc, bel_idx, bank, pinfunc, dqs))
global_data = {}
quadrants = ["UL", "UR", "LL", "LR"]
def process_loc_globals(chip):
for y in range(0, max_row+1):
for x in range(0, max_col+1):
quad = chip.global_data.get_quadrant(y, x)
tapdrv = chip.global_data.get_tap_driver(y, x)
if tapdrv.col == x:
spinedrv = chip.global_data.get_spine_driver(quad, x)
spine = (spinedrv.second, spinedrv.first)
else:
spine = (-1, -1)
global_data[x, y] = (quadrants.index(quad), int(tapdrv.dir), tapdrv.col, spine)
speed_grade_names = ["6", "7", "8", "8_5G"]
speed_grade_cells = {}
speed_grade_pips = {}
pip_class_to_idx = {"default": 0, "zero": 1}
timing_port_xform = {
"RAD0": "D0",
"RAD1": "B0",
"RAD2": "C0",
"RAD3": "A0",
}
def process_timing_data():
for grade in speed_grade_names:
with open(timing_dbs.cells_db_path("ECP5", grade)) as f:
cell_data = json.load(f)
cells = []
for cell, cdata in sorted(cell_data.items()):
celltype = constids[cell.replace(":", "_").replace("=", "_").replace(",", "_")]
delays = []
setupholds = []
for entry in cdata:
if entry["type"] == "Width":
continue
elif entry["type"] == "IOPath":
from_pin = entry["from_pin"][1] if type(entry["from_pin"]) is list else entry["from_pin"]
if from_pin in timing_port_xform:
from_pin = timing_port_xform[from_pin]
to_pin = entry["to_pin"]
if to_pin in timing_port_xform:
to_pin = timing_port_xform[to_pin]
min_delay = min(entry["rising"][0], entry["falling"][0])
                    max_delay = max(entry["rising"][2], entry["falling"][2])
delays.append((constids[from_pin], constids[to_pin], min_delay, max_delay))
elif entry["type"] == "SetupHold":
if type(entry["pin"]) is list:
continue
pin = constids[entry["pin"]]
clock = constids[entry["clock"][1]]
min_setup = entry["setup"][0]
max_setup = entry["setup"][2]
min_hold = entry["hold"][0]
max_hold = entry["hold"][2]
setupholds.append((pin, clock, min_setup, max_setup, min_hold, max_hold))
else:
assert False, entry["type"]
cells.append((celltype, delays, setupholds))
pip_class_delays = []
for i in range(len(pip_class_to_idx)):
pip_class_delays.append((50, 50, 0, 0))
pip_class_delays[pip_class_to_idx["zero"]] = (0, 0, 0, 0)
with open(timing_dbs.interconnect_db_path("ECP5", grade)) as f:
interconn_data = json.load(f)
for pipclass, pipdata in sorted(interconn_data.items()):
min_delay = pipdata["delay"][0] * 1.1
max_delay = pipdata["delay"][2] * 1.1
min_fanout = pipdata["fanout"][0]
max_fanout = pipdata["fanout"][2]
if grade == "6":
pip_class_to_idx[pipclass] = len(pip_class_delays)
pip_class_delays.append((min_delay, max_delay, min_fanout, max_fanout))
else:
if pipclass in pip_class_to_idx:
pip_class_delays[pip_class_to_idx[pipclass]] = (min_delay, max_delay, min_fanout, max_fanout)
speed_grade_cells[grade] = cells
speed_grade_pips[grade] = pip_class_delays
def get_pip_class(wire_from, wire_to):
if "FCO" in wire_from or "FCI" in wire_to:
return pip_class_to_idx["zero"]
if "F5" in wire_from or "FX" in wire_from or "FXA" in wire_to or "FXB" in wire_to:
return pip_class_to_idx["zero"]
class_name = pip_classes.get_pip_class(wire_from, wire_to)
if class_name is None or class_name not in pip_class_to_idx:
class_name = "default"
return pip_class_to_idx[class_name]
def write_database(dev_name, chip, ddrg, endianness):
def write_loc(loc, sym_name):
bba.u16(loc.x, "%s.x" % sym_name)
bba.u16(loc.y, "%s.y" % sym_name)
loctypes = list([_.key() for _ in ddrg.locationTypes])
loc_with_type = {}
for y in range(0, max_row+1):
for x in range(0, max_col+1):
loc_with_type[loctypes.index(ddrg.typeAtLocation[pytrellis.Location(x, y)])] = (x, y)
def get_wire_name(arc_loctype, rel, idx):
loc = loc_with_type[arc_loctype]
lt = ddrg.typeAtLocation[pytrellis.Location(loc[0] + rel.x, loc[1] + rel.y)]
wire = ddrg.locationTypes[lt].wires[idx]
return "R{}C{}_{}".format(loc[1] + rel.y, loc[0] + rel.x, ddrg.to_str(wire.name))
bba = BinaryBlobAssembler()
bba.pre('#include "nextpnr.h"')
bba.pre('#include "embed.h"')
bba.pre('NEXTPNR_NAMESPACE_BEGIN')
bba.post('EmbeddedFile chipdb_file_%s("ecp5/chipdb-%s.bin", chipdb_blob_%s);' % (dev_name, dev_name, dev_name))
bba.post('NEXTPNR_NAMESPACE_END')
bba.push("chipdb_blob_%s" % dev_name)
bba.r("chip_info", "chip_info")
for idx in range(len(loctypes)):
loctype = ddrg.locationTypes[loctypes[idx]]
if len(loctype.arcs) > 0:
bba.l("loc%d_pips" % idx, "PipInfoPOD")
for arc in loctype.arcs:
write_loc(arc.srcWire.rel, "src")
write_loc(arc.sinkWire.rel, "dst")
bba.u32(arc.srcWire.id, "src_idx")
bba.u32(arc.sinkWire.id, "dst_idx")
src_name = get_wire_name(idx, arc.srcWire.rel, arc.srcWire.id)
snk_name = get_wire_name(idx, arc.sinkWire.rel, arc.sinkWire.id)
bba.u32(get_pip_class(src_name, snk_name), "timing_class")
bba.u16(get_tiletype_index(ddrg.to_str(arc.tiletype)), "tile_type")
cls = arc.cls
if cls == 1 and "PCS" in snk_name or "DCU" in snk_name or "DCU" in src_name:
cls = 2
bba.u8(cls, "pip_type")
bba.u8(0, "padding")
if len(loctype.wires) > 0:
for wire_idx in range(len(loctype.wires)):
wire = loctype.wires[wire_idx]
if len(wire.arcsDownhill) > 0:
bba.l("loc%d_wire%d_downpips" % (idx, wire_idx), "PipLocatorPOD")
for dp in wire.arcsDownhill:
write_loc(dp.rel, "rel_loc")
bba.u32(dp.id, "index")
if len(wire.arcsUphill) > 0:
bba.l("loc%d_wire%d_uppips" % (idx, wire_idx), "PipLocatorPOD")
for up in wire.arcsUphill:
write_loc(up.rel, "rel_loc")
bba.u32(up.id, "index")
if len(wire.belPins) > 0:
bba.l("loc%d_wire%d_belpins" % (idx, wire_idx), "BelPortPOD")
for bp in wire.belPins:
write_loc(bp.bel.rel, "rel_bel_loc")
bba.u32(bp.bel.id, "bel_index")
bba.u32(constids[ddrg.to_str(bp.pin)], "port")
bba.l("loc%d_wires" % idx, "WireInfoPOD")
for wire_idx in range(len(loctype.wires)):
wire = loctype.wires[wire_idx]
bba.s(ddrg.to_str(wire.name), "name")
bba.u32(constids[wire_type(ddrg.to_str(wire.name))], "type")
if ("TILE_WIRE_" + ddrg.to_str(wire.name)) in gfx_wire_ids:
bba.u32(gfx_wire_ids["TILE_WIRE_" + ddrg.to_str(wire.name)], "tile_wire")
else:
bba.u32(0, "tile_wire")
bba.r_slice("loc%d_wire%d_uppips" % (idx, wire_idx) if len(wire.arcsUphill) > 0 else None, len(wire.arcsUphill), "pips_uphill")
bba.r_slice("loc%d_wire%d_downpips" % (idx, wire_idx) if len(wire.arcsDownhill) > 0 else None, len(wire.arcsDownhill), "pips_downhill")
bba.r_slice("loc%d_wire%d_belpins" % (idx, wire_idx) if len(wire.belPins) > 0 else None, len(wire.belPins), "bel_pins")
if len(loctype.bels) > 0:
for bel_idx in range(len(loctype.bels)):
bel = loctype.bels[bel_idx]
bba.l("loc%d_bel%d_wires" % (idx, bel_idx), "BelWirePOD")
for pin in bel.wires:
write_loc(pin.wire.rel, "rel_wire_loc")
bba.u32(pin.wire.id, "wire_index")
bba.u32(constids[ddrg.to_str(pin.pin)], "port")
bba.u32(int(pin.dir), "dir")
bba.l("loc%d_bels" % idx, "BelInfoPOD")
for bel_idx in range(len(loctype.bels)):
bel = loctype.bels[bel_idx]
bba.s(ddrg.to_str(bel.name), "name")
bba.u32(constids[ddrg.to_str(bel.type)], "type")
bba.u32(bel.z, "z")
bba.r_slice("loc%d_bel%d_wires" % (idx, bel_idx), len(bel.wires), "bel_wires")
bba.l("locations", "LocationTypePOD")
for idx in range(len(loctypes)):
loctype = ddrg.locationTypes[loctypes[idx]]
bba.r_slice("loc%d_bels" % idx if len(loctype.bels) > 0 else None, len(loctype.bels), "bel_data")
bba.r_slice("loc%d_wires" % idx if len(loctype.wires) > 0 else None, len(loctype.wires), "wire_data")
bba.r_slice("loc%d_pips" % idx if len(loctype.arcs) > 0 else None, len(loctype.arcs), "pips_data")
for y in range(0, max_row+1):
for x in range(0, max_col+1):
bba.l("tile_info_%d_%d" % (x, y), "TileNamePOD")
for tile in chip.get_tiles_by_position(y, x):
bba.s(tile.info.name, "name")
bba.u16(get_tiletype_index(tile.info.type), "type_idx")
bba.u16(0, "padding")
bba.l("tiles_info", "TileInfoPOD")
for y in range(0, max_row+1):
for x in range(0, max_col+1):
bba.r_slice("tile_info_%d_%d" % (x, y), len(chip.get_tiles_by_position(y, x)), "tile_names")
bba.l("location_types", "int32_t")
for y in range(0, max_row+1):
for x in range(0, max_col+1):
bba.u32(loctypes.index(ddrg.typeAtLocation[pytrellis.Location(x, y)]), "loctype")
bba.l("location_glbinfo", "GlobalInfoPOD")
for y in range(0, max_row+1):
for x in range(0, max_col+1):
bba.u16(global_data[x, y][2], "tap_col")
bba.u8(global_data[x, y][1], "tap_dir")
bba.u8(global_data[x, y][0], "quad")
bba.u16(global_data[x, y][3][1], "spine_row")
bba.u16(global_data[x, y][3][0], "spine_col")
for package, pkgdata in sorted(packages.items()):
bba.l("package_data_%s" % package, "PackagePinPOD")
for pin in pkgdata:
name, loc, bel_idx = pin
bba.s(name, "name")
write_loc(loc, "abs_loc")
bba.u32(bel_idx, "bel_index")
bba.l("package_data", "PackageInfoPOD")
for package, pkgdata in sorted(packages.items()):
bba.s(package, "name")
bba.r_slice("package_data_%s" % package, len(pkgdata), "pin_data")
bba.l("pio_info", "PIOInfoPOD")
for pin in pindata:
loc, bel_idx, bank, func, dqs = pin
write_loc(loc, "abs_loc")
bba.u32(bel_idx, "bel_index")
if func is not None and func != "WRITEN":
bba.s(func, "function_name")
else:
bba.r(None, "function_name")
bba.u16(bank, "bank")
bba.u16(dqs, "dqsgroup")
bba.l("tiletype_names", "RelPtr<char>")
for tt, idx in sorted(tiletype_names.items(), key=lambda x: x[1]):
bba.s(tt, "name")
for grade in speed_grade_names:
for cell in speed_grade_cells[grade]:
celltype, delays, setupholds = cell
if len(delays) > 0:
bba.l("cell_%d_delays_%s" % (celltype, grade))
for delay in delays:
from_pin, to_pin, min_delay, max_delay = delay
bba.u32(from_pin, "from_pin")
bba.u32(to_pin, "to_pin")
bba.u32(min_delay, "min_delay")
bba.u32(max_delay, "max_delay")
if len(setupholds) > 0:
bba.l("cell_%d_setupholds_%s" % (celltype, grade))
for sh in setupholds:
pin, clock, min_setup, max_setup, min_hold, max_hold = sh
bba.u32(pin, "sig_port")
bba.u32(clock, "clock_port")
bba.u32(min_setup, "min_setup")
bba.u32(max_setup, "max_setup")
bba.u32(min_hold, "min_hold")
bba.u32(max_hold, "max_hold")
bba.l("cell_timing_data_%s" % grade)
for cell in speed_grade_cells[grade]:
celltype, delays, setupholds = cell
bba.u32(celltype, "cell_type")
bba.r_slice("cell_%d_delays_%s" % (celltype, grade) if len(delays) > 0 else None, len(delays), "delays")
bba.r_slice("cell_%d_setupholds_%s" % (celltype, grade) if len(delays) > 0 else None, len(setupholds), "setupholds")
bba.l("pip_timing_data_%s" % grade)
for pipclass in speed_grade_pips[grade]:
min_delay, max_delay, min_fanout, max_fanout = pipclass
bba.u32(min_delay, "min_delay")
bba.u32(max_delay, "max_delay")
bba.u32(min_fanout, "min_fanout")
bba.u32(max_fanout, "max_fanout")
bba.l("speed_grade_data")
for grade in speed_grade_names:
bba.r_slice("cell_timing_data_%s" % grade, len(speed_grade_cells[grade]), "cell_timings")
bba.r_slice("pip_timing_data_%s" % grade, len(speed_grade_pips[grade]), "pip_classes")
bba.l("chip_info")
bba.u32(max_col + 1, "width")
bba.u32(max_row + 1, "height")
bba.u32((max_col + 1) * (max_row + 1), "num_tiles")
bba.u32(const_id_count, "const_id_count")
bba.r_slice("locations", len(loctypes), "locations")
bba.r_slice("location_types", (max_col + 1) * (max_row + 1), "location_type")
bba.r_slice("location_glbinfo", (max_col + 1) * (max_row + 1), "location_glbinfo")
bba.r_slice("tiletype_names", len(tiletype_names), "tiletype_names")
bba.r_slice("package_data", len(packages), "package_info")
bba.r_slice("pio_info", len(pindata), "pio_info")
bba.r_slice("tiles_info", (max_col + 1) * (max_row + 1), "tile_info")
bba.r_slice("speed_grade_data", len(speed_grade_names), "speed_grades")
bba.pop()
return bba
dev_names = {"25k": "LFE5UM5G-25F", "45k": "LFE5UM5G-45F", "85k": "LFE5UM5G-85F"}
def main():
global max_row, max_col, const_id_count
pytrellis.load_database(database.get_db_root())
args = parser.parse_args()
# Read port pin file
const_id_count = 1 # count ID_NONE
with open(args.constids) as f:
for line in f:
line = line.replace("(", " ")
line = line.replace(")", " ")
line = line.split()
if len(line) == 0:
continue
assert len(line) == 2
assert line[0] == "X"
idx = len(constids) + 1
constids[line[1]] = idx
const_id_count += 1
constids["SLICE"] = constids["TRELLIS_SLICE"]
constids["PIO"] = constids["TRELLIS_IO"]
# print("Initialising chip...")
chip = pytrellis.Chip(dev_names[args.device])
# print("Building routing graph...")
ddrg = pytrellis.make_dedup_chipdb(chip)
max_row = chip.get_max_row()
max_col = chip.get_max_col()
process_timing_data()
process_pio_db(ddrg, args.device)
process_loc_globals(chip)
# print("{} unique location types".format(len(ddrg.locationTypes)))
bba = write_database(args.device, chip, ddrg, "le")
if __name__ == "__main__":
main()
|
datasets/movielens_pinterest_NCF/get_train_data.py | ziyoujiyi/PaddleRec | 2,739 | 12649280 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import scipy.sparse as sp
import numpy as np
from time import time
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="Run GMF.")
parser.add_argument(
'--path', nargs='?', default='Data/', help='Input data path.')
parser.add_argument(
'--dataset', nargs='?', default='ml-1m', help='Choose a dataset.')
parser.add_argument(
'--num_neg',
type=int,
default=4,
help='Number of negative instances to pair with a positive instance.')
parser.add_argument(
'--train_data_path',
type=str,
default="Data/train_data.csv",
help='train_data_path')
return parser.parse_args()
def get_train_data(filename, write_file, num_negatives):
    '''
    Read the .rating file, build a dok matrix of positive user-item interactions,
    and write positive samples plus sampled negatives to write_file.
    Each line of the .rating file is tab-separated: user_id, item_id, rating, ...
    '''
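    # Illustrative sketch of one assumed input line (ml-1m style, tab-separated); only the
    # first three fields are read below and any trailing fields (e.g. a timestamp) are ignored:
    #     0\t32\t4\t978824330   ->  user_id=0, item_id=32, rating=4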
# Get number of users and items
num_users, num_items = 0, 0
with open(filename, "r") as f:
line = f.readline()
while line != None and line != "":
arr = line.split("\t")
u, i = int(arr[0]), int(arr[1])
num_users = max(num_users, u)
num_items = max(num_items, i)
line = f.readline()
print("users_num:", num_users, "items_num:", num_items)
# Construct matrix
mat = sp.dok_matrix((num_users + 1, num_items + 1), dtype=np.float32)
with open(filename, "r") as f:
line = f.readline()
while line != None and line != "":
arr = line.split("\t")
user, item, rating = int(arr[0]), int(arr[1]), float(arr[2])
if (rating > 0):
mat[user, item] = 1.0
line = f.readline()
file = open(write_file, 'w')
print("writing " + write_file)
for (u, i) in mat.keys():
# positive instance
user_input = str(u)
item_input = str(i)
label = str(1)
sample = "{0},{1},{2}".format(user_input, item_input, label) + "\n"
file.write(sample)
# negative instances
for t in range(num_negatives):
j = np.random.randint(num_items)
while (u, j) in mat.keys():
j = np.random.randint(num_items)
user_input = str(u)
item_input = str(j)
label = str(0)
sample = "{0},{1},{2}".format(user_input, item_input, label) + "\n"
file.write(sample)
if __name__ == "__main__":
args = parse_args()
get_train_data(args.path + args.dataset + ".train.rating",
args.train_data_path, args.num_neg)
|
trailscraper/record_sources/__init__.py | ocefpaf/trailscraper | 497 | 12649284 | """Package containing classes that represent a source of CloudTrail records, e.g. from an API or disk storage"""
|
awswrangler/quicksight/_delete.py | isichei/aws-data-wrangler | 2,695 | 12649333 | <filename>awswrangler/quicksight/_delete.py
"""Amazon QuickSight Delete Module."""
import logging
from typing import Any, Callable, Dict, Optional
import boto3
from awswrangler import _utils, exceptions, sts
from awswrangler.quicksight._get_list import (
get_dashboard_id,
get_data_source_id,
get_dataset_id,
get_template_id,
list_dashboards,
list_data_sources,
list_datasets,
list_templates,
)
_logger: logging.Logger = logging.getLogger(__name__)
def _delete(
func_name: str, account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None, **kwargs: Any
) -> None:
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if account_id is None:
account_id = sts.get_account_id(boto3_session=session)
client: boto3.client = _utils.client(service_name="quicksight", session=session)
func: Callable[..., None] = getattr(client, func_name)
func(AwsAccountId=account_id, **kwargs)
def delete_dashboard(
name: Optional[str] = None,
dashboard_id: Optional[str] = None,
version_number: Optional[int] = None,
account_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Delete a dashboard.
Note
----
You must pass a not None ``name`` or ``dashboard_id`` argument.
Parameters
----------
name : str, optional
Dashboard name.
dashboard_id : str, optional
The ID for the dashboard.
version_number : int, optional
The version number of the dashboard. If the version number property is provided,
only the specified version of the dashboard is deleted.
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_dashboard(name="...")
"""
if (name is None) and (dashboard_id is None):
raise exceptions.InvalidArgument("You must pass a not None name or dashboard_id argument.")
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if (dashboard_id is None) and (name is not None):
dashboard_id = get_dashboard_id(name=name, account_id=account_id, boto3_session=session)
args: Dict[str, Any] = {
"func_name": "delete_dashboard",
"account_id": account_id,
"boto3_session": session,
"DashboardId": dashboard_id,
}
if version_number is not None:
args["VersionNumber"] = version_number
_delete(**args)
def delete_dataset(
name: Optional[str] = None,
dataset_id: Optional[str] = None,
account_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Delete a dataset.
Note
----
You must pass a not None ``name`` or ``dataset_id`` argument.
Parameters
----------
name : str, optional
        Dataset name.
dataset_id : str, optional
The ID for the dataset.
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_dataset(name="...")
"""
if (name is None) and (dataset_id is None):
raise exceptions.InvalidArgument("You must pass a not None name or dataset_id argument.")
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if (dataset_id is None) and (name is not None):
dataset_id = get_dataset_id(name=name, account_id=account_id, boto3_session=session)
args: Dict[str, Any] = {
"func_name": "delete_data_set",
"account_id": account_id,
"boto3_session": session,
"DataSetId": dataset_id,
}
_delete(**args)
def delete_data_source(
name: Optional[str] = None,
data_source_id: Optional[str] = None,
account_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Delete a data source.
Note
----
You must pass a not None ``name`` or ``data_source_id`` argument.
Parameters
----------
name : str, optional
        Data source name.
data_source_id : str, optional
The ID for the data source.
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_data_source(name="...")
"""
if (name is None) and (data_source_id is None):
raise exceptions.InvalidArgument("You must pass a not None name or data_source_id argument.")
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if (data_source_id is None) and (name is not None):
data_source_id = get_data_source_id(name=name, account_id=account_id, boto3_session=session)
args: Dict[str, Any] = {
"func_name": "delete_data_source",
"account_id": account_id,
"boto3_session": session,
"DataSourceId": data_source_id,
}
_delete(**args)
def delete_template(
name: Optional[str] = None,
template_id: Optional[str] = None,
version_number: Optional[int] = None,
account_id: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Delete a tamplate.
Note
----
You must pass a not None ``name`` or ``template_id`` argument.
Parameters
----------
name : str, optional
        Template name.
template_id : str, optional
The ID for the dashboard.
version_number : int, optional
Specifies the version of the template that you want to delete.
If you don't provide a version number, it deletes all versions of the template.
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_template(name="...")
"""
if (name is None) and (template_id is None):
raise exceptions.InvalidArgument("You must pass a not None name or template_id argument.")
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if (template_id is None) and (name is not None):
template_id = get_template_id(name=name, account_id=account_id, boto3_session=session)
args: Dict[str, Any] = {
"func_name": "delete_template",
"account_id": account_id,
"boto3_session": session,
"TemplateId": template_id,
}
if version_number is not None:
args["VersionNumber"] = version_number
_delete(**args)
def delete_all_dashboards(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None:
"""Delete all dashboards.
Parameters
----------
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_all_dashboards()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if account_id is None:
account_id = sts.get_account_id(boto3_session=session)
for dashboard in list_dashboards(account_id=account_id, boto3_session=session):
delete_dashboard(dashboard_id=dashboard["DashboardId"], account_id=account_id, boto3_session=session)
def delete_all_datasets(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None:
"""Delete all datasets.
Parameters
----------
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_all_datasets()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if account_id is None:
account_id = sts.get_account_id(boto3_session=session)
for dataset in list_datasets(account_id=account_id, boto3_session=session):
delete_dataset(dataset_id=dataset["DataSetId"], account_id=account_id, boto3_session=session)
def delete_all_data_sources(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None:
"""Delete all data sources.
Parameters
----------
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_all_data_sources()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if account_id is None:
account_id = sts.get_account_id(boto3_session=session)
for data_source in list_data_sources(account_id=account_id, boto3_session=session):
delete_data_source(data_source_id=data_source["DataSourceId"], account_id=account_id, boto3_session=session)
def delete_all_templates(account_id: Optional[str] = None, boto3_session: Optional[boto3.Session] = None) -> None:
"""Delete all templates.
Parameters
----------
account_id : str, optional
If None, the account ID will be inferred from your boto3 session.
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> wr.quicksight.delete_all_templates()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
if account_id is None:
account_id = sts.get_account_id(boto3_session=session)
for template in list_templates(account_id=account_id, boto3_session=session):
delete_template(template_id=template["TemplateId"], account_id=account_id, boto3_session=session)
|
leaf/api/settings.py | guiqiqi/leaf | 119 | 12649360 | <filename>leaf/api/settings.py<gh_stars>100-1000
"""API 接口的设置文件"""
from typing import Optional
from flask import abort
from ..core import error
class Authorization:
"""权限验证中的设置"""
ExecuteAPMissing = True # 在未找到接入点信息时是否允许
@staticmethod
def UnAuthorized(_reason: error.Error):
"""
验证失败时的返回值:
_reason: 原因-错误类型
"""
return abort(403)
@staticmethod
def NotPermitted(_diff: int, _strict: Optional[bool] = False):
"""
权限不足时的返回值:
_diff: 所需权限与拥有权限的差值
_strict: 是否指定需要某一级别权限值
"""
return abort(403)
class HTTPResponseHeader:
"""HTTP响应头部分的设置"""
AddCORSSupport = True # 是否启用 CORS 请求支持
CORSDomain = '*' # 启用支持 CORS 的域设置
SupportMethods = [
'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'CONNECT', 'OPTIONS',
'TRACE', 'PATCH'
    ]  # supported request methods - do not modify unless necessary
class Response:
"""响应中的设置"""
Code = "code" # 错误代码键
Description = "description" # 错误解释键
Message = "message" # 错误消息键
class Codes:
"""响应代码设置"""
Success = 0 # 未发生错误的成功代码
Unknown = -1 # 未知错误代码
class Messages:
"""响应消息设置"""
Success = "success" # 未发生错误时的成功消息
Unknown = "undefined" # 未知错误消息
class Descriptions:
"""响应解释设置"""
Success = "成功" # 成功时的解释
Unknown = "发生未知错误" # 未知错误解释
|
dataflows/base/datastream.py | cschloer/dataflows | 160 | 12649362 | from datapackage import Package
class DataStream:
def __init__(self, dp=None, res_iter=None, stats=None):
self.dp = dp if dp is not None else Package()
self.res_iter = res_iter if res_iter is not None else []
self.stats = stats if stats is not None else []
def merge_stats(self):
ret = {}
for s in self.stats:
ret.update(s)
return ret
def _process(self):
return self
|
common/concertina_lib.py | RAbraham/logica | 1,434 | 12649381 | <filename>common/concertina_lib.py
"""Concertina: small Python Workflow execution handler."""
import datetime
import graphviz
from IPython.display import display
from IPython.display import update_display
class ConcertinaQueryEngine(object):
def __init__(self, final_predicates, sql_runner):
self.final_predicates = final_predicates
self.final_result = {}
self.sql_runner = sql_runner
def Run(self, action):
assert action['launcher'] in ('query', 'none')
if action['launcher'] == 'query':
predicate = action['predicate']
print('Running predicate:', predicate, end='')
start = datetime.datetime.now()
result = self.sql_runner(action['sql'], action['engine'],
is_final=(predicate in self.final_predicates))
end = datetime.datetime.now()
print(' (%d seconds)' % (end - start).seconds)
if predicate in self.final_predicates:
self.final_result[predicate] = result
class ConcertinaDryRunEngine(object):
def Run(self, action):
print(action)
class Concertina(object):
DISPLAY_COUNT = 0
@classmethod
def GetDisplayId(cls):
cls.DISPLAY_COUNT = cls.DISPLAY_COUNT + 1
return 'Concertina_%d' % cls.DISPLAY_COUNT
def SortActions(self):
actions_to_assign = {a['name'] for a in self.config}
complete = set()
result = []
while actions_to_assign:
remains = len(actions_to_assign)
for a in list(actions_to_assign):
if complete >= set(self.action[a]["requires"]):
result.append(a)
complete |= {a}
actions_to_assign -= {a}
if len(actions_to_assign) == remains:
assert False, "Could not schedule: %s" % self.config
return result
def __init__(self, config, engine):
self.config = config
self.action = {a["name"]: a for a in self.config}
self.actions_to_run = self.SortActions()
self.engine = engine
assert len(self.action) == len(self.config)
self.all_actions = {a["name"] for a in self.config}
self.complete_actions = set()
self.running_actions = set()
self.display_id = self.GetDisplayId()
self.Display()
def RunOneAction(self):
self.UpdateDisplay()
one_action = self.actions_to_run[0]
del self.actions_to_run[0]
self.running_actions |= {one_action}
self.UpdateDisplay()
self.engine.Run(self.action[one_action].get('action', {}))
self.running_actions -= {one_action}
self.complete_actions |= {one_action}
self.UpdateDisplay()
def Run(self):
while self.actions_to_run:
self.RunOneAction()
def ActionColor(self, a):
if self.action[a].get('type') == 'data':
return 'lightskyblue1'
if a in self.complete_actions:
return 'darkolivegreen1'
if a in self.running_actions:
return 'gold'
return 'gray'
def ActionShape(self, a):
if 'type' in self.action[a]:
action_type = self.action[a]['type']
if action_type == 'data':
return 'cylinder'
if action_type == 'final':
return 'diamond'
return 'box'
def AsGraphViz(self):
g = graphviz.Digraph('Concertina')
for a in self.all_actions:
color = self.ActionColor(a)
shape = self.ActionShape(a)
styles = ['filled']
g.node(a, shape=shape, fillcolor=color, style='filled,rounded', color='gray34')
for prerequisite in self.action[a]['requires']:
g.edge(prerequisite, a)
return g
def Display(self):
display(self.AsGraphViz(), display_id=self.display_id)
def UpdateDisplay(self):
update_display(self.AsGraphViz(), display_id=self.display_id)
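# Illustrative sketch (hypothetical predicate names, engine label and SQL) of the config
# structure the Concertina class above consumes: a list of actions, each with a name, an
# optional type, the names it requires, and an action payload handed to the engine.
#     example_config = [
#         {'name': 'T1', 'type': 'data', 'requires': [],
#          'action': {'predicate': 'T1', 'launcher': 'none'}},
#         {'name': 'Q', 'type': 'final', 'requires': ['T1'],
#          'action': {'predicate': 'Q', 'launcher': 'query',
#                     'engine': 'bigquery', 'sql': 'SELECT 1'}},
#     ]
#     Concertina(example_config, ConcertinaDryRunEngine()).Run()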
def RenamePredicate(table_to_export_map, dependency_edges,
data_dependency_edges, from_name, to_name):
new_table_to_export_map = {}
new_dependency_edges = set()
new_data_dependency_edges = set()
for k, v in table_to_export_map.items():
if k == from_name:
new_table_to_export_map[to_name] = v
else:
new_table_to_export_map[k] = v
for a, b in dependency_edges:
if a == from_name:
a = to_name
if b == from_name:
b = to_name
new_dependency_edges.add((a, b))
for a, b in data_dependency_edges:
if a == from_name:
a = to_name
if b == from_name:
b = to_name
new_data_dependency_edges.add((a, b))
return new_table_to_export_map, new_dependency_edges, new_data_dependency_edges
def ExecuteLogicaProgram(logica_executions, sql_runner, sql_engine):
def ConcertinaConfig(table_to_export_map, dependency_edges,
data_dependency_edges, final_predicates):
depends_on = {}
for source, target in dependency_edges | data_dependency_edges:
depends_on[target] = depends_on.get(target, set()) | {source}
data = {d for d, _ in data_dependency_edges}
data |= {d for d, _ in dependency_edges if d not in table_to_export_map}
result = []
for d in data:
result.append({
'name': d,
'type': 'data',
'requires': [],
'action': {
'predicate': d,
'launcher': 'none'
}
})
for t, sql in table_to_export_map.items():
result.append({
'name': t,
'type': ('final' if t in final_predicates else 'intermediate'),
'requires': list(depends_on.get(t, set())),
'action': {
'predicate': t,
'launcher': 'query',
'engine': sql_engine,
'sql': sql
}
})
return result
table_to_export_map = {}
dependency_edges = set()
data_dependency_edges = set()
final_predicates = {e.main_predicate for e in logica_executions}
for e in logica_executions:
p_table_to_export_map, p_dependency_edges, p_data_dependency_edges = (
e.table_to_export_map, e.dependency_edges, e.data_dependency_edges
)
for p in final_predicates:
if e.main_predicate != p and p in e.table_to_export_map:
p_table_to_export_map, p_dependency_edges, p_data_dependency_edges = (
RenamePredicate(
p_table_to_export_map, p_dependency_edges, p_data_dependency_edges,
p, '⤓' + p))
for k, v in p_table_to_export_map.items():
table_to_export_map[k] = e.PredicateSpecificPreamble(e.main_predicate) + v
for a, b in p_dependency_edges:
dependency_edges.add((a, b))
for a, b in p_data_dependency_edges:
data_dependency_edges.add((a, b))
config = ConcertinaConfig(table_to_export_map,
dependency_edges,
data_dependency_edges,
final_predicates)
engine = ConcertinaQueryEngine(
final_predicates=final_predicates, sql_runner=sql_runner)
preambles = set(e.preamble for e in logica_executions)
assert len(preambles) == 1, 'Inconsistent preambles: %s' % preambles
[preamble] = list(preambles)
if preamble:
sql_runner(preamble, sql_engine, is_final=False)
concertina = Concertina(config, engine)
concertina.Run()
return engine.final_result
|
Spyder/DecryptLogin_note/DecryptLogin_modules/core/sohu.py | Lightblues/10-playground | 2,268 | 12649393 | '''
Function:
    Simulated login to Sohu
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
Last updated:
    2020-10-29
'''
import time
import requests
from hashlib import md5
'''Log in to Sohu on PC'''
class sohuPC():
is_callable = False
def __init__(self, **kwargs):
for key, value in kwargs.items(): setattr(self, key, value)
self.info = 'login in sohu in pc mode'
self.session = requests.Session()
self.__initialize()
    '''Login function'''
    def login(self, username, password, crack_captcha_func=None, **kwargs):
        # set proxies
        self.session.proxies.update(kwargs.get('proxies', {}))
        # request home_url
        self.session.get(self.home_url)
        # request login_url
        data = {
            'userid': username,
            'password': md5(password.encode(encoding='utf-8')).hexdigest(),
'persistentCookie': '1',
'appid': '116005',
}
response = self.session.post(self.login_url, data=data)
response_json = response.json()
        # login succeeded
        if response_json.get('status') == 200 and response_json.get('message') == 'Success':
            print('[INFO]: Account -> %s, login successfully' % username)
            infos_return = {'username': username}
            return infos_return, self.session
        # wrong username or password
        elif response_json.get('status') in [404, 459]:
            raise RuntimeError('Account -> %s, fail to login, username or password error' % username)
        # other reasons
        else:
            raise RuntimeError(response_json.get('message'))
    '''Initialization'''
def __initialize(self):
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
'origin': 'https://www.sohu.com',
'upgrade-insecure-requests': '1',
'referer': 'https://www.sohu.com/',
'origin': 'https://www.sohu.com'
}
self.home_url = 'http://www.sohu.com/'
self.login_url = 'https://v4.passport.sohu.com/i/login/116005'
self.session.headers.update(self.headers)
'''Log in to Sohu on mobile'''
class sohuMobile():
is_callable = True
def __init__(self, **kwargs):
for key, value in kwargs.items(): setattr(self, key, value)
self.info = 'login in sohu in mobile mode'
self.session = requests.Session()
self.__initialize()
    '''Login function'''
    def login(self, username, password, crack_captcha_func=None, **kwargs):
        # set proxies
        self.session.proxies.update(kwargs.get('proxies', {}))
        # visit app_login_url
        params = {
            'appid': 116001,
            'r': 'https://m.sohu.com/ucenter?_from=passport'
        }
        self.session.get(self.app_login_url, params=params)
        # request security_login_url
        data = {
            'userid': username,
            'password': md5(password.encode(encoding='utf-8')).hexdigest(),
'appid': 116001
}
self.session.headers.update({
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://m.passport.sohu.com',
'Referer': 'https://m.passport.sohu.com/app/login?appid=116001&r=https%3A%2F%2Fm.sohu.com%2Fucenter%3F_from%3Dpassport'
})
response = self.session.post(self.security_login_url.format(int(time.time()*1000)), data=data)
response_json = response.json()
        # login succeeded
        if response_json.get('status') == 200 and response_json.get('message') == 'Success':
            print('[INFO]: Account -> %s, login successfully' % username)
            infos_return = {'username': username}
            return infos_return, self.session
        # wrong username or password
        elif response_json.get('status') in [404, 459]:
            raise RuntimeError('Account -> %s, fail to login, username or password error' % username)
        # other reasons
        else:
            raise RuntimeError(response_json.get('message'))
    '''Initialization'''
def __initialize(self):
self.headers = {
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
}
self.app_login_url = 'https://m.passport.sohu.com/app/login'
self.security_login_url = 'https://m.passport.sohu.com/security/login?t={}'
self.session.headers.update(self.headers)
'''Log in to Sohu by scanning a QR code'''
class sohuScanqr():
is_callable = False
def __init__(self, **kwargs):
for key, value in kwargs.items(): setattr(self, key, value)
self.info = 'login in sohu in scanqr mode'
'''
Function:
    Simulated login to Sohu
Detail:
    -login:
        Input:
            --username: username
            --password: password
            --mode: mobile/pc/scanqr
            --crack_captcha_func: if a captcha-recognition interface is provided, it is used to recognize captchas automatically
            --proxies: proxies for requests.Session()
        Return:
            --infos_return: information such as the username
            --session: the requests.Session() after a successful login
'''
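# Minimal usage sketch (hypothetical credentials, not part of the original file):
#     client = sohu()
#     infos_return, session = client.login('18888888888', 'password', mode='mobile')
#     print(infos_return['username'])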
class sohu():
def __init__(self, **kwargs):
self.info = 'login in sohu'
self.supported_modes = {
'pc': sohuPC(**kwargs),
'mobile': sohuMobile(**kwargs),
'scanqr': sohuScanqr(**kwargs),
}
    '''Login function'''
def login(self, username, password, mode='mobile', crack_captcha_func=None, **kwargs):
assert mode in self.supported_modes, 'unsupport mode %s in sohu.login' % mode
selected_api = self.supported_modes[mode]
if not selected_api.is_callable: raise NotImplementedError('not be implemented for mode %s in sohu.login' % mode)
args = {
'username': username,
'password': password,
'crack_captcha_func': crack_captcha_func,
}
args.update(kwargs)
return selected_api.login(**args) |
Trakttv.bundle/Contents/Libraries/Shared/plugin/core/libraries/tests/core/base.py | disrupted/Trakttv.bundle | 1,346 | 12649405 | from plugin.core.helpers.variable import merge
from subprocess import Popen
import json
import logging
import os
import subprocess
import sys
CURRENT_PATH = os.path.abspath(__file__)
HOST_PATH = os.path.join(os.path.dirname(CURRENT_PATH), 'host.py')
log = logging.getLogger(__name__)
class BaseTest(object):
name = None
optional = False
@classmethod
def run(cls, search_paths):
metadata = {}
message = None
success = None
# Retrieve names of test functions
names = [
name for name in dir(cls)
if name.startswith('test_')
]
if not names:
return cls.build_failure('No tests defined')
# Run tests
for name in names:
# Ensure function exists
if not hasattr(cls, name):
return cls.build_failure('Unable to find function: %r' % name)
# Run test
try:
result = cls.spawn(name, search_paths)
# Merge test result into `metadata`
merge(metadata, result, recursive=True)
# Test successful
message = None
success = True
except Exception as ex:
if success:
continue
message = ex.message
success = False
if not success:
# Trigger event
cls.on_failure(message)
# Build result
return cls.build_failure(message)
# Trigger event
cls.on_success(metadata)
# Build result
return cls.build_success(metadata)
@classmethod
def spawn(cls, name, search_paths):
# Find path to python executable
python_exe = cls.find_python_executable()
if not python_exe:
raise Exception('Unable to find python executable')
# Ensure test host exists
if not os.path.exists(HOST_PATH):
raise Exception('Unable to find "host.py" script')
# Build test process arguments
args = [
python_exe, HOST_PATH,
'--module', cls.__module__,
'--name', name,
'--search-paths="%s"' % (
';'.join(search_paths)
),
]
# Spawn test (in sub-process)
log.debug('Starting test: %s:%s', cls.__module__, name)
process = Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
# Wait for test to complete
stdout, stderr = process.communicate()
if stderr:
log.debug('Test returned messages:\n%s', stderr.replace("\r\n", "\n"))
# Parse output
result = None
if stdout:
try:
result = json.loads(stdout)
except Exception as ex:
log.warn('Invalid output returned %r - %s', stdout, ex, exc_info=True)
# Build result
if process.returncode != 0:
# Test failed
if result and result.get('message'):
if result.get('traceback'):
log.info('%s - %s', result['message'], result['traceback'])
raise Exception(result['message'])
raise Exception('Unknown error (code: %s)' % process.returncode)
# Test successful
return result
@classmethod
def find_python_executable(cls):
candidates = [sys.executable]
# Add candidates based on the script path in `sys.argv`
if sys.argv and len(sys.argv) > 0 and os.path.exists(sys.argv[0]):
bootstrap_path = sys.argv[0]
resources_pos = bootstrap_path.lower().find('resources')
if resources_pos > 0:
pms_path = bootstrap_path[:resources_pos]
cls._add_python_home_candidates(candidates, pms_path)
# Add candidates relative to `PLEX_MEDIA_SERVER_HOME`
pms_home = os.environ.get('PLEX_MEDIA_SERVER_HOME')
if pms_home and os.path.exists(pms_home):
cls._add_python_home_candidates(candidates, pms_home)
# Add candidates relative to `PYTHONHOME`
python_home = os.environ.get('PYTHONHOME')
if python_home and os.path.exists(python_home):
candidates.append(os.path.join(python_home, 'bin', 'python'))
# Use first candidate that exists
for path in candidates:
if os.path.exists(path):
return path
log.warn('Unable to find python executable', extra={'candidates': candidates})
return None
@staticmethod
def _add_python_home_candidates(candidates, path):
# Windows
candidates.append(os.path.join(path, 'PlexScriptHost.exe'))
# *nix
candidates.append(os.path.join(path, 'Plex Script Host'))
candidates.append(os.path.join(path, 'Resources', 'Plex Script Host'))
candidates.append(os.path.join(path, 'Resources', 'Python', 'bin', 'python'))
#
# Events
#
@classmethod
def on_failure(cls, message):
pass
@classmethod
def on_success(cls, metadata):
pass
#
# Helpers
#
@classmethod
def build_exception(cls, message, exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
return cls.build_failure(
message,
exc_info=exc_info
)
@classmethod
def build_failure(cls, message, **kwargs):
result = {
'success': False,
'message': message
}
# Merge extra attributes
merge(result, kwargs)
return result
@staticmethod
def build_success(metadata):
return {
'success': True,
'metadata': metadata
}
|
pygmt/tests/test_logo.py | daroari/pygmt | 326 | 12649417 | """
Tests for fig.logo.
"""
import pytest
from pygmt import Figure
@pytest.mark.mpl_image_compare
def test_logo():
"""
Plot the GMT logo as a stand-alone plot.
"""
fig = Figure()
fig.logo()
return fig
@pytest.mark.mpl_image_compare
def test_logo_on_a_map():
"""
Plot the GMT logo at the upper right corner of a map.
"""
fig = Figure()
fig.basemap(region=[-90, -70, 0, 20], projection="M15c", frame=True)
fig.logo(position="jTR+o0.25c/0.25c+w7.5c", box=True)
return fig
|
mlmodel/docs/preprocess.py | LaudateCorpus1/coremltools | 11,356 | 12649441 | import os
import re
from itertools import izip
import inflection
def preprocess():
"splits _sources/reference.rst into separate files"
text = open("./_sources/reference.rst", "r").read()
os.remove("./_sources/reference.rst")
if not os.path.exists("./_sources/reference"):
os.makedirs("./_sources/reference")
def pairwise(iterable):
"s -> (s0, s1), (s2, s3), (s4, s5), ..."
iteration = iter(iterable)
return izip(iteration, iteration)
sections = map(str.strip, re.split(r"<!--\s*(.+)\s*-->", text))
for section, content in pairwise(sections[1:]):
if section.endswith(".proto"):
section_name = section[: -len(".proto")]
file_name = "./_sources/reference/{0}.rst".format(section_name)
with open(file_name, "w") as f:
f.truncate()
f.write(content)
f.close()
if __name__ == "__main__":
preprocess()
|
mamba/cli.py | kfischer-okarin/mamba | 462 | 12649445 | # -*- coding: utf-8 -*-
import sys
import argparse
from mamba import application_factory, __version__
def main():
arguments = _parse_arguments()
if arguments.version:
print(__version__)
return
factory = application_factory.ApplicationFactory(arguments)
runner = factory.runner()
runner.run()
if runner.has_failed_examples:
sys.exit(1)
def _parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--version', '-v', default=False, action='store_true', help='display the version')
parser.add_argument('--slow', '-s', default=0.075, type=float, help='slow test threshold in seconds (default: %(default)s)')
parser.add_argument('--enable-coverage', default=False, action='store_true', help='enable code coverage measurement (default: %(default)s)')
parser.add_argument('--coverage-file', default='.coverage', action='store', help='name of coverage data file (default: %(default)s)')
parser.add_argument('--format', '-f', default='progress', action='store', help='output format (default: %(default)s)')
parser.add_argument('specs', default=['./spec', './specs'], nargs='*', help='paths to specs to run or directories with specs to run (default: %(default)s)')
parser.add_argument('--no-color', default=False, action='store_true', help='turn off all output coloring (default: %(default)s)')
parser.add_argument('--tags', '-t', default=None, type=lambda x: [tag.strip() for tag in x.split(',')], action='store', help='run examples with specified tags (example: -t unit,integration)')
return parser.parse_args()
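# Illustrative invocation using the flags defined above (paths and tags are hypothetical):
#     mamba --slow 0.1 --no-color --tags unit,integration ./spec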
if __name__ == '__main__':
main()
|
utils/llvm-build/llvmbuild/util.py | clayne/DirectXShaderCompiler | 4,812 | 12649447 | import os
import sys
def _write_message(kind, message):
program = os.path.basename(sys.argv[0])
sys.stderr.write('%s: %s: %s\n' % (program, kind, message))
note = lambda message: _write_message('note', message)
warning = lambda message: _write_message('warning', message)
error = lambda message: _write_message('error', message)
fatal = lambda message: (_write_message('fatal error', message), sys.exit(1))
__all__ = ['note', 'warning', 'error', 'fatal']
|
iota/commands/core/check_consistency.py | EasonC13/iota.py | 347 | 12649460 | <gh_stars>100-1000
import filters as f
from iota import TransactionHash
from iota.commands import FilterCommand, RequestFilter
from iota.filters import Trytes
__all__ = [
'CheckConsistencyCommand',
]
class CheckConsistencyCommand(FilterCommand):
"""
Executes ``checkConsistency`` extended API command.
See :py:meth:`iota.api.Iota.check_consistency` for more info.
"""
command = 'checkConsistency'
def get_request_filter(self):
return CheckConsistencyRequestFilter()
def get_response_filter(self):
pass
class CheckConsistencyRequestFilter(RequestFilter):
def __init__(self) -> None:
super(CheckConsistencyRequestFilter, self).__init__({
'tails':
f.Required |
f.Array |
f.FilterRepeater(f.Required | Trytes(TransactionHash)),
})
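# Hypothetical usage sketch (node URI and hash values are made up): this command is
# normally reached through the high-level API referenced in the class docstring, e.g.
#     Iota('http://localhost:14265').check_consistency(tails=[TransactionHash(b'...')])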
|
examples/graphsage/train.py | zbmain/PGL | 1,389 | 12649468 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import time
from functools import partial
import numpy as np
import tqdm
import pgl
import paddle
from pgl.utils.logger import log
from pgl.utils.data import Dataloader
from model import GraphSage
from dataset import ShardedDataset, batch_fn
def train(dataloader, model, feature, criterion, optim, log_per_step=100):
model.train()
batch = 0
total_loss = 0.
total_acc = 0.
total_sample = 0
for g, sample_index, index, label in dataloader:
batch += 1
num_samples = len(index)
g.tensor()
sample_index = paddle.to_tensor(sample_index)
index = paddle.to_tensor(index)
label = paddle.to_tensor(label)
feat = paddle.gather(feature, sample_index)
pred = model(g, feat)
pred = paddle.gather(pred, index)
loss = criterion(pred, label)
loss.backward()
acc = paddle.metric.accuracy(input=pred, label=label, k=1)
optim.step()
optim.clear_grad()
total_loss += loss.numpy() * num_samples
total_acc += acc.numpy() * num_samples
total_sample += num_samples
if batch % log_per_step == 0:
log.info("Batch %s %s-Loss %s %s-Acc %s" %
(batch, "train", loss.numpy(), "train", acc.numpy()))
return total_loss / total_sample, total_acc / total_sample
@paddle.no_grad()
def eval(dataloader, model, feature, criterion):
model.eval()
loss_all, acc_all = [], []
for g, sample_index, index, label in dataloader:
g.tensor()
sample_index = paddle.to_tensor(sample_index)
index = paddle.to_tensor(index)
label = paddle.to_tensor(label)
feat = paddle.gather(feature, sample_index)
pred = model(g, feat)
pred = paddle.gather(pred, index)
loss = criterion(pred, label)
acc = paddle.metric.accuracy(input=pred, label=label, k=1)
loss_all.append(loss.numpy())
acc_all.append(acc.numpy())
return np.mean(loss_all), np.mean(acc_all)
def main(args):
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
data = pgl.dataset.RedditDataset(args.normalize, args.symmetry)
log.info("Preprocess finish")
log.info("Train Examples: %s" % len(data.train_index))
log.info("Val Examples: %s" % len(data.val_index))
log.info("Test Examples: %s" % len(data.test_index))
log.info("Num nodes %s" % data.graph.num_nodes)
log.info("Num edges %s" % data.graph.num_edges)
log.info("Average Degree %s" % np.mean(data.graph.indegree()))
graph = data.graph
train_index = data.train_index
val_index = data.val_index
test_index = data.test_index
train_label = data.train_label
val_label = data.val_label
test_label = data.test_label
model = GraphSage(
input_size=data.feature.shape[-1],
num_class=data.num_classes,
hidden_size=args.hidden_size,
num_layers=len(args.samples))
model = paddle.DataParallel(model)
criterion = paddle.nn.loss.CrossEntropyLoss()
optim = paddle.optimizer.Adam(
learning_rate=args.lr,
parameters=model.parameters(),
weight_decay=0.001)
feature = paddle.to_tensor(data.feature)
train_ds = ShardedDataset(train_index, train_label)
val_ds = ShardedDataset(val_index, val_label)
test_ds = ShardedDataset(test_index, test_label)
collate_fn = partial(batch_fn, graph=graph, samples=args.samples)
train_loader = Dataloader(
train_ds,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.sample_workers,
collate_fn=collate_fn)
val_loader = Dataloader(
        val_ds,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.sample_workers,
collate_fn=collate_fn)
test_loader = Dataloader(
test_ds,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.sample_workers,
collate_fn=collate_fn)
cal_val_acc = []
cal_test_acc = []
cal_val_loss = []
for epoch in tqdm.tqdm(range(args.epoch)):
train_loss, train_acc = train(train_loader, model, feature, criterion,
optim)
log.info("Runing epoch:%s\t train_loss:%s\t train_acc:%s", epoch,
train_loss, train_acc)
val_loss, val_acc = eval(val_loader, model, feature, criterion)
cal_val_acc.append(val_acc)
cal_val_loss.append(val_loss)
log.info("Runing epoch:%s\t val_loss:%s\t val_acc:%s", epoch, val_loss,
val_acc)
test_loss, test_acc = eval(test_loader, model, feature, criterion)
cal_test_acc.append(test_acc)
log.info("Runing epoch:%s\t test_loss:%s\t test_acc:%s", epoch,
test_loss, test_acc)
log.info("Runs %s: Model: %s Best Test Accuracy: %f" %
(0, "graphsage", cal_test_acc[np.argmax(cal_val_acc)]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='graphsage')
parser.add_argument(
"--normalize", action='store_true', help="normalize features")
parser.add_argument(
"--symmetry", action='store_true', help="undirect graph")
parser.add_argument("--sample_workers", type=int, default=5)
parser.add_argument("--epoch", type=int, default=10)
parser.add_argument("--hidden_size", type=int, default=128)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument('--samples', nargs='+', type=int, default=[25, 10])
args = parser.parse_args()
log.info(args)
main(args)
|
gateway/sensorsdb.py | QuPengfei/Smart-City-Sample | 126 | 12649472 | <filename>gateway/sensorsdb.py<gh_stars>100-1000
#!/usr/bin/python3
from urllib.parse import unquote
from tornado import web,gen
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
from db_query import DBQuery
from db_ingest import DBIngest
from language import encode
from configuration import env
import json
dbhost=env["DBHOST"]
office=list(map(float,env["OFFICE"].split(","))) if "OFFICE" in env else ""
class SensorsDBHandler(web.RequestHandler):
def __init__(self, app, request, **kwargs):
super(SensorsDBHandler, self).__init__(app, request, **kwargs)
self.executor= ThreadPoolExecutor(4)
self._dbi=DBIngest(index="sensors",office=office,host=dbhost)
def check_origin(self, origin):
return True
@run_on_executor
def _update(self, sensor, source):
try:
print("Ingesting", sensor, flush=True)
r=self._dbi.ingest(source, refresh="wait_for")
return r
except Exception as e:
print(str(e),flush=True)
return str(e)
@gen.coroutine
def put(self):
options=json.loads(self.request.body.decode('utf-8'))
r=yield self._update(sensor=options["sensor"], source=options["source"])
if isinstance(r,str):
self.set_status(400, encode(r))
return
self.write(r)
self.set_status(200,'OK')
self.finish()
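# --- Added illustration (hedged; not part of the original handler) ---
# The PUT body parsed above is JSON of the form
#     {"sensor": "<sensor id>", "source": {<document to ingest into the 'sensors' index>}}
# Both field values here are placeholders; the handler returns the ingest
# result on success and a 400 status with the error text otherwise.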
|
ray-rllib/multi-armed-bandits/market_bandit.py | aksakalli/academy | 342 | 12649498 | <gh_stars>100-1000
import gym
from gym.spaces import Discrete, Box
from gym.utils import seeding
import numpy as np
import random
class MarketBandit (gym.Env):
def __init__ (self, config={}):
self.max_inflation = config.get('max-inflation', DEFAULT_MAX_INFLATION)
self.tickers = config.get('tickers', DEFAULT_TICKERS)
self.data_file = config.get('data-file', DEFAULT_DATA_FILE)
print(f"MarketBandit: max_inflation: {self.max_inflation}, tickers: {self.tickers}, data file: {self.data_file} (config: {config})")
self.action_space = Discrete(4)
self.observation_space = Box(
low = -self.max_inflation,
high = self.max_inflation,
shape=(1, )
)
self.df = load_market_data(self.data_file)
self.cur_context = None
def reset (self):
self.year = self.df["year"].min()
self.cur_context = self.df.loc[self.df["year"] == self.year]["inflation"][0]
self.done = False
self.info = {}
return [self.cur_context]
def step (self, action):
if self.done:
reward = 0.
regret = 0.
else:
row = self.df.loc[self.df["year"] == self.year]
# calculate reward
ticker = self.tickers[action]
reward = float(row[ticker])
# calculate regret
max_reward = max(map(lambda t: float(row[t]), self.tickers))
regret = round(max_reward - reward)
# update the context
self.cur_context = float(row["inflation"])
# increment the year
self.year += 1
if self.year >= self.df["year"].max():
self.done = True
context = [self.cur_context]
#context = self.observation_space.sample()
self.info = {
"regret": regret,
"year": self.year
}
return [context, reward, self.done, self.info]
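    # --- Added usage sketch (hedged; not part of the original class) ---
    # A typical gym-style rollout against this environment looks like:
    #
    #     env = MarketBandit(config)              # config keys as parsed in __init__
    #     obs = env.reset()
    #     done = False
    #     while not done:
    #         action = env.action_space.sample()  # pick one of the tickers
    #         obs, reward, done, info = env.step(action)
    #
    # step() returns [context, reward, done, info], so the unpacking above works.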
def seed (self, seed=None):
"""Sets the seed for this env's random number generator(s).
Note:
Some environments use multiple pseudorandom number generators.
We want to capture all such seeds used in order to ensure that
there aren't accidental correlations between multiple generators.
Returns:
list<bigint>: Returns the list of seeds used in this env's random
number generators. The first value in the list should be the
"main" seed, or the value which a reproducer should pass to
'seed'. Often, the main seed equals the provided 'seed', but
this won't be true if seed=None, for example.
"""
self.np_random, seed = seeding.np_random(seed)
return [seed]
|
pyblur/RandomizedBlur.py | lospooky/pyblur | 102 | 12649532 | import numpy as np
from BoxBlur import BoxBlur_random
from DefocusBlur import DefocusBlur_random
from GaussianBlur import GaussianBlur_random
from LinearMotionBlur import LinearMotionBlur_random
from PsfBlur import PsfBlur_random
blurFunctions = {"0": BoxBlur_random, "1": DefocusBlur_random, "2": GaussianBlur_random, "3": LinearMotionBlur_random, "4": PsfBlur_random}
def RandomizedBlur(img):
blurToApply = blurFunctions[str(np.random.randint(0, len(blurFunctions)))]
return blurToApply(img) |
program/request-tvm-nnvm-inference/mali_imagenet_bench.py | ctuning/ck-request-asplos18-mobilenets-tvm | 174 | 12649550 | """
Benchmark inference speed on ImageNet
Example (run on Firefly RK3399):
python mali_imagenet_bench.py --target-host 'llvm -target=aarch64-linux-gnu' --host 192.168.0.100 --port 9090 --model mobilenet
"""
import time
import argparse
import numpy as np
import tvm
import nnvm.compiler
import nnvm.testing
from tvm.contrib import util, rpc
from tvm.contrib import graph_runtime as runtime
def run_case(model, dtype):
# load model
if model == 'vgg16':
net, params = nnvm.testing.vgg.get_workload(num_layers=16,
batch_size=1, image_shape=image_shape, dtype=dtype)
elif model == 'resnet18':
net, params = nnvm.testing.resnet.get_workload(num_layers=18,
batch_size=1, image_shape=image_shape, dtype=dtype)
elif model == 'mobilenet':
net, params = nnvm.testing.mobilenet.get_workload(
batch_size=1, image_shape=image_shape, dtype=dtype)
else:
raise ValueError('no benchmark prepared for {}.'.format(model))
# compile
opt_level = 2 if dtype == 'float32' else 1
with nnvm.compiler.build_config(opt_level=opt_level):
graph, lib, params = nnvm.compiler.build(
net, tvm.target.mali(), shape={"data": data_shape}, params=params,
dtype=dtype, target_host=args.target_host)
# upload model to remote device
tmp = util.tempdir()
lib_fname = tmp.relpath('net.tar')
lib.export_library(lib_fname)
if args.host is not None:
remote = rpc.connect(args.host, args.port)
remote.upload(lib_fname)
ctx = remote.cl(0)
rlib = remote.load_module('net.tar')
rparams = {k: tvm.nd.array(v, ctx) for k, v in params.items()}
else:
ctx = tvm.cl(0)
rlib = lib
rparams = params
# create graph runtime
module = runtime.create(graph, rlib, ctx)
module.set_input('data', tvm.nd.array(np.random.uniform(size=(data_shape)).astype(dtype)))
module.set_input(**rparams)
# benchmark
# print("============================================================")
# print("model: %s, dtype: %s" % (model, dtype))
# the num of runs for warm up and test
num_warmup = 10
num_test = 60
    if model == 'mobilenet': # mobilenet is fast, need more runs for stable measurement
num_warmup *= 5
num_test *= 5
# perform some warm up runs
# print("warm up..")
warm_up_timer = module.module.time_evaluator("run", ctx, num_warmup)
warm_up_timer()
# test
# print("test..")
ftimer = module.module.time_evaluator("run", ctx, num_test)
prof_res = ftimer()
# print("cost per image: %.4fs" % prof_res.mean)
print("backend: TVM-mali\tmodel: %s\tdtype: %s\tcost:%.4f" % (model, dtype, prof_res.mean))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True, choices=['vgg16', 'resnet18', 'mobilenet', 'all'],
help="The model type.")
parser.add_argument('--dtype', type=str, default='float32', choices=['float16', 'float32'])
parser.add_argument('--host', type=str, help="The host address of your arm device.", default=None)
parser.add_argument('--port', type=int, help="The port number of your arm device", default=None)
parser.add_argument('--target-host', type=str, help="The compilation target of host device.", default=None)
args = parser.parse_args()
# set parameter
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
# load model
data_shape = (batch_size,) + image_shape
out_shape = (batch_size, num_classes)
if args.model == 'all': # test all
for model in ['vgg16', 'resnet18', 'mobilenet']:
for dtype in ['float32', 'float16']:
run_case(model, dtype)
time.sleep(10)
else: # test single
run_case(args.model, args.dtype)
|
onnx2pytorch/constants.py | Robust-Robots/onnx2pytorch | 147 | 12649552 | <filename>onnx2pytorch/constants.py
from torch import nn
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.pooling import _MaxPoolNd
from onnx2pytorch.operations import (
BatchNormWrapper,
InstanceNormWrapper,
Loop,
LSTMWrapper,
Split,
TopK,
)
COMPOSITE_LAYERS = (nn.Sequential,)
MULTIOUTPUT_LAYERS = (_MaxPoolNd, Loop, LSTMWrapper, Split, TopK)
STANDARD_LAYERS = (
_ConvNd,
BatchNormWrapper,
InstanceNormWrapper,
LSTMWrapper,
nn.Linear,
)
|
cmd_ir/instructions/events.py | Commodoreprime/Command-Block-Assembly | 223 | 12649553 | """Events"""
from ._core import PreambleInsn, ConstructorInsn, SingleCommandInsn
from ..core_types import (VirtualString,
AdvEventRef,
TagEventRef,
EventRef,
Selector,
SelectorType,
)
from ..core import IRFunction, VisibleFunction
import commands as c
class CreateAdvEvent(ConstructorInsn):
"""Creates an advancement-based event object."""
args = [VirtualString]
argnames = 'event_name'
argdocs = ["The event name"]
rettype = AdvEventRef
insn_name = 'adv_event'
def construct(self):
return AdvEventRef(str(self.event_name))
class CreateTagEvent(ConstructorInsn):
"""Creates a tag-based event object."""
args = [VirtualString]
argnames = 'tag_name'
argdocs = ["The function tag name"]
rettype = TagEventRef
insn_name = 'tag_event'
def construct(self):
return TagEventRef(str(self.tag_name))
class AddEventCondition(PreambleInsn):
"""Add a condition to an event that must be true for the event handler
to be invoked."""
args = [AdvEventRef, VirtualString, VirtualString]
argnames = 'event path value'
argdocs = ["Event to add the condition to", "JSON path in the advancement",
"Value that must match"]
insn_name = 'add_event_condition'
def preapply(self, preamble):
self.event.add_condition(tuple(str(self.path).split('.')),
str(self.value))
class EventHandler(PreambleInsn):
"""Add an event handler to the given event specification."""
args = [IRFunction, EventRef]
argnames = 'handler event'
argdocs = ["Event handler", "Event"]
top_preamble_only = True
insn_name = 'event_handler'
def preapply(self, preamble):
if not self.event.is_tag:
self.handler.add_advancement_revoke(self.event)
def declare(self):
self.handler.usage()
def postapply(self, out, top):
assert not self.handler.is_inline
out.write_event_handler(self.handler, self.event)
class FireEventInsn(SingleCommandInsn):
"""Fires a tag-based event to all listeners."""
args = [TagEventRef]
argnames = 'event'
argdocs = ["Tag event to fire"]
insn_name = 'fire_event'
def get_cmd(self, func):
return c.FunctionTag(c.NSName(self.event.name))
class RevokeEventAdvancement(SingleCommandInsn):
"""(Internal) Revokes an advancement to allow an event to re-fire."""
args = [IRFunction]
argnames = 'func'
argdocs = ["Handler"]
insn_name = 'revoke_event_adv'
def get_cmd(self, func):
# Advancement name = handler func name
return c.Advancement('revoke', Selector.new(SelectorType.SENDER) \
.as_resolve(),
'only', c.AdvancementRef(self.func.global_name))
class SetupInsn(PreambleInsn):
"""Tags a function as being part of the setup phase. It is called whenever
the datapack is reloaded."""
args = [VisibleFunction]
argnames = 'func'
argdocs = ["The setup function"]
top_preamble_only = True
insn_name = 'setupfn'
def declare(self):
self.func.usage()
def preapply(self, preamble):
pass
def postapply(self, out, top):
assert not self.func.is_inline
out.write_setup_function(self.func)
|
DSA 450 GFG/PrintAnagrams.py | siddhi-244/CompetitiveProgrammingQuestionBank | 931 | 12649575 |
# Problem : https://practice.geeksforgeeks.org/problems/print-anagrams-together/1
# Input:
# N = 5
# words[] = {act,god,cat,dog,tac}
# Output:
# god dog
# act cat tac
# Explanation:
# There are 2 groups of anagrams:
# "god", "dog" make group 1.
# "act", "cat", "tac" make group 2.
from collections import defaultdict
def Anagrams(words,n):
'''
    words: list of words
    n: number of words
    return : list of groups of anagrams (the list of groups is sorted in the driver code, not the words within each group)
'''
#code here
anagrams = defaultdict(list)
for word in words:
anagrams["".join(sorted(word))].append(word)
return anagrams.values()
# Driver Code
if __name__ =='__main__':
t= int(input())
for tcs in range(t):
n= int(input())
words=input().split()
ans=Anagrams(words,n)
for grp in sorted(ans):
for word in grp:
print(word,end=' ')
print()
# Used defaultdict from the collections module; it does not raise a KeyError for missing keys. |
emukit/examples/fabolas/fabolas_model.py | ndalchau/emukit | 152 | 12649583 | from copy import deepcopy
from typing import Tuple
import GPy
import numpy as np
from emukit.model_wrappers.gpy_model_wrappers import GPyModelWrapper
class FabolasKernel(GPy.kern.Kern):
def __init__(self, input_dim, basis_func, a=1., b=1., active_dims=None):
super(FabolasKernel, self).__init__(input_dim, active_dims, "fabolas_kernel")
assert input_dim == 1
self.basis_func = basis_func
self.a = GPy.core.parameterization.Param("a", a)
self.b = GPy.core.parameterization.Param("b", b)
self.link_parameters(self.a, self.b)
def K(self, X, X2):
if X2 is None: X2 = X
X_ = self.basis_func(X)
X2_ = self.basis_func(X2)
k = np.dot(X_ * self.b, X2_.T) + self.a
return k
def update_gradients_full(self, dL_dK, X, X2):
if X2 is None: X2 = X
X_ = self.basis_func(X)
X2_ = self.basis_func(X2)
self.a.gradient = np.sum(dL_dK)
self.b.gradient = np.sum(np.dot(np.dot(X_, X2_.T), dL_dK))
def Kdiag(self, X):
return np.diag(self.K(X, X))
def linear(s):
return s
def quad(s):
return (1 - s) ** 2
def transform(s, s_min, s_max):
s_transform = (np.log2(s) - np.log2(s_min)) / (np.log2(s_max) - np.log2(s_min))
return s_transform
def retransform(s_transform, s_min, s_max):
s = np.rint(2 ** (s_transform * (np.log2(s_max) - np.log2(s_min)) + np.log2(s_min)))
return s
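# --- Added illustration (hedged; not part of the original module) ---
# transform() maps a dataset size s in [s_min, s_max] onto [0, 1] in log2
# space and retransform() inverts it (up to rounding). With assumed bounds:
#
#     >>> transform(256, s_min=64, s_max=1024)
#     0.5
#     >>> retransform(0.5, s_min=64, s_max=1024)
#     256.0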
class FabolasModel(GPyModelWrapper):
def __init__(self, X_init: np.ndarray, Y_init: np.ndarray,
s_min: float, s_max: float, basis_func=linear, noise: float = 1e-6):
"""
        Fabolas Gaussian process model, which models the validation error / cost of
hyperparameter configurations across training dataset subsets.
:param X_init: training data points
:param Y_init: training targets
:param basis_func: basis function which describes the change in performance across dataset subsets
:param noise: observation noise added to the diagonal of the kernel matrix
"""
self.noise = noise
self.s_min = s_min
self.s_max = s_max
self._X = deepcopy(X_init)
self._X[:, -1] = transform(self._X[:, -1], self.s_min, self.s_max)
self._Y = Y_init
self.basis_func = basis_func
kernel = GPy.kern.Matern52(input_dim=self._X.shape[1] - 1, active_dims=[i for i in range(self._X.shape[1] - 1)],
variance=np.var(self._Y), ARD=True)
kernel *= FabolasKernel(input_dim=1, active_dims=[self._X.shape[1] - 1], basis_func=basis_func)
kernel += GPy.kern.White(input_dim=1, active_dims=[self._X.shape[1] - 1], variance=1e-6)
gp = GPy.models.GPRegression(self._X, self._Y, kernel=kernel, noise_var=noise)
gp.kern.set_prior(GPy.priors.Uniform(0, 5))
gp.likelihood.constrain_positive()
super(FabolasModel, self).__init__(gpy_model=gp, n_restarts=3)
def predict(self, X):
"""
:param X: (n_points x n_dimensions) array containing locations at which to get predictions
:return: (mean, variance) Arrays of size n_points x 1 of the predictive distribution at each input location
"""
X_ = deepcopy(X)
X_[:, -1] = transform(X_[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).predict(X_)
def set_data(self, X, Y):
"""
Sets training data in model
:param X: New training features
:param Y: New training outputs
"""
self._X = deepcopy(X)
self._X[:, -1] = transform(self._X[:, -1], self.s_min, self.s_max)
self._Y = Y
try:
self.model.set_XY(self._X, self.Y)
except:
kernel = GPy.kern.Matern52(input_dim=self._X.shape[1] - 1,
active_dims=[i for i in range(self._X.shape[1] - 1)],
variance=np.var(self.Y), ARD=True)
kernel *= FabolasKernel(input_dim=1, active_dims=[self._X.shape[1] - 1], basis_func=self.basis_func)
kernel *= GPy.kern.OU(input_dim=1, active_dims=[self._X.shape[1] - 1])
self.model = GPy.models.GPRegression(self._X, self.Y, kernel=kernel, noise_var=self.noise)
self.model.likelihood.constrain_positive()
def get_f_minimum(self):
"""
Predicts for all observed data points the validation error on the full dataset and returns
        the smallest mean prediction
:return: Array of size 1 x 1
"""
proj_X = deepcopy(self._X)
proj_X[:, -1] = np.ones(proj_X.shape[0]) * self.s_max
        mean_highest_dataset, _ = self.model.predict(proj_X)
return np.min(mean_highest_dataset, axis=0)
@property
def X(self):
X = deepcopy(self._X)
X[:, -1] = retransform(X[:, -1], self.s_min, self.s_max)
return X
@property
def Y(self):
return self._Y
def get_prediction_gradients(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
:param X: (n_points x n_dimensions) array containing locations at which to get gradient of the predictions
:return: (mean gradient, variance gradient) n_points x n_dimensions arrays of the gradients of the predictive
distribution at each input location
"""
X_ = deepcopy(X)
X_[:, -1] = transform(X_[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).get_prediction_gradients(X_)
def predict_covariance(self, X: np.ndarray, with_noise: bool = True) -> np.ndarray:
"""
Calculates posterior covariance between points in X
:param X: Array of size n_points x n_dimensions containing input locations to compute posterior covariance at
:param with_noise: Whether to include likelihood noise in the covariance matrix
:return: Posterior covariance matrix of size n_points x n_points
"""
X_ = deepcopy(X)
X_[:, -1] = transform(X_[:, -1], self.s_min, self.s_max)
return super(FabolasModel, self).predict_covariance(X_, with_noise)
def get_covariance_between_points(self, X1: np.ndarray, X2: np.ndarray) -> np.ndarray:
"""
Calculate posterior covariance between two points
        :param X1: An array of shape 1 x n_dimensions that contains a single data point. It is the first argument of the
posterior covariance function
:param X2: An array of shape n_points x n_dimensions that may contain multiple data points. This is the second
argument to the posterior covariance function.
:return: An array of shape n_points x 1 of posterior covariances between X1 and X2
"""
X_1 = deepcopy(X1)
X_1[:, -1] = transform(X_1[:, -1], self.s_min, self.s_max)
X_2 = deepcopy(X2)
X_2[:, -1] = transform(X_2[:, -1], self.s_min, self.s_max)
        return super(FabolasModel, self).get_covariance_between_points(X_1, X_2)
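# --- Added usage sketch (hedged; not part of the original module) ---
# Fits the model on toy data where the last input column is the training-set
# size s in [s_min, s_max]; all sizes, shapes and bounds below are arbitrary
# example values.
def _demo_fabolas_model():
    rng = np.random.RandomState(0)
    sizes = rng.randint(64, 1024, size=(20, 1)).astype(np.float64)
    X_init = np.hstack([rng.rand(20, 2), sizes])   # [hyperparameters..., dataset size]
    Y_init = rng.rand(20, 1)
    model = FabolasModel(X_init, Y_init, s_min=64, s_max=1024)
    mean, var = model.predict(X_init[:5])
    return mean.shape, var.shape                   # expected ((5, 1), (5, 1))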
|
navec/tar.py | FreedomSlow/navec | 115 | 12649603 |
import tarfile
from io import BytesIO
from .record import Record
class Tar(Record):
__attributes__ = ['path']
mode = 'r'
def __init__(self, path):
self.path = path
def __enter__(self):
self.tar = tarfile.open(self.path, self.mode)
return self
def __exit__(self, *args):
self.tar.close()
def load(self, filename):
member = self.tar.getmember(filename)
return self.tar.extractfile(member)
class DumpTar(Tar):
mode = 'w'
def dump(self, bytes, filename):
file = BytesIO(bytes)
info = tarfile.TarInfo(filename)
info.size = len(bytes)
self.tar.addfile(tarinfo=info, fileobj=file)
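# --- Added usage sketch (hedged; not part of the original module) ---
# Round-trips a small payload through DumpTar/Tar; the archive name is an
# arbitrary placeholder.
def _demo_roundtrip(path='example.tar'):
    payload = b'hello'
    with DumpTar(path) as tar:
        tar.dump(payload, 'greeting.txt')
    with Tar(path) as tar:
        return tar.load('greeting.txt').read() == payload   # True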
|
test/hummingbot/strategy/perpetual_market_making/test_perpetual_market_making_start.py | BGTCapital/hummingbot | 3,027 | 12649604 | from decimal import Decimal
import unittest.mock
import hummingbot.strategy.perpetual_market_making.start as strategy_start
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.strategy.perpetual_market_making.perpetual_market_making_config_map import (
perpetual_market_making_config_map as c_map
)
from test.hummingbot.strategy import assign_config_default
class PerpetualMarketMakingStartTest(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.strategy = None
self.markets = {"binance": ExchangeBase()}
self.notifications = []
self.log_errors = []
assign_config_default(c_map)
c_map.get("derivative").value = "binance"
c_map.get("market").value = "ETH-USDT"
c_map.get("leverage").value = Decimal("5")
c_map.get("order_amount").value = Decimal("1")
c_map.get("order_refresh_time").value = 60.
c_map.get("bid_spread").value = Decimal("1")
c_map.get("ask_spread").value = Decimal("2")
def _initialize_market_assets(self, market, trading_pairs):
return [("ETH", "USDT")]
def _initialize_markets(self, market_names):
pass
def _notify(self, message):
self.notifications.append(message)
def logger(self):
return self
def error(self, message, exc_info):
self.log_errors.append(message)
def test_strategy_creation(self):
strategy_start.start(self)
self.assertEqual(self.strategy.order_amount, Decimal("1"))
self.assertEqual(self.strategy.order_refresh_time, 60.)
self.assertEqual(self.strategy.bid_spread, Decimal("0.01"))
self.assertEqual(self.strategy.ask_spread, Decimal("0.02"))
|
tests/batchnorm.py | kihyuks/objax | 715 | 12649618 | <gh_stars>100-1000
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for Batch Normalization Layer."""
import os
import unittest
import objax
import numpy as np
# Split CPU cores into 8 devices for tests of sync batch norm
os.environ['XLA_FLAGS'] = ' '.join(os.environ.get('XLA_FLAGS', '').split(' ')
+ ['--xla_force_host_platform_device_count=8'])
class TestBatchnorm(unittest.TestCase):
def test_batchnorm_0d(self):
x = objax.random.normal((64, 8))
bn = objax.nn.BatchNorm0D(8)
# run batch norm in training mode
yt = bn(x, training=True)
self.assertEqual(yt.shape, x.shape)
# run batch norm in eval mode
ye = bn(x, training=False)
self.assertEqual(ye.shape, x.shape)
def test_batchnorm_1d(self):
x = objax.random.normal((64, 4, 16))
bn = objax.nn.BatchNorm1D(4)
# run batch norm in training mode
yt = bn(x, training=True)
self.assertEqual(yt.shape, x.shape)
# run batch norm in eval mode
ye = bn(x, training=False)
self.assertEqual(ye.shape, x.shape)
def test_batchnorm_2d(self):
x = objax.random.normal((64, 3, 16, 16))
bn = objax.nn.BatchNorm2D(3)
# run batch norm in training mode
yt = bn(x, training=True)
self.assertEqual(yt.shape, x.shape)
# run batch norm in eval mode
ye = bn(x, training=False)
self.assertEqual(ye.shape, x.shape)
class TestSyncBatchnorm(unittest.TestCase):
def assertTensorsAlmostEqual(self, a, b):
a = np.array(a)
b = np.array(b)
np.testing.assert_almost_equal(a, b, decimal=5)
def assertTensorsNotEqual(self, a, b):
a = np.array(a)
b = np.array(b)
self.assertGreater(((a - b) ** 2).sum(), 1e-5)
def helper_test_syncbn(self, x, bn_fn, syncbn_fn):
# run regular batch norm in train and eval mode
bn = bn_fn()
yt = bn(x, training=True)
ye = bn(x, training=False)
# run replicated sync batch norm in train and eval mode
sync_bn = syncbn_fn()
sync_bn_train = objax.Parallel(lambda x: sync_bn(x, training=True), vc=sync_bn.vars())
sync_bn_eval = objax.Parallel(lambda x: sync_bn(x, training=False), vc=sync_bn.vars())
with sync_bn.vars().replicate():
yt_syncbn = sync_bn_train(x)
ye_syncbn = sync_bn_eval(x)
# replicated sync bn should have the same behavior as non-replicated regular bn
self.assertTensorsAlmostEqual(yt, yt_syncbn)
self.assertTensorsAlmostEqual(ye, ye_syncbn)
self.assertTensorsAlmostEqual(yt, yt_syncbn)
self.assertTensorsAlmostEqual(bn.running_mean.value, sync_bn.running_mean.value)
self.assertTensorsAlmostEqual(bn.running_var.value, sync_bn.running_var.value)
# run replicated non-sync batch norm - it should yield different result
non_sync_bn = bn_fn()
non_sync_bn_train = objax.Parallel(lambda x: non_sync_bn(x, training=True), vc=non_sync_bn.vars())
non_sync_bn_eval = objax.Parallel(lambda x: non_sync_bn(x, training=False), vc=non_sync_bn.vars())
with non_sync_bn.vars().replicate():
yt_non_syncbn = non_sync_bn_train(x)
ye_non_syncbn = non_sync_bn_eval(x)
self.assertTensorsNotEqual(yt, yt_non_syncbn)
self.assertTensorsNotEqual(ye, ye_non_syncbn)
def test_syncbn_0d(self):
x = objax.random.normal((64, 8))
self.helper_test_syncbn(x, lambda: objax.nn.BatchNorm0D(8), lambda: objax.nn.SyncedBatchNorm0D(8))
def test_syncbn_1d(self):
x = objax.random.normal((64, 4, 16))
self.helper_test_syncbn(x, lambda: objax.nn.BatchNorm1D(4), lambda: objax.nn.SyncedBatchNorm1D(4))
def test_syncbn_2d(self):
x = objax.random.normal((64, 3, 16, 16))
self.helper_test_syncbn(x, lambda: objax.nn.BatchNorm2D(3), lambda: objax.nn.SyncedBatchNorm2D(3))
if __name__ == '__main__':
unittest.main()
|
tests/python3_f_strings/f_strings.py | hixio-mh/plugin-python | 362 | 12649651 | width = 10
precision = 4
value = decimal.Decimal("12.34567")
f"result: {value:{width}.{precision}}"
rf"result: {value:{width}.{precision}}"
foo(f'this SHOULD be a multi-line string because it is '
f'very long and does not fit on one line. And {value} is the value.')
foo('this SHOULD be a multi-line string, but not reflowed because it is '
f'very long and and also unusual. And {value} is the value.')
foo(fR"this should NOT be \t "
rF'a multi-line string \n')
|
extensions/python/src/main/resources/jet_to_python_grpc_server.py | software-is-art/hazelcast | 4,283 | 12649653 | <reponame>software-is-art/hazelcast
import grpc
import sys
import os
import socket
import logging
import importlib
from concurrent import futures
import jet_to_python_pb2
import jet_to_python_pb2_grpc
logger = logging.getLogger('Python PID %d' % os.getpid())
class JetToPythonServicer(jet_to_python_pb2_grpc.JetToPythonServicer):
def __init__(self, handler_function):
self._handler_function = handler_function
def streamingCall(self, request_iterator, context):
for request in request_iterator:
output_list = self._handler_function(request.inputValue)
output_item = jet_to_python_pb2.OutputMessage(outputValue = output_list)
yield output_item
logger.info('gRPC call completed')
def load_handler_function(handler_module_name, handler_function_name):
try:
handler_module = importlib.import_module(handler_module_name)
except ImportError as e:
raise RuntimeError("Cannot import module %s" % (handler_module_name), e)
if not hasattr(handler_module, handler_function_name):
raise RuntimeError("Handler function %s.%s doesn't exist" % (handler_module_name, handler_function_name))
return getattr(handler_module, handler_function_name)
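# --- Added illustration (hedged; not part of the original script) ---
# The handler module loaded above is expected to expose a function that maps
# the repeated input values of one request to a list of output values, e.g. a
# hypothetical module echo.py containing:
#
#     def handle(values):
#         return [v.upper() for v in values]
#
# which would be selected by passing "echo" and "handle" as sys.argv[2] and
# sys.argv[3].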
def serve(phoneback_port, handler_module_name, handler_function_name):
# Fail as soon as possible for any simple problem with passed-in arguments
phoneback_port_int = int(phoneback_port)
handler_function = load_handler_function(handler_module_name, handler_function_name)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1), options=[
('grpc.max_send_message_length', 100 * 1024 * 1024),
('grpc.max_receive_message_length', 100 * 1024 * 1024),
('grpc.so_reuseport', 0)
])
jet_to_python_pb2_grpc.add_JetToPythonServicer_to_server(
JetToPythonServicer(handler_function),
server
)
listen_port = server.add_insecure_port('localhost:0')
if listen_port == 0:
logger.error("Couldn't find a port to bind to")
return
phoneback_message = ('%d\n' % listen_port).encode('utf-8')
server.start()
logger.info('started listening on port %d', listen_port)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(('localhost', phoneback_port_int))
s.sendall(phoneback_message)
# Wait for a stop signal in stdin
stdin_message = input()
if stdin_message == 'stop':
logger.info('Received a "stop" message from stdin. Stopping the server.')
else:
logger.info('Received an unexpected message from stdin: "%s"' % stdin_message)
server.stop(0).wait()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, format='%(asctime)s %(levelname)s [%(name)s] %(threadName)s - %(message)s', level=logging.INFO)
# Expecting these command-line parameters:
# - $1 is the port where Jet is listening for the Python process to
# 'phone back' and tell Jet on which port it started its gRPC endpoint.
# - $2.$3 is the module.function of the handler function that will handle
# the input from Jet.
serve(phoneback_port=sys.argv[1], handler_module_name=sys.argv[2], handler_function_name=sys.argv[3])
|
nodes/1.x/python/Group.Ungroup.py | jdehotin/Clockworkfordynamo | 147 | 12649684 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
from System.Collections.Generic import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
groups = UnwrapElement(IN[0])
elementlist = list()
TransactionManager.Instance.EnsureInTransaction(doc)
for group in groups:
try:
ids = group.UngroupMembers()
ungrouped = list()
for id in ids:
ungrouped.append(group.Document.GetElement(id))
elementlist.append(ungrouped)
except:
elementlist.append(list())
TransactionManager.Instance.TransactionTaskDone()
OUT = elementlist |
src/genie/libs/parser/iosxe/tests/ShowStandbyAll/cli/equal/golden_output4_expected.py | balmasea/genieparser | 204 | 12649701 | <gh_stars>100-1000
expected_output = {
"BDI3147": {
"interface": "BDI3147",
"redirects_disable": False,
"address_family": {
"ipv4": {
"version": {
1: {
"groups": {
31: {
"group_number": 31,
"hsrp_router_state": "active",
"statistics": {
"num_state_changes": 17
},
"last_state_change": "12w6d",
"primary_ipv4_address": {
"address": "10.190.99.49"
},
"virtual_mac_address": "0000.0c07.ac1f",
"virtual_mac_address_mac_in_use": True,
"local_virtual_mac_address": "0000.0c07.ac1f",
"local_virtual_mac_address_conf": "v1 default",
"timers": {
"hello_msec_flag": False,
"hello_sec": 3,
"hold_msec_flag": False,
"hold_sec": 10,
"next_hello_sent": 1.856
},
"active_router": "local",
"standby_priority": 90,
"standby_expires_in": 11.504,
"standby_router": "10.190.99.51",
"standby_ip_address": "10.190.99.51",
"priority": 110,
"configured_priority": 110,
"session_name": "hsrp-BD3147-31"
},
32: {
"group_number": 32,
"hsrp_router_state": "active",
"statistics": {
"num_state_changes": 17
},
"last_state_change": "12w6d",
"primary_ipv4_address": {
"address": "10.188.109.1"
},
"virtual_mac_address": "0000.0c07.ac20",
"virtual_mac_address_mac_in_use": True,
"local_virtual_mac_address": "0000.0c07.ac20",
"local_virtual_mac_address_conf": "v1 default",
"timers": {
"hello_msec_flag": False,
"hello_sec": 3,
"hold_msec_flag": False,
"hold_sec": 10,
"next_hello_sent": 2.496
},
"active_router": "local",
"standby_priority": 90,
"standby_expires_in": 10.576,
"standby_router": "10.188.109.3",
"standby_ip_address": "10.188.109.3",
"priority": 110,
"configured_priority": 110,
"session_name": "hsrp-BD3147-32"
}
}
}
}
}
},
"use_bia": False
}
}
|
flask_admin/tests/test_tools.py | caffeinatedMike/flask-admin | 4,440 | 12649723 | <reponame>caffeinatedMike/flask-admin
from flask_admin import tools
def test_encode_decode():
assert tools.iterdecode(tools.iterencode([1, 2, 3])) == (u'1', u'2', u'3')
assert tools.iterdecode(tools.iterencode([',', ',', ','])) == (u',', u',', u',')
assert tools.iterdecode(tools.iterencode(['.hello.,', ',', ','])) == (u'.hello.,', u',', u',')
assert tools.iterdecode(tools.iterencode(['.....,,,.,,..,.,,.,'])) == (u'.....,,,.,,..,.,,.,',)
assert tools.iterdecode(tools.iterencode([])) == tuple()
# Malformed inputs should not crash
assert tools.iterdecode('.')
assert tools.iterdecode(',') == (u'', u'')
|
Multitasking_with_CircuitPython/code_buttons_without_sleep/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12649750 | <gh_stars>100-1000
"""
This example script shows how to read button state with
debouncing that does not rely on time.sleep().
"""
import board
from digitalio import DigitalInOut, Direction, Pull
btn = DigitalInOut(board.SWITCH)
btn.direction = Direction.INPUT
btn.pull = Pull.UP
prev_state = btn.value
while True:
cur_state = btn.value
if cur_state != prev_state:
if not cur_state:
print("BTN is down")
else:
print("BTN is up")
prev_state = cur_state
|
src/map_renderer/glow.py | XiaoJake/range-mcl | 141 | 12649821 | <reponame>XiaoJake/range-mcl<filename>src/map_renderer/glow.py
#!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
# Brief: OpenGL Object Wrapper (GLOW) in python.
# Some convenience classes to simplify resource management.
import re
from typing import Any, Union
from OpenGL.raw.GL.VERSION.GL_3_0 import GL_R32F, GL_RG
import numpy as np
import OpenGL.GL as gl
gl.ERROR_CHECKING = True
gl.ERROR_ON_COPY = True
gl.WARN_ON_FORMAT_UNAVAILABLE = True
WARN_INVALID_UNIFORMS = False
def vec2(x: float, y: float) -> np.array:
""" returns an vec2-compatible numpy array """
return np.array([x, y], dtype=np.float32)
def vec3(x: float, y: float, z: float) -> np.array:
""" returns an vec3-compatible numpy array """
return np.array([x, y, z], dtype=np.float32)
def vec4(x: float, y: float, z: float, w: float) -> np.array:
""" returns an vec4-compatible numpy array """
return np.array([x, y, z, w], dtype=np.float32)
def ivec2(x: int, y: int) -> np.array:
""" returns an ivec2-compatible numpy array """
return np.array([x, y], dtype=np.int32)
def ivec3(x: int, y: int, z: int) -> np.array:
""" returns an ivec3-compatible numpy array """
return np.array([x, y, z], dtype=np.int32)
def ivec4(x: int, y: int, z: int, w: int) -> np.array:
""" returns an ivec4-compatible numpy array """
return np.array([x, y, z, w], dtype=np.int32)
def uivec2(x: int, y: int) -> np.array:
""" returns an uivec2-compatible numpy array """
return np.array([x, y], dtype=np.uint32)
def uivec3(x: int, y: int, z: int) -> np.array:
""" returns an uivec3-compatible numpy array """
return np.array([x, y, z], dtype=np.uint32)
def uivec4(x: int, y: int, z: int, w: int) -> np.array:
""" returns an uivec4-compatible numpy array """
return np.array([x, y, z, w], dtype=np.uint32)
class GlBuffer:
""" Buffer object representing a vertex array buffer.
"""
def __init__(self, target: gl.Constant = gl.GL_ARRAY_BUFFER, usage: gl.Constant = gl.GL_STATIC_DRAW):
self.id_ = gl.glGenBuffers(1)
self.target_ = target
self.usage_ = usage
self._size = 0
def __del__(self):
gl.glDeleteBuffers(1, [self.id_])
def assign(self, array: np.array):
""" set buffer content to given numpy array. """
gl.glBindBuffer(self.target_, self.id_)
gl.glBufferData(self.target_, array, self.usage_)
gl.glBindBuffer(self.target_, 0)
self._size = array.shape[0]
def bind(self):
""" bind buffer """
gl.glBindBuffer(self.target_, self.id_)
def release(self):
""" release buffer """
gl.glBindBuffer(self.target_, 0)
@property
def id(self) -> int:
""" get buffer id """
return self.id_
@property
def usage(self) -> gl.Constant:
""" get buffer usage """
return self.usage_
@property
def target(self) -> gl.Constant:
""" get buffer target """
return self.target_
@property
def size(self) -> int:
return self._size
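# --- Added usage sketch (hedged; not part of the original module) ---
# Uploading vertex data to a GlBuffer; a current OpenGL context must already
# exist (created by whatever windowing toolkit the application uses).
def _demo_upload_vertices():
    vertices = np.array([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]], dtype=np.float32)
    buf = GlBuffer(target=gl.GL_ARRAY_BUFFER, usage=gl.GL_STATIC_DRAW)
    buf.assign(vertices)   # copies the array into GPU memory
    return buf.size        # -> 3 (rows of the uploaded array)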
class GlTextureBuffer:
"""
Texture based on a GlBuffer object. A texture object, where the texture's data is stored in a buffer object.
"""
def __init__(self, buffer: GlBuffer, tex_format):
self._buffer = buffer # keep here a reference to avoid garbage collection.
self.id_ = gl.glGenTextures(1)
self._tex_format = tex_format
gl.glBindTexture(gl.GL_TEXTURE_BUFFER, self.id_)
self._buffer.bind()
gl.glTexBuffer(gl.GL_TEXTURE_BUFFER, self._tex_format, self._buffer.id)
self._buffer.release()
def __del__(self):
        gl.glDeleteTextures(1, [self.id_])
def bind(self):
gl.glBindTexture(gl.GL_TEXTURE_BUFFER, self.id_)
def release(self):
gl.glBindTexture(gl.GL_TEXTURE_BUFFER, 0)
class GlTextureRectangle:
""" TextureRectangle
TODO: make GlTextureRectangle and GlTexture2D subclasses of GlTextureBase
"""
def __init__(self, width, height, internalFormat=gl.GL_RGBA, format=gl.GL_RGBA):
self.id_ = gl.glGenTextures(1)
self.internalFormat_ = internalFormat # gl.GL_RGB_FLOAT, gl.GL_RGB_UNSIGNED, ...
        self.format = format # GL_RG, GL_RG_INTEGER, ...
self.width_ = width
self.height_ = height
gl.glBindTexture(gl.GL_TEXTURE_RECTANGLE, self.id_)
gl.glTexParameteri(gl.GL_TEXTURE_RECTANGLE, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(gl.GL_TEXTURE_RECTANGLE, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(gl.GL_TEXTURE_RECTANGLE, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
gl.glTexParameteri(gl.GL_TEXTURE_RECTANGLE, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
gl.glBindTexture(gl.GL_TEXTURE_RECTANGLE, 0)
def __del__(self):
gl.glDeleteTextures(1, [self.id_])
def bind(self, texture_unit_id: int):
""" bind texture to given texture unit.
Args:
texture_unit_id (int): id of texture unit to which the texture should be bound.
"""
gl.glActiveTexture(gl.GL_TEXTURE0 + int(texture_unit_id))
gl.glBindTexture(gl.GL_TEXTURE_RECTANGLE, self.id_)
def release(self, texture_unit_id: int):
""" release texture from given texture unit.
Args:
texture_unit_id (int): id of texture unit from which the texture should be released.
"""
gl.glActiveTexture(gl.GL_TEXTURE0 + int(texture_unit_id))
gl.glBindTexture(gl.GL_TEXTURE_RECTANGLE, 0)
def assign(self, array: np.array):
""" assign data from given numpy array to the texture.
Depending on the dtype of np.array different texture uploads are triggered.
Args:
array (np.array): data represented as numpy array.
Raises:
NotImplementedError: raised when unsupported dtype of the given array is encountered.
"""
gl.glBindTexture(gl.GL_TEXTURE_RECTANGLE, self.id_)
if array.dtype == np.uint8:
gl.glTexImage2D(gl.GL_TEXTURE_RECTANGLE, 0, self.internalFormat_, self.width_, self.height_, 0, self.format,
gl.GL_UNSIGNED_BYTE, array)
elif array.dtype == np.float32:
gl.glTexImage2D(gl.GL_TEXTURE_RECTANGLE, 0, self.internalFormat_, self.width_, self.height_, 0, self.format,
gl.GL_FLOAT, array)
else:
raise NotImplementedError("pixel type not implemented.")
gl.glBindTexture(gl.GL_TEXTURE_RECTANGLE, 0)
@property
def id(self):
""" get the texture's OpenGL id. """
return self.id_
class GlTexture2D:
""" Texture2D
"""
def __init__(self, width, height, internal_format=gl.GL_RGBA):
self.id_ = gl.glGenTextures(1)
self.internal_format_ = internal_format # gl.GL_RGB_FLOAT, gl.GL_RGB_UNSIGNED, ...
self.width_ = width
self.height_ = height
gl.glBindTexture(gl.GL_TEXTURE_2D, self.id_)
self._allocateMemory()
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
# gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
# gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
def __del__(self):
gl.glDeleteTextures([self.id_])
def bind(self, texture_unit_id: int):
""" bind texture to given texture unit.
Args:
texture_unit_id (int): id of texture unit to which the texture should be bound.
"""
gl.glActiveTexture(gl.GL_TEXTURE0 + int(texture_unit_id))
gl.glBindTexture(gl.GL_TEXTURE_2D, self.id_)
def release(self, texture_unit_id: int): # pylint: disable=no-self-use
""" release texture from given texture unit.
Args:
texture_unit_id (int): id of texture unit from which the texture should be released.
"""
gl.glActiveTexture(gl.GL_TEXTURE0 + int(texture_unit_id))
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
def assign(self, array: np.array, format: gl.GLenum):
"""assign data from given numpy array to the texture.
Depending on the dtype of np.array different texture uploads are triggered.
Args:
array (np.array): data represented as numpy array.
Raises:
NotImplementedError: raised when unsupported dtype of the given array is encountered.
"""
gl.glBindTexture(gl.GL_TEXTURE_2D, self.id_)
if array.dtype == np.uint8:
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, self.internal_format_, self.width_, self.height_, 0, format,
gl.GL_UNSIGNED_BYTE, array)
elif array.dtype == np.uint32:
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, self.internal_format_, self.width_, self.height_, 0, format,
gl.GL_UNSIGNED_INT, array)
elif array.dtype == np.float32:
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, self.internal_format_, self.width_, self.height_, 0, format, gl.GL_FLOAT,
array)
else:
raise NotImplementedError("pixel type not implemented.")
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
def set_filter(self, min_filter: gl.Constant, mag_filter: gl.Constant):
""" set the filter operations performance when up-/down-sampling of texture is required.
Args:
min_filter (gl.Constant): filtering used for down-sampling
mag_filter (gl.Constant): filtering used for up-sampling
"""
gl.glBindTexture(gl.GL_TEXTURE_2D, self.id_)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, min_filter)
gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, mag_filter)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
def resize(self, width: int, height: int):
""" resize texture to given width and height. """
self.width_ = width
self.height_ = height
# need to copy?
def download(self) -> np.array:
gl.glBindTexture(gl.GL_TEXTURE_2D, self.id_)
array = gl.glGetTexImage(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, gl.GL_FLOAT)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
return array
@property
def id(self) -> gl.GLuint:
""" get the texture's id """
return self.id_
@property
def width(self) -> int:
""" get the texture's width """
return self.width_
@property
def height(self) -> int:
""" get the texture's height """
return self.height_
def _allocateMemory(self):
pixel_format = gl.GL_RGBA
pixel_type = gl.GL_UNSIGNED_BYTE
if self.internal_format_ in [gl.GL_R, gl.GL_RG, gl.GL_RGB, gl.GL_RGBA]:
pixel_type = gl.GL_UNSIGNED_BYTE
elif self.internal_format_ in [gl.GL_R32I, gl.GL_RG32I, gl.GL_RGB32I, gl.GL_RGBA32I]:
pixel_type = gl.GL_INT
elif self.internal_format_ in [gl.GL_R32F, gl.GL_RG32F, gl.GL_RGB32F, gl.GL_RGBA32F]:
pixel_type = gl.GL_FLOAT
if self.internal_format_ in [gl.GL_R, GL_R32F]:
pixel_format = gl.GL_RED
elif self.internal_format_ in [gl.GL_RG, gl.GL_RG32F]:
pixel_format = gl.GL_RG
elif self.internal_format_ in [gl.GL_RGB, gl.GL_RGB32F]:
pixel_format = gl.GL_RGB
elif self.internal_format_ in [gl.GL_RGBA, gl.GL_RGBA32F]:
pixel_format = gl.GL_RGBA
elif self.internal_format_ in [gl.GL_R32I, gl.GL_R32UI]:
pixel_format = gl.GL_RED_INTEGER
elif self.internal_format_ in [gl.GL_RG32I, gl.GL_RG32UI]:
pixel_format = gl.GL_RG_INTEGER
elif self.internal_format_ in [gl.GL_RGB32I, gl.GL_RGB32UI]:
pixel_format = gl.GL_RGB_INTEGER
elif self.internal_format_ in [gl.GL_RGBA32I, gl.GL_RGBA32UI]:
pixel_format = gl.GL_RGBA_INTEGER
gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, self.internal_format_, self.width_, self.height_, 0, pixel_format, pixel_type,
None)
class GlTexture1D:
""" Texture1D
1-dimensional texture.
"""
def __init__(self, width, internalFormat=gl.GL_RGBA):
self.id_ = gl.glGenTextures(1)
self.internalFormat_ = internalFormat # gl.GL_RGB_FLOAT, gl.GL_RGB_UNSIGNED, ...
self.width_ = width
gl.glBindTexture(gl.GL_TEXTURE_1D, self.id_)
gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER)
gl.glTexParameteri(gl.GL_TEXTURE_1D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER)
# gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, self.internalFormat_,
# width_, height_, 0, gl.format, gl.GL_UNSIGNED_BYTE, None)
gl.glBindTexture(gl.GL_TEXTURE_1D, 0)
def __del__(self):
gl.glDeleteTextures(1, [self.id_])
def bind(self, textureUnitId: int):
gl.glActiveTexture(gl.GL_TEXTURE0 + int(textureUnitId))
gl.glBindTexture(gl.GL_TEXTURE_1D, self.id_)
def release(self, textureUnitId: int):
gl.glActiveTexture(gl.GL_TEXTURE0 + int(textureUnitId))
gl.glBindTexture(gl.GL_TEXTURE_1D, 0)
def assign(self, array: np.array, format: gl.GLenum):
gl.glBindTexture(gl.GL_TEXTURE_1D, self.id_)
if array.dtype == np.uint8:
gl.glTexImage1D(gl.GL_TEXTURE_1D, 0, self.internalFormat_, self.width_, 0, format, gl.GL_UNSIGNED_BYTE, array)
elif array.dtype == np.uint32:
gl.glTexImage1D(gl.GL_TEXTURE_1D, 0, self.internalFormat_, self.width_, 0, format, gl.GL_UNSIGNED_INT, array)
elif array.dtype == np.float32:
gl.glTexImage1D(gl.GL_TEXTURE_1D, 0, self.internalFormat_, self.width_, 0, format, gl.GL_FLOAT, array)
else:
raise NotImplementedError("pixel type not implemented.")
        gl.glBindTexture(gl.GL_TEXTURE_1D, 0)
def resize(self, width: int):
""" resize texture. """
self.width_ = width
@property
def id(self) -> gl.GLuint:
""" get the id of the program. """
return self.id_
@property
def width(self) -> int:
""" get width of the texture. """
return self.width_
class GlShader:
""" OpenGL shader
"""
def __init__(self, shader_type, source):
self._code = source
self._shader_type = shader_type
self.id_ = gl.glCreateShader(self._shader_type)
gl.glShaderSource(self.id_, source)
gl.glCompileShader(self.id_)
success = gl.glGetShaderiv(self.id_, gl.GL_COMPILE_STATUS)
if success == gl.GL_FALSE:
error_string = gl.glGetShaderInfoLog(self.id_).decode("utf-8")
readable_error = []
source_lines = source.split("\n")
for line in error_string.split("\n"):
match = re.search(r"\(([0-9]+)\) : ([\s\w]+): ([\s\w\S\W]+)", line)
if match:
lineno = match.group(1)
errorno = match.group(2)
message = match.group(3)
readable_message = "{}: {} at line {}:\n {}: {}".format(errorno, message, lineno, lineno,
source_lines[int(lineno)].strip())
readable_error.append(readable_message)
else:
readable_error.append(line)
raise RuntimeError("\n".join(readable_error))
def __del__(self):
gl.glDeleteShader(self.id_)
@property
def type(self) -> gl.Constant:
""" return shader type """
return self._shader_type
@property
def id(self) -> gl.GLuint:
""" get id of shader """
return self.id_
@property
def code(self):
""" get shader source code. """
return self._code
@staticmethod
def from_file(shader_type: gl.Constant, filename: str):
""" load and initialize shader from given filename """
f = open(filename)
source = "".join(f.readlines())
# todo: preprocess.
f.close()
return GlShader(shader_type, source)
class GlProgram:
""" An OpenGL program handle. """
def __init__(self):
self.id_ = gl.glCreateProgram()
self._shaders = {}
self._uniform_types = {}
self.is_linked = False
# todo: figure this out.
def __del__(self):
gl.glDeleteProgram(self.id_)
def bind(self):
if not self.is_linked:
raise RuntimeError("Program must be linked before usage.")
gl.glUseProgram(self.id_)
def release(self):
gl.glUseProgram(0)
def attach(self, shader):
self._shaders[shader.type] = shader
@property
def id(self):
return self.id_
def __setitem__(self, name: str, value: Any):
# quitely ignore
if name not in self._uniform_types:
if WARN_INVALID_UNIFORMS: print("No uniform with name '{}' available.".format(name))
return
current = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM)
if current != self.id_: self.bind()
loc = gl.glGetUniformLocation(self.id_, name)
T = self._uniform_types[name]
if T == "int":
gl.glUniform1i(loc, np.int32(value))
elif T == "uint":
gl.glUniform1ui(loc, np.uint32(value))
elif T == "float":
gl.glUniform1f(loc, np.float32(value))
elif T == "bool":
gl.glUniform1f(loc, value)
elif T == "vec2":
gl.glUniform2fv(loc, 1, value)
elif T == "vec3":
gl.glUniform3fv(loc, 1, value)
elif T == "vec4":
gl.glUniform4fv(loc, 1, value)
elif T == "ivec2":
gl.glUniform2iv(loc, 1, value)
elif T == "ivec3":
gl.glUniform3iv(loc, 1, value)
elif T == "ivec4":
gl.glUniform4iv(loc, 1, value)
elif T == "uivec2":
gl.glUniform2uiv(loc, 1, value)
elif T == "uivec3":
gl.glUniform3uiv(loc, 1, value)
elif T == "uivec4":
gl.glUniform4uiv(loc, 1, value)
elif T == "mat4":
# print("set matrix: ", value)
gl.glUniformMatrix4fv(loc, 1, False, value.T.astype(np.float32))
elif T.endswith("sampler1D"):
gl.glUniform1i(loc, np.int32(value))
elif T.endswith("sampler2D"):
gl.glUniform1i(loc, np.int32(value))
elif T.endswith("sampler2DRect"):
gl.glUniform1i(loc, np.int32(value))
else:
raise NotImplementedError("uniform type {} not implemented. :(".format(T))
if current != self.id_: gl.glUseProgram(current)
def link(self):
if gl.GL_VERTEX_SHADER not in self._shaders or gl.GL_FRAGMENT_SHADER not in self._shaders:
raise RuntimeError("program needs at least vertex and fragment shader")
for shader in self._shaders.values():
gl.glAttachShader(self.id_, shader.id)
for line in shader.code.split("\n"):
match = re.search(r"uniform\s+(\S+)\s+(\S+)\s*;", line)
if match:
self._uniform_types[match.group(2)] = match.group(1)
gl.glLinkProgram(self.id_)
is_linked = bool(gl.glGetProgramiv(self.id_, gl.GL_LINK_STATUS))
if not is_linked:
msg = gl.glGetProgramInfoLog(self.id_)
raise RuntimeError(str(msg.decode("utf-8")))
# after linking we don't need the source code anymore.
# for shader in self.shaders_:
# del shader
self._shaders = {}
self.is_linked = True
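# --- Added usage sketch (hedged; not part of the original module) ---
# Compiling and linking a minimal program with the wrappers above; the GLSL
# sources are supplied by the caller and a current GL context is required.
def _demo_program(vertex_src, fragment_src):
    program = GlProgram()
    program.attach(GlShader(gl.GL_VERTEX_SHADER, vertex_src))
    program.attach(GlShader(gl.GL_FRAGMENT_SHADER, fragment_src))
    program.link()
    program.bind()
    program["mvp"] = np.eye(4, dtype=np.float32)  # quietly ignored unless the shaders declare 'uniform mat4 mvp;'
    program.release()
    return program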
class GlRenderbuffer:
""" Wrapper for an OpenGL's renderbuffer """
def __init__(self, width: int, height: int, renderbuffer_format=gl.GL_RGBA):
self._format = renderbuffer_format
self._width = width
self._height = height
self._id = gl.glGenRenderbuffers(1)
# allocate storage.
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self._id)
gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, self._format, self._width, self._height)
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0)
def __del__(self):
gl.glDeleteRenderbuffers(1, [self._id])
def bind(self):
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self._id)
def release(self):
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0)
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def id(self):
return self._id
class GlFramebuffer:
""" Wrapper for an OpenGL's framebuffer. """
def __init__(self, width: int, height: int):
self._id = gl.glGenFramebuffers(1)
self._target = gl.GL_FRAMEBUFFER
self._attachments = {}
self._valid = False
self._width = width
self._height = height
def __del__(self):
gl.glDeleteFramebuffers(1, [self._id])
def bind(self):
if not self._valid: raise RuntimeError("Incomplete framebuffer.")
gl.glBindFramebuffer(self._target, self._id)
def release(self):
gl.glBindFramebuffer(self._target, 0)
def attach(self, target: int, attachment: Union[GlTexture2D, GlTextureRectangle, GlRenderbuffer]):
""" attach Texture or Renderbuffer to given attachment target.
Args:
target: attachment target, e.g., GL_COLOR_ATTACHMENT0, GL_DEPTH_STENCIL_ATTACHMENT, ...
attachment: texture or renderbuffer to attach.
"""
if isinstance(attachment, (GlTexture2D, GlTextureRectangle)):
texture_target = None
if isinstance(attachment, GlTexture2D): texture_target = gl.GL_TEXTURE_2D
if isinstance(attachment, GlTextureRectangle): texture_target = gl.GL_TEXTURE_RECTANGLE
gl.glBindFramebuffer(self._target, self._id)
gl.glFramebufferTexture2D(self._target, target, texture_target, attachment.id, 0)
gl.glBindFramebuffer(self._target, 0)
elif isinstance(attachment, GlRenderbuffer):
gl.glBindFramebuffer(self._target, self._id)
gl.glFramebufferRenderbuffer(self._target, target, gl.GL_RENDERBUFFER, attachment.id)
gl.glBindFramebuffer(self._target, 0)
else:
raise ValueError("texture should be GlTexture2D, GlTextureRectangle or GlRenderbuffer.")
self._attachments[target] = attachment
gl.glBindFramebuffer(self._target, self._id)
self._valid = gl.glCheckFramebufferStatus(self._target) == gl.GL_FRAMEBUFFER_COMPLETE
gl.glBindFramebuffer(self._target, 0)
@property
def valid(self):
""" is framebuffer valid? """
return self._valid
@property
def height(self):
""" framebuffer's height """
return self._height
@property
def width(self):
""" framebuffer's width """
return self._width |
regtests/c++/array_sized.py | ahakingdom/Rusthon | 622 | 12649830 | <reponame>ahakingdom/Rusthon
'''
array with default size
'''
class A:
pass
def somefunc():
a = [5]int(1,2,3,4,5)
print('len a:', len(a))
a.pop()
print('len a:', len(a))
print(a[0])
print(a[1])
b = [10]int()
print('len b:', len(b))
print b[0]
print b[1]
c = [10]f64( 1.1, 2.2, 3.3 )
print c[0]
print c[1]
print c[2]
x = A()
y = A()
d = [4]A( x,y )
print d[0]
print d[1]
def main():
somefunc()
print('OK') |
nndet/inference/detection/__init__.py | joeranbosma/nnDetection | 242 | 12649834 | from nndet.inference.detection.wbc import batched_wbc, wbc
from nndet.inference.detection.model import batched_nms_model
from nndet.inference.detection.ensemble import batched_wbc_ensemble, batched_nms_ensemble, \
wbc_nms_no_label_ensemble
|
tests/sectools/test_domain_utils.py | kubajir/msticpy | 820 | 12649837 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""domain_utilstes extract test class."""
import pytest_check as check
from msticpy.sectools import domain_utils
def test_validate_domain():
test_dom_val = domain_utils.DomainValidator()
valid_tld = test_dom_val.validate_tld("www.microsoft.com")
resolvable = test_dom_val.is_resolvable("www.microsoft.com")
blacklisted = test_dom_val.in_abuse_list("www.microsoft.com")
assert valid_tld
assert resolvable
assert not blacklisted[0]
def test_validate_domain_fail():
test_dom_val = domain_utils.DomainValidator()
valid_tld = test_dom_val.validate_tld("www.contoso.garbage")
resolvable = test_dom_val.is_resolvable("www.contoso.garbage")
blacklisted = test_dom_val.in_abuse_list("www.contoso.garbage")
assert not valid_tld
assert not resolvable
assert not blacklisted[0]
assert blacklisted[1] is None
def test_resolver_funcs():
"""Test domain utils functions."""
result = domain_utils.dns_resolve("www.microsoft.com")
check.is_not_none(result["qname"])
check.is_true(result["rrset"])
ip = result["rrset"][0]
result = domain_utils.dns_resolve("www.contoso.garbage")
check.is_not_none(result)
check.is_false(result.get("rrset"))
result = domain_utils.ip_rev_resolve(ip)
check.is_not_none(result)
result = domain_utils.dns_components("www.microsoft.com")
check.equal(result["subdomain"], "www")
check.equal(result["domain"], "microsoft")
check.equal(result["suffix"], "com")
result = domain_utils.url_components("http://www.microsoft.com")
check.equal(result["scheme"], "http")
check.equal(result["host"], "www.microsoft.com")
|
ggplot/geoms/__init__.py | themiwi/ggplot | 1,133 | 12649844 | from .geom_abline import geom_abline
from .geom_area import geom_area
from .geom_bar import geom_bar
from .geom_bin2d import geom_bin2d
from .geom_blank import geom_blank
from .geom_boxplot import geom_boxplot
from .geom_density import geom_density
from .geom_errorbar import geom_errorbar
from .geom_histogram import geom_histogram
from .geom_hline import geom_hline
from .geom_jitter import geom_jitter
from .geom_line import geom_line
from .geom_now_its_art import geom_now_its_art
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_polygon import geom_polygon
from .geom_rect import geom_rect
from .geom_ribbon import geom_ribbon
from .geom_step import geom_step
from .geom_text import geom_text
from .geom_tile import geom_tile
from .geom_violin import geom_violin
from .geom_vline import geom_vline
|
mzutils/json_misc.py | Mohan-Zhang-u/mzutils | 132 | 12649846 | <gh_stars>100-1000
import codecs
import json
def dump_json(dictionary, file_path):
"""
:param dict:
:param file_path:
:return:
"""
with codecs.open(file_path, 'w+', encoding='utf-8') as fp:
json.dump(dictionary, fp)
def load_json(file_path):
"""
:param file_path:
:return: dict object
"""
with codecs.open(file_path, 'r', encoding='utf-8') as fp:
return json.load(fp)
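

# Illustrative usage sketch (not part of the original module); the file name below is a
# placeholder assumption used only for this example.
#
# if __name__ == "__main__":
#     dump_json({"name": "example", "value": 1}, "example.json")
#     loaded = load_json("example.json")
#     assert loaded["value"] == 1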
|
modules/unicode.py | nikolas/jenni | 133 | 12649870 | #!/usr/bin/env python
"""
unicode.py - jenni Unicode Module
Copyright 2010-2013, yano (yanovich.net)
Licensed under the Eiffel Forum License 2.
More info:
* jenni: https://github.com/myano/jenni/
* Phenny: http://inamidst.com/phenny/
"""
import re
import unicodedata
import urlparse
control_chars = ''.join(map(unichr, range(0,32) + range(127,160)))
control_char_re = re.compile(u'[%s]' % re.escape(control_chars))
def supercombiner(jenni, input):
""".sc -- displays the infamous supercombiner"""
s = 'u'
for i in xrange(1, 3000):
if unicodedata.category(unichr(i)) == "Mn":
s += unichr(i)
if len(s) > 100:
break
jenni.say(s)
supercombiner.commands = ['sc']
supercombiner.rate = 30
def decode(bit):
try:
if isinstance(bit, str) or isinstance(bit, unicode):
text = bit.decode('utf-8')
else:
text = str()
except UnicodeDecodeError:
try:
text = bit.decode('iso-8859-1')
except UnicodeDecodeError:
text = bit.decode('cp1252')
return text
def encode(bit):
try:
if isinstance(bit, str) or isinstance(bit, unicode):
text = bit.encode('utf-8')
else:
text = str()
except UnicodeEncodeError:
try:
text = bit.encode('iso-8859-1')
except UnicodeEncodeError:
text = bit.encode('cp1252')
return text
def urlEncodeNonAscii(b):
return re.sub('[\x80-\xFF]', lambda c: '%%%02x' % ord(c.group(0)), b)
def iriToUri(iri):
parts = urlparse.urlparse(iri)
return urlparse.urlunparse(
part.encode('idna') if parti == 1 else urlEncodeNonAscii(
part.encode('utf-8'))
for parti, part in enumerate(parts)
)
def remove_control_chars(s):
return control_char_re.sub('', s)
if __name__ == '__main__':
print __doc__.strip()
|
yyets/BagAndDrag/zimuxia/convert_db.py | kuyacai/YYeTsBot | 9,250 | 12649874 | #!/usr/local/bin/python3
# coding: utf-8
# YYeTsBot - convert_db.py
# 2/5/21 13:46
#
__author__ = "Benny <<EMAIL>>"
# convert to mongodb and con_sqlite
import pymongo
import pymysql
import tqdm
import json
from typing import List
con_mysql = pymysql.Connect(host="127.0.0.1", user="root", password="<PASSWORD>", charset="utf8mb4", database="zimuxia",
cursorclass=pymysql.cursors.DictCursor
)
mongo_client = pymongo.MongoClient()
SIZE = 2000
def clear_mongodb():
mongo_client.drop_database("zimuxia")
def clear_mysql():
con_mysql.cursor().execute("truncate table resource;")
con_mysql.commit()
def mysql_insert(data: List[dict]):
sql = "INSERT INTO resource VALUES(NULL,%(url)s,%(name)s,NULL,NULL,%(data)s)"
cur = con_mysql.cursor()
for i in data:
cur.execute(sql, i)
con_mysql.commit()
def mongodb_insert(data: List[dict]):
db = mongo_client["zimuxia"]
col = db["resource"]
col.insert_many(data)
def main():
clear_mongodb()
clear_mysql()
with open("result.json") as f:
data = json.load(f)
# [{"url": "https://www.zimuxia.cn/portfolio/%e6%888b%e5%8f%8b", "name": "我家的女儿交不到男朋友", "data":""}]
mysql_insert(data)
mongodb_insert(data)
if __name__ == '__main__':
main()
con_mysql.close()
mongo_client.close()
|
tests/test_buildoptionsparser.py | druttka/iotedgedev | 111 | 12649924 | import pytest
from iotedgedev.buildoptionsparser import BuildOptionsParser
pytestmark = pytest.mark.unit
def test_filter_build_options():
build_options = [
"--rm",
"-f test",
"--file test",
"-t image",
"--tag image"
]
build_options_parser = BuildOptionsParser(build_options)
assert not build_options_parser.parse_build_options()
def test_parse_to_dict():
build_options = [
"--add-host=github.com:192.168.127.12",
"--add-host=ports.ubuntu.com:172.16.58.3",
"--build-arg a=b",
"--build-arg c=d",
"--label e=f",
"--label g"
]
sdk_options = {
'extra_hosts': {
'github.com': '192.168.127.12',
'ports.ubuntu.com': '172.16.58.3'
},
'buildargs': {
'a': 'b',
'c': 'd'
},
'labels': {
'e': 'f',
'g': ''
}
}
build_options_parser = BuildOptionsParser(build_options)
assert sdk_options == build_options_parser.parse_build_options()
def test_parse_to_list():
build_options = [
"--cache-from a",
"--cache-from b"
]
sdk_options = {
'cache_from': ['a', 'b']
}
build_options_parser = BuildOptionsParser(build_options)
assert sdk_options == build_options_parser.parse_build_options()
def test_parse_val():
build_options = [
"--network bridge",
"--platform Linux",
"--shm-size 1000000",
"--target target"
]
sdk_options = {
'network_mode': 'bridge',
'platform': 'Linux',
'shmsize': '1000000',
'target': 'target'
}
build_options_parser = BuildOptionsParser(build_options)
assert sdk_options == build_options_parser.parse_build_options()
def test_parse_container_limits():
build_options = [
"--cpu-shares 50",
"--cpuset-cpus 0-1",
"--memory 10000000",
"--memory-swap 2000000"
]
sdk_options = {
'container_limits': {
'cpushares': '50',
'cpusetcpus': '0-1',
'memory': '10000000',
'memswap': '2000000'
}
}
build_options_parser = BuildOptionsParser(build_options)
assert sdk_options == build_options_parser.parse_build_options()
def test_parse_flag():
build_options = [
"--pull=true",
"-q=false",
"--no-cache"
]
sdk_options = {
'pull': True,
'quiet': False,
'nocache': True
}
build_options_parser = BuildOptionsParser(build_options)
assert sdk_options == build_options_parser.parse_build_options()
def test_invalid_build_options():
with pytest.raises(KeyError):
build_options = [
"--cgroup-parent",
"--compress",
"--cpu-period",
"--cpuset-mems 10",
]
build_options_parser = BuildOptionsParser(build_options)
build_options_parser.parse_build_options()
def test_filtered_valid_build_options():
build_options = [
"--rm",
"--file test",
"--tag image",
"--add-host=github.com:192.168.127.12",
"--add-host=ports.ubuntu.com:172.16.58.3",
"--cache-from a",
"--cache-from b",
"--network bridge",
"--platform Linux",
"--cpu-shares 50",
"--memory 10000000",
"--pull=true",
"-q=false",
"--no-cache"
]
sdk_options = {
'extra_hosts': {
'github.com': '192.168.127.12',
'ports.ubuntu.com': '172.16.58.3'
},
'cache_from': ['a', 'b'],
'network_mode': 'bridge',
'platform': 'Linux',
'container_limits': {
'cpushares': '50',
'memory': '10000000',
},
'pull': True,
'quiet': False,
'nocache': True
}
build_options_parser = BuildOptionsParser(build_options)
assert sdk_options == build_options_parser.parse_build_options()
|
examples/slow_task.py | avivazran/UnrealEnginePython | 2,350 | 12649931 | from unreal_engine import FSlowTask
import time
# Create an FSlowTask object, defining the amount of work that
# will be done, and the initial message.
t = FSlowTask(10, "Doing Something")
t.initialize()
# Make the dialog, and include a Cancel button (default is not to
# allow a cancel button).
t.make_dialog(True)
time.sleep(1)
for i in range(10) :
# Update the progress bar. Note that the first argument is the
# amount of work to be done this frame, not the overall work
# done so far.
t.enter_progress_frame(1, "Progress Position : {}".format(i))
time.sleep(0.2)
# If there was a cancel button included, we can check if it was
# pressed.
if t.received_user_cancel():
print("Cancelled")
break
t.destroy()
|
tools/harness/tests/multihoptests.py | lambdaxymox/barrelfish | 111 | 12649972 | <gh_stars>100-1000
##########################################################################
# Copyright (c) 2009, 2010, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import debug, tests
from common import TestCommon, TimeoutError
from results import RawResults, PassFailResult
class MultihopTestCommon(TestCommon):
def get_module_name(self):
raise NotImplementedError
def get_modules(self, build, machine):
modules = super(MultihopTestCommon, self).get_modules(build, machine)
modules.add_module(self.get_module_name())
return modules
@tests.add_test
class MultihopTest(MultihopTestCommon):
''' Test whether multi-hop messaging is working '''
name = "multihop_test"
def get_module_name(self):
return "multihoptest"
def get_finish_string(self):
return "server all done"
def get_modules(self, build, machine):
modules = super(MultihopTestCommon, self).get_modules(build, machine)
modules.add_module(self.get_module_name(),["core=0", "server"])
modules.add_module(self.get_module_name(),["core=1", "client"])
return modules
def process_data(self, testdir, rawiter):
# the test passed iff we see the finish string
passed = False
for line in rawiter:
if self.get_finish_string() in line:
passed = True
return PassFailResult(True)
return PassFailResult(False)
@tests.add_test
class MultihopLatencyTest(MultihopTestCommon):
''' Multihop Transport Throughput microbenchmark '''
name = "multihop_throughput_latency"
def get_module_name(self):
return "multihop_latency_bench"
def process_data(self, testdir, rawiter):
results = RawResults('message type')
times = []
iteration = None
for line in rawiter:
m = re.match("Running latency test for message (.*)....", line)
if m:
if times:
results.add_group(iteration, times)
iteration = m.group(1)
times = []
continue
m = re.match("page \d+ took (\d+)", line)
if m:
assert(iteration is not None)
times.append(int(m.group(1)))
if len(times) != 0:
results.add_group(iteration, times)
return results
|
rqalpha/model/__init__.py | LawrentChen/rqalpha | 5,263 | 12650035 | # -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. (hereinafter "Ricequant")
#
# This software may not be used except in compliance with the current license.
#
# * Non-commercial use (i.e. individuals using this software for non-commercial purposes, or universities, research institutes and other non-profit institutions using it for education, research and similar purposes):
#     Governed by the Apache License 2.0 (the "Apache 2.0 License"); you may obtain a copy of the Apache 2.0 License at: http://www.apache.org/licenses/LICENSE-2.0.
#     Unless required by law or agreed to in writing, this software must be distributed under the current license "AS IS", and no additional conditions may be attached.
#
# * Commercial use (i.e. individuals using this software for any commercial purpose, or legal persons or other organizations using this software for any purpose):
#     Without authorization from Ricequant, no individual may use this software for any commercial purpose (including but not limited to providing, selling, renting, lending or transferring to third parties this software, derivative products of this software, or products or services that reference or draw on the functionality or source code of this software), and no legal person or other organization may use this software for any purpose; otherwise Ricequant is entitled to pursue liability for the corresponding intellectual-property infringement.
#     Subject to the above, use of this software is likewise governed by the Apache 2.0 License; where the Apache 2.0 License conflicts with this license, this license prevails.
#     For the detailed authorization process, please contact <EMAIL>.
from .order import Order
from .trade import Trade
from .instrument import Instrument
from .bar import BarMap, BarObject, PartialBarObject
from .tick import TickObject
|
docs/build.py | TimoRoth/ghp-import | 313 | 12650046 | <gh_stars>100-1000
#!/usr/bin/env python
import io
import os
from markdown import markdown
def main():
base = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(os.path.dirname(base), "README.md")
with io.open(readme_path, encoding="utf-8") as fobj:
readme = fobj.read()
template_path = os.path.join(base, "index.html.tmpl")
with io.open(template_path, encoding="utf-8") as fobj:
template = fobj.read()
index_path = os.path.join(base, "index.html")
with io.open(index_path, mode="w", encoding="utf-8") as fobj:
html = markdown(readme, extensions=["fenced_code"])
fobj.write(template.format(body=html))
if __name__ == "__main__":
main()
|
tests/management/commands/test_drop_test_database.py | dankgen-tobias/django-extensions | 4,057 | 12650047 | <filename>tests/management/commands/test_drop_test_database.py
# -*- coding: utf-8 -*-
from io import StringIO
from unittest.mock import MagicMock, Mock, PropertyMock, call, patch
from django.core.management import CommandError, call_command
from django.test import TestCase
from django.test.utils import override_settings
# Database testing configurations
UNKOWN_ENGINE = {
'default': {
'ENGINE': 'django.db.backends.unknown',
'NAME': 'unknown',
}
}
NO_TEST_NAME = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test',
'TEST': {
'NAME': '',
}
}
}
SQLITE = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
}
MYSQL_HOST_PORT = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test',
'USER': 'test',
'PASSWORD': '<PASSWORD>',
'HOST': 'localhost',
'PORT': '3306',
},
}
MYSQL_SOCKET = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test',
'USER': 'test',
'PASSWORD': '<PASSWORD>',
'HOST': '/var/run/mysqld/mysql.sock',
},
}
POSTGRES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test',
'USER': 'test',
'PASSWORD': '<PASSWORD>',
'PORT': '5432',
'HOST': 'localhost',
},
}
class DropTestDatabaseExceptionsTests(TestCase):
"""Test for drop_test_database command."""
def test_should_raise_CommandError_if_database_is_unknown(self):
with self.assertRaisesRegex(CommandError, "Unknown database unknown"):
call_command('drop_test_database', '--database=unknown')
@override_settings(DATABASES=UNKOWN_ENGINE)
@patch('django_extensions.management.commands.drop_test_database.input')
def test_should_raise_CommandError_if_unknown_database_engine(self, m_input):
m_input.return_value = 'yes'
with self.assertRaisesRegex(CommandError, "Unknown database engine django.db.backends.unknown"):
call_command('drop_test_database')
@override_settings(DATABASES=NO_TEST_NAME)
def test_should_raise_CommandError_if_test_database_name_is_empty(self):
with self.assertRaisesRegex(CommandError, "You need to specify DATABASE_NAME in your Django settings file."):
call_command('drop_test_database')
class DropTestDatabaseTests(TestCase):
"""Test for drop_test_database command."""
@patch('sys.stdout', new_callable=StringIO)
@patch('django_extensions.management.commands.drop_test_database.input')
def test_should_raise_CommandError_if_database_is_unknown(self, m_input, m_stdout):
m_input.return_value = 'no'
call_command('drop_test_database')
self.assertEqual("Reset cancelled.\n", m_stdout.getvalue())
@override_settings(DATABASES=SQLITE)
@patch('sys.stdout', new_callable=StringIO)
@patch('os.path.isfile')
@patch('os.unlink')
def test_sqlite3_should_unlink_primary_test_database(self, m_unlink, m_isfile, m_stdout):
# Indicate that no clone databases exist
m_isfile.side_effect = (True, False)
call_command('drop_test_database', '--noinput', verbosity=2)
with self.subTest('Should check for test database names until failure'):
self.assertListEqual(
m_isfile.call_args_list,
# See production code comments regarding double dots
[call('test_db.sqlite3'), call('test_db_1..sqlite3')],
)
with self.subTest('Should unlink only primary test database'):
self.assertListEqual(
m_unlink.call_args_list,
[call('test_db.sqlite3')],
)
with self.subTest('Should report successful message'):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=SQLITE)
@patch('os.path.isfile')
@patch('os.unlink')
def test_sqlite3_should_unlink_all_existing_clone_databases(self, m_unlink, m_isfile):
"""Test cloned test databases created via 'manage.py test --parallel'."""
# Indicate that clone databases exist up to test_db_2.sqlite3
m_isfile.side_effect = (True, True, True, False)
call_command('drop_test_database', '--noinput')
with self.subTest('Should check for test database names until failure'):
self.assertListEqual(
m_isfile.call_args_list,
[
call('test_db.sqlite3'),
# See production code comments regarding double dots
call('test_db_1..sqlite3'),
call('test_db_2..sqlite3'),
call('test_db_3..sqlite3'),
],
)
with self.subTest('Should unlink all existing test databases'):
self.assertListEqual(
m_unlink.call_args_list,
[
call('test_db.sqlite3'),
# See production code comments regarding double dots
call('test_db_1..sqlite3'),
call('test_db_2..sqlite3'),
],
)
@override_settings(DATABASES=SQLITE)
@patch('sys.stdout', new_callable=StringIO)
@patch('os.path.isfile')
@patch('os.unlink')
def test_sqlite3_should_not_print_Reset_successful_when_OSError_exception(self, m_unlink, m_isfile, m_stdout):
m_isfile.return_value = True
m_unlink.side_effect = OSError
call_command('drop_test_database', '--noinput', verbosity=2)
self.assertNotIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=MYSQL_HOST_PORT)
@patch('sys.stdout', new_callable=StringIO)
def test_mysql_should_drop_database_with_host_and_port(self, m_stdout):
m_database = MagicMock()
# Indicate that no clone databases exist
# DROP queries return None while SELECT queries return a row count
m_database.connect.return_value.cursor.return_value.execute.side_effect = (1, None, 0)
with patch.dict("sys.modules", MySQLdb=m_database):
call_command('drop_test_database', '--noinput', verbosity=2)
with self.subTest('Should check for and remove test database names until failure'):
exists_query = "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME="
self.assertListEqual(
m_database.connect.return_value.cursor.return_value.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS `test_test`'),
call(exists_query + "'test_test_1';"),
],
)
with self.subTest('Should report successful message'):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=MYSQL_SOCKET)
@patch('sys.stdout', new_callable=StringIO)
def test_mysql_should_drop_database_with_unix_socket(self, m_stdout):
m_database = MagicMock()
# Indicate that no clone databases exist
# DROP queries return None while SELECT queries return a row count
m_database.connect.return_value.cursor.return_value.execute.side_effect = (1, None, 0)
with patch.dict("sys.modules", MySQLdb=m_database):
call_command('drop_test_database', '--noinput', verbosity=2)
with self.subTest('Should check for and remove test database names until failure'):
exists_query = "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME="
self.assertListEqual(
m_database.connect.return_value.cursor.return_value.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS `test_test`'),
call(exists_query + "'test_test_1';"),
],
)
with self.subTest('Should report successful message'):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=MYSQL_HOST_PORT)
def test_mysql_should_drop_all_existing_clone_databases(self):
"""Test cloned test databases created via 'manage.py test --parallel'."""
m_database = MagicMock()
# Indicate that clone databases exist up to test_test_2
# DROP queries return None while SELECT queries return a row count
m_database.connect.return_value.cursor.return_value.execute.side_effect = \
(1, None, 1, None, 1, None, 0)
with patch.dict("sys.modules", MySQLdb=m_database):
call_command('drop_test_database', '--noinput')
exists_query = "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME="
self.assertListEqual(
m_database.connect.return_value.cursor.return_value.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS `test_test`'),
call(exists_query + "'test_test_1';"),
call('DROP DATABASE IF EXISTS `test_test_1`'),
call(exists_query + "'test_test_2';"),
call('DROP DATABASE IF EXISTS `test_test_2`'),
call(exists_query + "'test_test_3';"),
],
)
@override_settings(DATABASES=POSTGRES)
@patch('sys.stdout', new_callable=StringIO)
def test_postgresql_should_drop_database(self, m_stdout):
m_database = MagicMock()
m_cursor = Mock()
m_database.connect.return_value.cursor.return_value = m_cursor
# Indicate that no clone databases exist
type(m_cursor).rowcount = PropertyMock(side_effect=(1, 0))
with patch.dict("sys.modules", psycopg2=m_database):
call_command('drop_test_database', '--noinput', verbosity=2)
with self.subTest('Should check for and remove test database names until failure'):
exists_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname="
self.assertListEqual(
m_cursor.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS "test_test";'),
call(exists_query + "'test_test_1';"),
],
)
with self.subTest('Should report successful message'):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=POSTGRES)
def test_postgresql_should_drop_all_existing_cloned_databases(self):
"""Test cloned test databases created via 'manage.py test --parallel'."""
m_database = MagicMock()
m_cursor = Mock()
m_database.connect.return_value.cursor.return_value = m_cursor
# Indicate that clone databases exist up to test_test_2
type(m_cursor).rowcount = PropertyMock(side_effect=(1, 1, 1, 0))
with patch.dict("sys.modules", psycopg2=m_database):
call_command('drop_test_database', '--noinput')
exists_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname="
self.assertListEqual(
m_cursor.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS "test_test";'),
call(exists_query + "'test_test_1';"),
call('DROP DATABASE IF EXISTS "test_test_1";'),
call(exists_query + "'test_test_2';"),
call('DROP DATABASE IF EXISTS "test_test_2";'),
call(exists_query + "'test_test_3';"),
],
)
@override_settings(DATABASES=POSTGRES)
@patch('sys.stdout', new_callable=StringIO)
def test_postgresql_should_not_print_Reset_successful_when_exception_occured(self, m_stdout):
m_database = MagicMock()
m_database.ProgrammingError = Exception
m_cursor = Mock()
m_cursor.execute.side_effect = m_database.ProgrammingError
m_database.connect.return_value.cursor.return_value = m_cursor
with patch.dict("sys.modules", psycopg2=m_database):
call_command('drop_test_database', '--noinput', verbosity=2)
self.assertNotIn("Reset successful.", m_stdout.getvalue())
|
backend/auth_api/migrations/0004_extra_github_details.py | NeilsC/prestige | 314 | 12650057 | # Generated by Django 3.2.5 on 2021-09-04 14:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth_api', '0003_githubidentity'),
]
operations = [
migrations.AlterModelOptions(
name='githubidentity',
options={'verbose_name': 'GitHub Identity', 'verbose_name_plural': 'GitHub Identities'},
),
migrations.AddField(
model_name='githubidentity',
name='avatar_url',
field=models.CharField(max_length=200, null=True),
),
migrations.AddField(
model_name='githubidentity',
name='db_id',
field=models.IntegerField(default=-1, help_text='The numeric database ID of the user on GitHub'),
preserve_default=False,
),
migrations.AddField(
model_name='githubidentity',
name='user_handle',
field=models.CharField(default='', max_length=100),
preserve_default=False,
),
migrations.AlterField(
model_name='githubidentity',
name='uid',
field=models.CharField(help_text='The newer string ID of the user on GitHub', max_length=100),
),
migrations.AlterField(
model_name='githubidentity',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='github_ids', to=settings.AUTH_USER_MODEL),
),
]
|
examples/slack/botapp/mypythonbot/code.py | patgoley/gordon | 2,204 | 12650085 | import json
from urlparse import parse_qs
def handler(event, context):
with open('.context', 'r') as f:
gordon_context = json.loads(f.read())
expected_token = gordon_context['token']
req_body = event['body']
params = parse_qs(req_body)
# Check if the token is the correct one
token = params['token'][0]
if token != expected_token:
raise Exception("Invalid request token")
user = params['user_name'][0]
command = params['command'][0]
channel = params['channel_name'][0]
command_text = params['text'][0]
response = {
'response_type': 'in_channel',
'text': "Hello {}! you invoked {} while you were in {} with the following text: {}".format(user, command, channel, command_text),
"attachments": [
{
"text": "This is some extra information!"
}
]
}
return response
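

# Illustrative only: a sketch of how this handler might be exercised locally with a fake
# Slack slash-command payload. The token, user and channel values are made-up placeholders,
# and a local '.context' file containing {"token": "example-token"} is assumed.
#
# if __name__ == '__main__':
#     fake_event = {
#         'body': 'token=example-token&user_name=alice&command=%2Fhello'
#                 '&channel_name=general&text=hi+there'
#     }
#     print(handler(fake_event, None))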
|
at_learner_core/at_learner_core/models/model.py | hieuvecto/CASIA-SURF_CeFA | 133 | 12650110 | <reponame>hieuvecto/CASIA-SURF_CeFA
'''
TODO:
def get_wrapper
def get_optimizer
''' |
kratos/python_scripts/timer_process.py | lkusch/Kratos | 778 | 12650122 | # Importing the Kratos Library
import KratosMultiphysics
def Factory(settings, Model):
if not isinstance(settings, KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return TimerProcess(Model, settings["Parameters"])
# All the processes python processes should be derived from "Process"
class TimerProcess(KratosMultiphysics.Process):
"""This process helps to measure the time consumed on the simulations
Only the member variables listed below should be accessed directly.
Public member variables:
Model -- the container of the different model parts.
settings -- Kratos parameters containing solver settings.
"""
def __init__(self, Model, settings ):
""" The default constructor of the class
Keyword arguments:
self -- It signifies an instance of a class.
Model -- the container of the different model parts.
settings -- Kratos parameters containing solver settings.
"""
KratosMultiphysics.Process.__init__(self)
#The value can be a double or a string (function)
default_settings = KratosMultiphysics.Parameters("""
{
"help" : "This process helps to measure the time consumed on the simulations",
"output_filename" : "",
"print_interval_information" : false,
"interval_name" : "Analysis"
}
"""
)
# Assign this here since it will change the "interval" prior to validation
settings.ValidateAndAssignDefaults(default_settings)
self.interval_name = settings["interval_name"].GetString()
self.output_filename = settings["output_filename"].GetString()
# Defining timer
self.timer = KratosMultiphysics.Timer()
# Interval information
self.timer.SetPrintIntervalInformation(settings["print_interval_information"].GetBool())
# Output file
if self.output_filename != "":
self.timer.SetOuputFile(self.output_filename)
else:
self.timer.SetPrintOnScreen(True)
# Starting timer
self.timer.Start(self.interval_name)
def ExecuteFinalize(self):
""" This function is designed for being called at the end of the computations
Keyword arguments:
self -- It signifies an instance of a class.
"""
self.timer.Stop(self.interval_name)
if self.output_filename != "":
self.timer.PrintTimingInformation(self.timer)
self.timer.CloseOuputFile()
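

# Illustrative usage sketch (not part of the original module): constructing the process
# directly through Factory() with a minimal Parameters block. The Model instance, output
# file name and settings values below are assumptions chosen only for this example.
#
# model = KratosMultiphysics.Model()
# timer_settings = KratosMultiphysics.Parameters("""
# {
#     "Parameters" : {
#         "output_filename"            : "timing_results.time",
#         "print_interval_information" : false,
#         "interval_name"              : "Analysis"
#     }
# }
# """)
# timer_process = Factory(timer_settings, model)
# # ... run the analysis stages ...
# timer_process.ExecuteFinalize()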
|
keras_squeezenet/__init__.py | zr71516/squeezeNet | 430 | 12650128 | from keras_squeezenet.squeezenet import SqueezeNet
from keras_squeezenet.version import __version__
|
openstackclient/tests/unit/api/test_compute_v2.py | cloudification-io/python-openstackclient | 262 | 12650133 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Compute v2 API Library Tests"""
from keystoneauth1 import session
from osc_lib import exceptions as osc_lib_exceptions
from requests_mock.contrib import fixture
from openstackclient.api import compute_v2 as compute
from openstackclient.tests.unit import utils
FAKE_PROJECT = 'xyzpdq'
FAKE_URL = 'http://gopher.com/v2'
class TestComputeAPIv2(utils.TestCase):
def setUp(self):
super(TestComputeAPIv2, self).setUp()
sess = session.Session()
self.api = compute.APIv2(session=sess, endpoint=FAKE_URL)
self.requests_mock = self.useFixture(fixture.Fixture())
class TestFloatingIP(TestComputeAPIv2):
FAKE_FLOATING_IP_RESP = {
'id': 1,
'ip': '203.0.113.11', # TEST-NET-3
'fixed_ip': '198.51.100.11', # TEST-NET-2
'pool': 'nova',
'instance_id': None,
}
FAKE_FLOATING_IP_RESP_2 = {
'id': 2,
'ip': '203.0.113.12', # TEST-NET-3
'fixed_ip': '198.51.100.12', # TEST-NET-2
'pool': 'nova',
'instance_id': None,
}
LIST_FLOATING_IP_RESP = [
FAKE_FLOATING_IP_RESP,
FAKE_FLOATING_IP_RESP_2,
]
FAKE_SERVER_RESP_1 = {
'id': 1,
'name': 'server1',
}
def test_floating_ip_add_id(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
json={'server': {}},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_add('1', '192.168.3.11')
self.assertEqual(200, ret.status_code)
def test_floating_ip_add_name(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
json={'server': {}},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/server1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_add('server1', '192.168.3.11')
self.assertEqual(200, ret.status_code)
def test_floating_ip_create(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-floating-ips',
json={'floating_ip': self.FAKE_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_create('nova')
self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
def test_floating_ip_create_not_found(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-floating-ips',
status_code=404,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.floating_ip_create,
'not-nova',
)
def test_floating_ip_delete(self):
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-floating-ips/1',
status_code=202,
)
ret = self.api.floating_ip_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_floating_ip_delete_none(self):
ret = self.api.floating_ip_delete()
self.assertIsNone(ret)
def test_floating_ip_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips/1',
json={'floating_ip': self.FAKE_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_find('1')
self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
def test_floating_ip_find_ip(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips/' + self.FAKE_FLOATING_IP_RESP['ip'],
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips',
json={'floating_ips': self.LIST_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_find(self.FAKE_FLOATING_IP_RESP['ip'])
self.assertEqual(self.FAKE_FLOATING_IP_RESP, ret)
def test_floating_ip_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips/1.2.3.4',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips',
json={'floating_ips': self.LIST_FLOATING_IP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.floating_ip_find,
'192.168.3.11',
)
def test_floating_ip_list(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ips',
json={'floating_ips': self.LIST_FLOATING_IP_RESP},
status_code=200,
)
ret = self.api.floating_ip_list()
self.assertEqual(self.LIST_FLOATING_IP_RESP, ret)
def test_floating_ip_remove_id(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_remove('1', '192.168.3.11')
self.assertEqual(200, ret.status_code)
def test_floating_ip_remove_name(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/servers/1/action',
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/servers/server1',
json={'server': self.FAKE_SERVER_RESP_1},
status_code=200,
)
ret = self.api.floating_ip_remove('server1', '192.168.3.11')
self.assertEqual(200, ret.status_code)
class TestFloatingIPPool(TestComputeAPIv2):
LIST_FLOATING_IP_POOL_RESP = [
{"name": "tide"},
{"name": "press"},
]
def test_floating_ip_pool_list(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-floating-ip-pools',
json={'floating_ip_pools': self.LIST_FLOATING_IP_POOL_RESP},
status_code=200,
)
ret = self.api.floating_ip_pool_list()
self.assertEqual(self.LIST_FLOATING_IP_POOL_RESP, ret)
class TestHost(TestComputeAPIv2):
FAKE_HOST_RESP_1 = {
"zone": "internal",
"host_name": "myhost",
"service": "conductor",
}
FAKE_HOST_RESP_2 = {
"zone": "internal",
"host_name": "myhost",
"service": "scheduler",
}
FAKE_HOST_RESP_3 = {
"zone": "nova",
"host_name": "myhost",
"service": "compute",
}
LIST_HOST_RESP = [
FAKE_HOST_RESP_1,
FAKE_HOST_RESP_2,
FAKE_HOST_RESP_3,
]
def test_host_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts',
json={'hosts': self.LIST_HOST_RESP},
status_code=200,
)
ret = self.api.host_list()
self.assertEqual(self.LIST_HOST_RESP, ret)
def test_host_list_zone(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts?zone=nova',
json={'hosts': [self.FAKE_HOST_RESP_3]},
status_code=200,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts',
json={'hosts': [self.FAKE_HOST_RESP_3]},
status_code=200,
)
ret = self.api.host_list(zone='nova')
self.assertEqual([self.FAKE_HOST_RESP_3], ret)
def test_host_set_none(self):
ret = self.api.host_set(host='myhost')
self.assertIsNone(ret)
def test_host_set(self):
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-hosts/myhost',
json={},
status_code=200,
)
ret = self.api.host_set(host='myhost', status='enabled')
self.assertEqual({}, ret)
def test_host_show(self):
FAKE_RESOURCE_1 = {
"cpu": 2,
"disk_gb": 1028,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 8192,
"project": "(total)",
}
FAKE_RESOURCE_2 = {
"cpu": 0,
"disk_gb": 0,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 512,
"project": "(used_now)",
}
FAKE_RESOURCE_3 = {
"cpu": 0,
"disk_gb": 0,
"host": "c1a7de0ac9d94e4baceae031d05caae3",
"memory_mb": 0,
"project": "(used_max)",
}
FAKE_HOST_RESP = [
{'resource': FAKE_RESOURCE_1},
{'resource': FAKE_RESOURCE_2},
{'resource': FAKE_RESOURCE_3},
]
FAKE_HOST_LIST = [
FAKE_RESOURCE_1,
FAKE_RESOURCE_2,
FAKE_RESOURCE_3,
]
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-hosts/myhost',
json={'host': FAKE_HOST_RESP},
status_code=200,
)
ret = self.api.host_show(host='myhost')
self.assertEqual(FAKE_HOST_LIST, ret)
class TestNetwork(TestComputeAPIv2):
FAKE_NETWORK_RESP = {
'id': '1',
'label': 'label1',
'cidr': '1.2.3.0/24',
}
FAKE_NETWORK_RESP_2 = {
'id': '2',
'label': 'label2',
'cidr': '4.5.6.0/24',
}
LIST_NETWORK_RESP = [
FAKE_NETWORK_RESP,
FAKE_NETWORK_RESP_2,
]
def test_network_create_default(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-networks',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_create('label1')
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-networks',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_create(
name='label1',
subnet='1.2.3.0/24',
)
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_delete_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/1',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-networks/1',
status_code=202,
)
ret = self.api.network_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_network_delete_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label1',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-networks/1',
status_code=202,
)
ret = self.api.network_delete('label1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_network_delete_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.network_delete,
'label3',
)
def test_network_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/1',
json={'network': self.FAKE_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_find('1')
self.assertEqual(self.FAKE_NETWORK_RESP, ret)
def test_network_find_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_find('label2')
self.assertEqual(self.FAKE_NETWORK_RESP_2, ret)
def test_network_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks/label3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.network_find,
'label3',
)
def test_network_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-networks',
json={'networks': self.LIST_NETWORK_RESP},
status_code=200,
)
ret = self.api.network_list()
self.assertEqual(self.LIST_NETWORK_RESP, ret)
class TestSecurityGroup(TestComputeAPIv2):
FAKE_SECURITY_GROUP_RESP = {
'id': '1',
'name': 'sg1',
'description': 'test security group',
'tenant_id': '0123456789',
'rules': []
}
FAKE_SECURITY_GROUP_RESP_2 = {
'id': '2',
'name': 'sg2',
'description': 'another test security group',
'tenant_id': '0123456789',
'rules': []
}
LIST_SECURITY_GROUP_RESP = [
FAKE_SECURITY_GROUP_RESP_2,
FAKE_SECURITY_GROUP_RESP,
]
def test_security_group_create_default(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-groups',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_create('sg1')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-groups',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_create(
name='sg1',
description='desc',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_delete_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-groups/1',
status_code=202,
)
ret = self.api.security_group_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_security_group_delete_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg1',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-groups/1',
status_code=202,
)
ret = self.api.security_group_delete('sg1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
def test_security_group_delete_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.security_group_delete,
'sg3',
)
def test_security_group_find_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_find('1')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_find_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_find('sg2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret)
def test_security_group_find_not_found(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg3',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.assertRaises(
osc_lib_exceptions.NotFound,
self.api.security_group_find,
'sg3',
)
def test_security_group_list_no_options(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_list()
self.assertEqual(self.LIST_SECURITY_GROUP_RESP, ret)
def test_security_group_set_options_id(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-security-groups/1',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP},
status_code=200,
)
ret = self.api.security_group_set(
security_group='1',
description='desc2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP, ret)
def test_security_group_set_options_name(self):
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups/sg2',
status_code=404,
)
self.requests_mock.register_uri(
'GET',
FAKE_URL + '/os-security-groups',
json={'security_groups': self.LIST_SECURITY_GROUP_RESP},
status_code=200,
)
self.requests_mock.register_uri(
'PUT',
FAKE_URL + '/os-security-groups/2',
json={'security_group': self.FAKE_SECURITY_GROUP_RESP_2},
status_code=200,
)
ret = self.api.security_group_set(
security_group='sg2',
description='desc2')
self.assertEqual(self.FAKE_SECURITY_GROUP_RESP_2, ret)
class TestSecurityGroupRule(TestComputeAPIv2):
FAKE_SECURITY_GROUP_RULE_RESP = {
'id': '1',
'name': 'sgr1',
'tenant_id': 'proj-1',
'ip_protocol': 'TCP',
'from_port': 1,
'to_port': 22,
'group': {},
# 'ip_range': ,
# 'cidr': ,
# 'parent_group_id': ,
}
def test_security_group_create_no_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
ret = self.api.security_group_rule_create(
security_group_id='1',
ip_protocol='tcp',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret)
def test_security_group_create_options(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
ret = self.api.security_group_rule_create(
security_group_id='1',
ip_protocol='tcp',
from_port=22,
to_port=22,
remote_ip='1.2.3.4/24',
)
self.assertEqual(self.FAKE_SECURITY_GROUP_RULE_RESP, ret)
def test_security_group_create_port_errors(self):
self.requests_mock.register_uri(
'POST',
FAKE_URL + '/os-security-group-rules',
json={'security_group_rule': self.FAKE_SECURITY_GROUP_RULE_RESP},
status_code=200,
)
self.assertRaises(
compute.InvalidValue,
self.api.security_group_rule_create,
security_group_id='1',
ip_protocol='tcp',
from_port='',
to_port=22,
remote_ip='1.2.3.4/24',
)
self.assertRaises(
compute.InvalidValue,
self.api.security_group_rule_create,
security_group_id='1',
ip_protocol='tcp',
from_port=0,
to_port=[],
remote_ip='1.2.3.4/24',
)
def test_security_group_rule_delete(self):
self.requests_mock.register_uri(
'DELETE',
FAKE_URL + '/os-security-group-rules/1',
status_code=202,
)
ret = self.api.security_group_rule_delete('1')
self.assertEqual(202, ret.status_code)
self.assertEqual("", ret.text)
|
migrations/versions/ad23a56abf25_.py | boladmin/security_monkey | 4,258 | 12650149 | <reponame>boladmin/security_monkey<gh_stars>1000+
"""Ability to link issues to other tech type items
Revision ID: <KEY>
Revises: 67ea2aac5ea0
Create Date: 2016-02-23 18:52:45.024716
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('issue_item_association',
sa.Column('super_issue_id', sa.Integer(), nullable=True),
sa.Column('sub_item_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sub_item_id'], ['item.id'], ),
sa.ForeignKeyConstraint(['super_issue_id'], ['itemaudit.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('issue_item_association')
### end Alembic commands ###
|
recipes/Python/415983_Simple_XML_serlializerdeserializer_using/recipe-415983.py | tdiprima/code | 2,023 | 12650197 | """Simple XML marshaling (serializing) and
unmarshaling (de-serializing) module using Python
dictionaries and the marshal module.
"""
from xml.sax.handler import ContentHandler
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import XMLReader
from xml.sax import make_parser
import marshal
import os,sys,zlib
class XMLDictionaryHandler(ContentHandler):
"""SAX Handler class which converts an XML
file to a corresponding Python dictionary """
def __init__(self):
self.curr=''
self.parent=''
self.count=0
self.d = {}
self.currd = {}
self.parentd = {}
self.stack = []
self.stack2 = []
def startElement(self, name, attrs):
""" Start element handler """
if self.count==0:
self.parent=name
self.d[name] = [dict(attrs),
'',
[]]
self.currd = self.d
else:
chld={name: [dict(attrs),
'',
[] ]}
self.parent = self.stack[-1]
self.parentd = self.stack2[-1]
chldlist = (self.parentd[self.parent])[2]
chldlist.append(chld)
self.currd = chld
self.stack.append(name)
self.stack2.append(self.currd)
self.curr=name
self.count += 1
def endElement(self, name):
""" End element handler """
self.stack.remove(name)
for item in self.stack2:
if item.has_key(name):
self.stack2.remove(item)
def characters(self, content):
""" Character handler """
content = (content.encode('utf-8')).strip()
if content:
myd=((self.parentd[self.parent])[2])[-1]
currcontent = (myd[self.curr])[1]
(myd[self.curr])[1] = "".join((currcontent, content))
def endDocument(self):
""" End document handler """
# Compress all text items
self.packtext(self.d)
def packtext(self, map):
for key, value in map.items():
text = value[1]
value[1] = zlib.compress(text)
children = value[2]
for submap in children:
self.packtext(submap)
class BinXMLSAXParser(XMLReader):
"""A parser for Python binary marshal files representing
XML information using SAX interfaces """
def __init__(self):
XMLReader.__init__(self)
self.depth = 0
def parse(self, stream):
""" Parse Method """
# Check if it is a file object
if type(stream) is file:
try:
self.d = marshal.load(stream)
except Exception, e:
sys.exit(e)
# Check if it is a file path
elif os.path.exists(stream):
try:
self.d = marshal.load(open(stream,'rb'))
except Exception, e:
sys.exit(e)
else:
            raise Exception('BinXMLSAXParserException: Invalid Input Source')
self._cont_handler.startDocument()
self.__parse(self.d)
self._cont_handler.endDocument()
def __parse(self, map):
""" Recursive parse method for
XML dictionary """
for key, value in map.items():
# For pretty printing
self._cont_handler.ignorableWhitespace(" "*self.depth)
attrs = value[0]
text = value[1]
children = value[2]
# Fire startElement handler event for key
self._cont_handler.startElement(key, attrs)
# Fire character handler event for value
self._cont_handler.characters(zlib.decompress(text))
# Nested element, recursively call
# this function...
self.depth += 1
# For pretty printing
self._cont_handler.ignorableWhitespace('\n')
for child in children:
self.__parse(child)
self.depth -= 1
# For pretty printing
self._cont_handler.ignorableWhitespace(" "*self.depth)
# Fire end element handler event
self._cont_handler.endElement(key)
# For pretty printing
self._cont_handler.ignorableWhitespace('\n')
class XMLMarshal(object):
""" The XML marshalling class """
def dump(stream, xmlfile):
""" Serialize XML data to a file """
try:
p=make_parser()
h = XMLDictionaryHandler()
p.setContentHandler(h)
p.parse(open(xmlfile))
# print h.d
marshal.dump(h.d, stream)
except Exception, e:
sys.exit(e)
def dumps(stream, xmlfile):
""" Serialize XML data to a string """
try:
p=make_parser()
            h = XMLDictionaryHandler()
            p.setContentHandler(h)
            p.parse(open(xmlfile))
            # note: 'stream' is unused here; the marshalled data is returned as a string
            return marshal.dumps(h.d)
except Exception, e:
sys.exit(e)
return None
def load(stream, out=sys.stdout):
""" Load an XML binary stream
and send XML text to the output
stream 'out' """
try:
p=BinXMLSAXParser()
p.setContentHandler(XMLGenerator(out))
p.parse(stream)
except Exception, e:
sys.exit(e)
def loads(stream):
""" Load an XML binary stream
and return XML text as string """
import cStringIO
c=cStringIO.StringIO()
try:
p=BinXMLSAXParser()
p.setContentHandler(XMLGenerator(c))
p.parse(stream)
except Exception, e:
sys.exit(e)
return c.getvalue()
dump=staticmethod(dump)
dumps=staticmethod(dumps)
load=staticmethod(load)
loads=staticmethod(loads)
if __name__ == '__main__':
fname = 'sample.xml'
binname = os.path.splitext(fname)[0] + '.bin'
# Dump XML text to binary
XMLMarshal.dump(open(binname,'wb'), fname)
# Dump XML binary to text
XMLMarshal.load(open(binname,'rb'), open('sample.xml','w'))
|
nuplan/planning/simulation/controller/tracker/abstract_tracker.py | motional/nuplan-devkit | 128 | 12650207 | <reponame>motional/nuplan-devkit
import abc
from nuplan.common.actor_state.dynamic_car_state import DynamicCarState
from nuplan.common.actor_state.ego_state import EgoState
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
from nuplan.planning.simulation.trajectory.abstract_trajectory import AbstractTrajectory
class AbstractTracker(abc.ABC):
"""
Interface for a generic tracker.
"""
@abc.abstractmethod
def initialize(self) -> None:
"""
Initializes the tracker.
"""
pass
@abc.abstractmethod
def track_trajectory(
self,
current_iteration: SimulationIteration,
next_iteration: SimulationIteration,
initial_state: EgoState,
trajectory: AbstractTrajectory,
) -> DynamicCarState:
"""
Return an ego state with updated dynamics according to the controller commands.
:param current_iteration: The current simulation iteration.
:param next_iteration: The desired next simulation iteration.
:param initial_state: The current simulation iteration.
:param trajectory: The reference trajectory to track.
:return: The ego state to be propagated
"""
pass
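

# Illustrative only: a minimal concrete tracker sketch that simply reuses the dynamics of
# the reference trajectory at the next iteration (a "perfect" tracker). The method
# get_state_at_time() and the attribute accesses below are assumptions for illustration,
# not guarantees about the nuplan API.
#
# class PassthroughTracker(AbstractTracker):
#     def initialize(self) -> None:
#         pass
#
#     def track_trajectory(
#         self,
#         current_iteration: SimulationIteration,
#         next_iteration: SimulationIteration,
#         initial_state: EgoState,
#         trajectory: AbstractTrajectory,
#     ) -> DynamicCarState:
#         # Sample the reference trajectory at the next time point and return its dynamics.
#         target_state = trajectory.get_state_at_time(next_iteration.time_point)
#         return target_state.dynamic_car_state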
|
applications/fixImageXml.py | vincentschut/isce2 | 1,133 | 12650231 | <filename>applications/fixImageXml.py<gh_stars>1000+
#!/usr/bin/env python3
import os
import argparse
import isce
import isceobj
from isceobj.Util.ImageUtil import ImageLib as IML
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Fixes pathnames in ISCE image XML files. Can be used to do more things in the future.')
parser.add_argument('-i', '--input', type=str, required=True, dest='infile',
help = 'Input image for which the XML file needs to be fixed.')
fname = parser.add_mutually_exclusive_group(required=True)
fname.add_argument('-f', '--full', action='store_true',
help = 'Replace filename with full path including dir in which file is located')
fname.add_argument('-b', '--base', action='store_true',
help = 'Replace filename with basename to use in current directory')
inps = parser.parse_args()
return inps
if __name__ == '__main__':
'''
Main driver.
'''
inps = cmdLineParse()
if inps.infile.endswith('.xml'):
inps.infile = os.path.splitext(inps.infile)[0]
dirname = os.path.dirname(inps.infile)
img = IML.loadImage(inps.infile)[0]
if inps.full:
fname = os.path.abspath( os.path.join(dirname, os.path.basename(inps.infile)))
else:
fname = os.path.basename( os.path.basename(inps.infile))
img.filename = fname
img.setAccessMode('READ')
img.renderHdr()
|