max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
analytics/duplicates_analyze.py | mattip/builder | 225 | 11111916 | #!/usr/bin/env python3
from typing import Dict, List
from subprocess import check_output
import os
import sys
def get_defined_symbols(fname: str, verbose: bool = False) -> Dict[str, int]:
if verbose:
print(f"Processing {fname}...", end='', flush=True)
if sys.platform == 'darwin':
lines = check_output(['nm', '--defined-only', '-n', fname]).decode('ascii').split("\n")[:-1]
rc = {}
for idx, line in enumerate(lines):
addr, stype, name = line.split(' ')
size = 4 if idx + 1 == len(lines) else (int(lines[idx + 1].split(' ')[0], 16) - int(addr, 16))
rc[name] = size
else:
lines = check_output(['nm', '--print-size', '--defined-only', fname]).decode('ascii').split('\n')
rc = {e[3]: int(e[1], 16) for e in [line.split() for line in lines] if len(e) == 4}
if verbose:
print("done")
return rc
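# Illustrative note (not part of the original script): on Linux, `nm --print-size --defined-only`
# emits lines like "0000000000401020 0000000000000015 T my_func", which the comprehension above
# turns into {"my_func": 0x15}. On macOS, nm lacks --print-size, so each symbol's size is
# estimated from the gap to the next symbol's address.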
def get_deps(fname: str) -> List[str]:
if sys.platform == 'darwin':
rc = []
lines = check_output(['otool', '-l', fname]).decode('ascii').split("\n")[1:-1]
for idx, line in enumerate(lines):
if line.strip() != 'cmd LC_LOAD_DYLIB':
continue
path = lines[idx + 2].strip()
assert path.startswith('name')
rc.append(os.path.basename(path.split(' ')[1]))
return rc
lines = check_output(['readelf', '--dynamic', fname]).decode('ascii').split('\n')
return [line.split('[')[1][:-1] for line in lines if '(NEEDED)' in line]
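# Illustrative example (assumed readelf output, not from the original): a line such as
#   " 0x0000000000000001 (NEEDED)  Shared library: [libc.so.6]"
# yields "libc.so.6" after line.split('[')[1][:-1].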
def humansize(size):
if size < 1024:
return f"{size} bytes"
if size < 1024**2:
return f"{int(size/1024)} Kb"
if size < 1024**3:
return f"{size/(1024.0**2):.2f} Mb"
return f"{size/(1024.0**3):.2f} Gb"
def print_sizes(libname, depth: int = 2) -> None:
libs = [libname]
symbols = {os.path.basename(libname): get_defined_symbols(libname, verbose=True)}
for _ in range(depth):
for lib in libs:
dirname = os.path.dirname(lib)
for dep in get_deps(lib):
path = os.path.join(dirname, dep)
if not os.path.exists(path):
continue
if path not in libs:
libs.append(path)
symbols[dep] = get_defined_symbols(path, verbose=True)
for lib in libs:
lib_symbols = symbols[os.path.basename(lib)]
lib_keys = set(lib_symbols.keys())
rc = f"{lib} symbols size {humansize(sum(lib_symbols.values()))}"
for dep in get_deps(lib):
if dep not in symbols:
continue
dep_overlap = lib_keys.intersection(set(symbols[dep].keys()))
overlap_size = sum(lib_symbols[k] for k in dep_overlap)
if overlap_size > 0:
rc += f" {dep} overlap is {humansize(overlap_size)}"
print(rc)
def print_symbols_overlap(libname1: str, libname2: str) -> None:
sym1 = get_defined_symbols(libname1, verbose=True)
sym2 = get_defined_symbols(libname2, verbose=True)
sym1_size = sum(sym1.values())
sym2_size = sum(sym2.values())
sym_overlap = set(sym1.keys()).intersection(set(sym2.keys()))
overlap_size = sum(sym1[s] for s in sym_overlap)
if overlap_size == 0:
print(f"{libname1} symbols size {humansize(sym1_size)} does not overlap with {libname2}")
return
print(f"{libname1} symbols size {humansize(sym1_size)} overlap {humansize(overlap_size)} ({100.0 * overlap_size/sym1_size :.2f}%)")
for sym in sym_overlap:
print(sym)
if __name__ == '__main__':
if len(sys.argv) == 3:
print_symbols_overlap(sys.argv[1], sys.argv[2])
else:
print_sizes(sys.argv[1] if len(sys.argv) > 1 else "lib/libtorch_cuda.so")
|
examples/django_110/app/views.py | ascan-io/raven-python | 1,108 | 11111934 | <gh_stars>1000+
import logging
logger = logging.getLogger('app')
def home(request):
logger.info('Doing some division')
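# Deliberate error: the division below raises ZeroDivisionError so that Raven/Sentry
# can capture and report it in this example view.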
1 / 0
|
polytester/parsers/unittest.py | skoczen/polytester | 115 | 11111977 | <gh_stars>100-1000
from .nose import NoseParser
class UnittestParser(NoseParser):
"""Output when using `python -m unittest` is the same as nose output."""
name = "unittest"
def command_matches(self, command):
return "unittest" in command
|
common/mongo_db.py | seaglass-project/seaglass | 211 | 11111987 | <gh_stars>100-1000
from pprint import pprint
import pymongo
import time
import sys
import traceback
import os
import base64
import datetime
import common.utils as utils
import common.scan as scan
DB_INSERT_TIMEOUT = 1
# Used to prevent timeouts on cursors
BATCH_SIZE = 1000
# Nice to have some versioning
VERSION = 0
class Database():
''' This is a helpful class to handle the necessary database operations'''
def __init__(self, db_name, collection_name, host="localhost", port=27017, authentication=None):
'''Establishes the database connection
Args:
db_name (String): Name of the database
collection_name (String): Name of the collection
host (String): hostname of database
port (int): port of database
authentication ({'username' : XXX, 'password' : XXX, 'source' : XXX}):
This specifies the authentication parameters if necessary. If not specified
then no authentication is used. All of these arguments must be present in
the authentication dict.
'''
client = pymongo.MongoClient(host, port)
if authentication is not None:
# Raise an exception if some of the authentication params are missing
if 'username' not in authentication or \
'password' not in authentication or \
'source' not in authentication:
raise Exception("Missing critical authentication argument")
# Now do the actual authentication
client[db_name].authenticate(authentication['username'], \
authentication['password'], \
source=authentication['source'])
self.collection = client[db_name][collection_name]
def insert_sensor_point(self, full_scan, version=VERSION):
''' This will insert a scan point + gps into the database
Args:
full_scan (Scan): The object that represents the entire scan
'''
# Begin with the scan document
mongo_dict = full_scan.document()
rand = os.urandom(128)
mongo_dict['unique_id'] = base64.b64encode(rand).decode('utf-8')
mongo_dict['version'] = version
self.insert_mongo_point(mongo_dict)
def insert_mongo_point(self, mongo_dict):
# If the connection has a timeout then just keep trying.
# If the database is down there is no point in collecting
# data anyway.
insertion_successful = False
while not insertion_successful:
try:
# Finally insert the point and set a bool to leave the loop
utils.log("Trying to write to the DB...")
self.collection.insert_one(mongo_dict)
insertion_successful = True
utils.log("Done writing to DB.")
except Exception as e:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
traceback.print_exception(exceptionType, exceptionValue,
exceptionTraceback, file=sys.stdout)
utils.log("Error writing to DB: {}".format(e))
time.sleep(DB_INSERT_TIMEOUT)
def insert_mongo_points(self, mongo_dicts):
# If the connection has a timeout then just keep trying.
# If the database is down there is no point in collecting
# data anyway.
insertion_successful = False
while not insertion_successful:
try:
# Finally insert the point and set a bool to leave the loop
utils.log("Trying to write to the DB...")
self.collection.insert_many(mongo_dicts)
insertion_successful = True
utils.log("Done writing to DB.")
except Exception as e:
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
traceback.print_exception(exceptionType, exceptionValue,
exceptionTraceback, file=sys.stdout)
utils.log("Error writing to DB: {}".format(e))
time.sleep(DB_INSERT_TIMEOUT)
def get_scans(self, uuids=None):
'''This returns an iterable object to get all of the scan objects in the db'''
# Just start grabbing all of the points
points = self.collection.find()
# The points object is a pymongo cursor. However there are timeouts if the
# cursor reads too many points so we will set it manually
points.batch_size(BATCH_SIZE)
i = 0
for point in points:
i += 1
if i % 1000 == 0:
print("Collection point number: ", i)
if uuids is None or point['unique_id'] not in uuids:
gps_before = point['gps_before']
gps_after = point['gps_after']
gsm = point['gsm']
sensor_name = point['sensor_name']
uuid = point['unique_id']
version = point['version']
yield (scan.scan_factory(gsm, gps_before, gps_after, sensor_name), uuid, version)
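# Hypothetical usage sketch (database/collection names, credentials and document fields are
# illustrative assumptions, not from the original):
#   db = Database("seaglass", "scans", host="localhost", port=27017,
#                 authentication={"username": "user", "password": "pass", "source": "admin"})
#   db.insert_mongo_point({"gsm": {}, "gps_before": {}, "gps_after": {}, "sensor_name": "s1"})
#   for scan_obj, uuid, version in db.get_scans():
#       ...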
|
Algo and DSA/LeetCode-Solutions-master/Python/maximum-profit-in-job-scheduling.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 11111991 | <filename>Algo and DSA/LeetCode-Solutions-master/Python/maximum-profit-in-job-scheduling.py
# Time: O(nlogn)
# Space: O(n)
import itertools
import bisect
class Solution(object):
def jobScheduling(self, startTime, endTime, profit):
"""
:type startTime: List[int]
:type endTime: List[int]
:type profit: List[int]
:rtype: int
"""
jobs = sorted(itertools.izip(endTime, startTime, profit))
dp = [(0, 0)]
for e, s, p in jobs:
i = bisect.bisect_right(dp, (s+1, 0))-1
if dp[i][1]+p > dp[-1][1]:
dp.append((e, dp[i][1]+p))
return dp[-1][1]
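# Worked example for the DP above (LeetCode sample input, added for illustration):
#   startTime=[1,2,3,3], endTime=[3,4,5,6], profit=[50,10,40,70]
#   jobs sorted by end time -> (3,1,50), (4,2,10), (5,3,40), (6,3,70)
#   dp grows as [(0,0)] -> [(0,0),(3,50)] -> [(0,0),(3,50),(5,90)] -> [(0,0),(3,50),(5,90),(6,120)]
#   answer: dp[-1][1] == 120 (take the jobs worth 50 and 70)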
# Time: O(nlogn)
# Space: O(n)
import heapq
class Solution(object):
def jobScheduling(self, startTime, endTime, profit):
"""
:type startTime: List[int]
:type endTime: List[int]
:type profit: List[int]
:rtype: int
"""
min_heap = zip(startTime, endTime, profit)
heapq.heapify(min_heap)
result = 0
while min_heap:
s, e, p = heapq.heappop(min_heap)
if s < e:
heapq.heappush(min_heap, (e, s, result+p))
else:
result = max(result, p)
return result
|
scripts/converter/convert_geometries.py | HarpieRapace45/webots | 1,561 | 11112020 | <gh_stars>1000+
#!/usr/bin/env python3
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert R2020b world file from the NUE to the ENU coordinate system."""
from transforms3d import quaternions
from webots_parser import WebotsParser
coordinateSystem = 'ENU'
def rotation(value, r):
# if value == ['0', '0', '1', '1.57'] and r == ['1', '0', '0', '-1.57079632679']:
# return [WebotsParser.str(0), WebotsParser.str(1), WebotsParser.str(0), WebotsParser.str(-1.57079632679)]
q0 = quaternions.axangle2quat([float(value[0]), float(value[1]), float(value[2])], float(value[3]))
q1 = quaternions.axangle2quat([float(r[0]), float(r[1]), float(r[2])], float(r[3]))
qr = quaternions.qmult(q1, q0)
v, theta = quaternions.quat2axangle(qr)
return [WebotsParser.str(v[0]), WebotsParser.str(v[1]), WebotsParser.str(v[2]), WebotsParser.str(theta)]
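# Sanity check of the quaternion composition above (illustrative, not in the original):
# composing two quarter-turns about the x axis,
#   rotation(['1', '0', '0', '1.5707963'], ['1', '0', '0', '1.5707963'])
# yields an axis-angle close to ([1, 0, 0], pi), i.e. a half-turn about x.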
def createNewTransform():
if coordinateSystem == 'ENU':
return {'fields': [{'name': 'Geometrics_conversion', 'value': 'Geometrics_conversion',
'type': 'SFString'}, {'name': 'rotation', 'value': ['1', '0', '0', '1.57079632679'],
'type': 'SFRotation'},
{'name': 'children', 'type': 'MFNode', 'value': []}],
'type': 'node', 'name': 'Transform'}
else:
return {'fields': [{'name': 'Geometrics_conversion', 'value': 'Geometrics_conversion',
'type': 'SFString'}, {'name': 'rotation', 'value': ['1', '0', '0', '-1.57079632679'],
'type': 'SFRotation'},
{'name': 'children', 'type': 'MFNode', 'value': []}],
'type': 'node', 'name': 'Transform'}
def convert_children(node, parent):
if 'USE' in node:
return
if node['name'] in 'Shape':
for field in node['fields']:
if field['name'] in ['geometry'] and 'USE' not in field['value'] and field['value']['name'] in ['Cylinder', 'Cone',
'Capsule',
'ElevationGrid',
'Plane']:
isDef = False
defName = None
position = -1
for index in range(0, len(parent)):
if parent[index] == node:
position = index
# We need to transfer the def of the geometry to the transform
if 'DEF' in field['value']:
defName = field['value']['DEF']
isDef = True
field['value'].pop('DEF')
# We need to transfer the def of the shape to the transform
if 'DEF' in node:
defName = node['DEF']
isDef = True
node.pop('DEF')
newTransform = createNewTransform()
for param in newTransform['fields']:
if param['name'] in 'children':
param['value'] = [node]
if isDef:
newTransform['DEF'] = defName
parent[position] = newTransform
# Case of boundingObject
elif node['name'] in ['Cylinder', 'Capsule', 'ElevationGrid', 'Plane']:
newTransform = createNewTransform()
for param in newTransform['fields']:
if param['name'] in 'children':
param['value'] = [node]
# Case where this is a geometry directly inserted in a boundingObject
if 'fields' in parent:
for field in parent['fields']:
if field['name'] in 'boundingObject':
field['value'] = newTransform
# Case where this is a geometry in a transform
else:
parent.remove(node)
parent.append(newTransform)
for field in node['fields']:
if field['name'] in 'Geometrics_conversion':
break
elif field['name'] in 'children':
for child in field['value']:
convert_children(child, field['value'])
elif field['name'] in 'endPoint':
convert_children(field['value'], field['value'])
elif field['name'] in 'boundingObject':
convert_children(field['value'], node)
def cleanTransform(node):
for field in node['fields']:
if field['name'] in 'Geometrics_conversion':
node['fields'].remove(field)
break
elif field['name'] in 'children':
for child in field['value']:
cleanTransform(child)
elif field['name'] in ['endPoint', 'boundingObject']:
cleanTransform(field['value'])
def squashUniqueTransform(node):
if 'USE' in node:
return
if node['name'] in ['Transform']:
for field in node['fields']:
if field['name'] in 'children':
if len(field['value']) == 1 and field['value'][0]['name'] in 'Transform' and 'DEF' not in field['value'][0]:
childT = field['value'][0]
for fieldC in childT['fields']:
if fieldC['name'] == 'Geometrics_conversion':
mergeTransform(node, childT)
for field in node['fields']:
if field['name'] in 'children':
for child in field['value']:
squashUniqueTransform(child)
elif field['name'] in ['endPoint', 'boundingObject']:
squashUniqueTransform(field['value'])
def mergeTransform(parent, child):
childRotation = None
isChildRotation = False
childShape = None
isChildShape = False
for childField in child['fields']:
if childField['name'] in 'rotation':
childRotation = childField['value']
isChildRotation = True
if childField['name'] in 'children':
if childField['value'][0]['name'] in ['Shape', 'Cylinder', 'Capsule', 'ElevationGrid', 'Plane']:
childShape = childField['value'][0]
isChildShape = True
if isChildRotation and isChildShape:
isParentRotation = False
for parentField in parent['fields']:
if parentField['name'] in 'rotation':
parentField['value'] = rotation(parentField['value'], childRotation)
isParentRotation = True
if parentField['name'] in 'children':
parentField['value'] = [childShape]
if not isParentRotation:
parent['fields'].append({'name': 'rotation',
'value': childRotation,
'type': 'SFRotation'})
def convert_to_enu(filename):
world = WebotsParser()
world.load(filename)
global coordinateSystem
print("Add transforms")
for node in world.content['root']:
if node['name'] == 'WorldInfo':
for field in node['fields']:
if field['name'] in 'coordinateSystem':
coordinateSystem = field['value']
else:
convert_children(node, world.content['root'])
print("Merge transforms")
for node in world.content['root']:
squashUniqueTransform(node)
cleanTransform(node)
world.save(filename)
if __name__ == "__main__":
# execute only if run as a script
# for filename in sys.argv:
# if not filename.endswith('.wbt'):
# continue
filename = "tests/api/worlds/range_finder.wbt"
print(filename)
convert_to_enu(filename)
|
mapproxy/cache/__init__.py | cunha17/mapproxy | 347 | 11112043 | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tile caching (creation, caching and retrieval of tiles).
.. digraph:: Schematic Call Graph
ranksep = 0.1;
node [shape="box", height="0", width="0"]
cl [label="CacheMapLayer" href="<mapproxy.layer.CacheMapLayer>"]
tm [label="TileManager", href="<mapproxy.cache.tile.TileManager>"];
fc [label="FileCache", href="<mapproxy.cache.file.FileCache>"];
s [label="Source", href="<mapproxy.source.Source>"];
{
cl -> tm [label="load_tile_coords"];
tm -> fc [label="load\\nstore\\nis_cached"];
tm -> s [label="get_map"]
}
"""
|
tutorials/W0D3_LinearAlgebra/solutions/W0D3_Tutorial2_Solution_a86560cc.py | eduardojdiniz/CompNeuro | 2,294 | 11112058 | <reponame>eduardojdiniz/CompNeuro
A = np.array([[-1, 0], [0, 1]])
# Uncomment to visualize transformation
plot_linear_transformation(A) |
ProofOfConcepts/Vision/OpenMvStereoVision/src/target_code/lens_correction.py | WoodData/EndpointAI | 190 | 11112082 | # Lens Correction
import sensor, image, time
sensor.reset()
sensor.set_framesize(sensor.VGA)
sensor.set_pixformat(sensor.RGB565)
sensor.set_windowing((160,120,320,240))
sensor.skip_frames(time = 2000)
data_fb = sensor.alloc_extra_fb(320, 240, sensor.RGB565)
clock = time.clock()
count = 0
remap = False
while(True):
clock.tick()
# test without remap, with remap QVGA and with remap QQVGA
img = sensor.snapshot()
if remap:
img.remap(data_fb, right=False, upside_down=False)
count += 1
if count == 100:
print("remap QVGA GRAYSCALE")
remap = True
sensor.set_framesize(sensor.VGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_windowing((160,120,320,240))
elif count == 200:
print("remap QVGA")
remap = True
sensor.set_framesize(sensor.VGA)
sensor.set_pixformat(sensor.RGB565)
sensor.set_windowing((160,120,320,240))
elif count == 300:
print("remap QQVGA")
remap = True
sensor.set_framesize(sensor.QVGA)
sensor.set_pixformat(sensor.RGB565)
sensor.set_windowing((78,60,160,120))
elif count == 400:
print("no remap VGA")
remap = False
sensor.set_framesize(sensor.VGA)
sensor.set_pixformat(sensor.RGB565)
sensor.set_windowing((160,120,320,240))
count = 0
|
src/dal_select2_queryset_sequence/fields.py | epoiate/django-autocomplete-light | 1,368 | 11112087 | """Autocomplete fields for Select2GenericForeignKey choices."""
from dal_queryset_sequence.fields import QuerySetSequenceModelField
from dal_select2_queryset_sequence.views import Select2QuerySetSequenceAutoView
from dal_select2_queryset_sequence.widgets import QuerySetSequenceSelect2
from django.conf.urls import url
from queryset_sequence import QuerySetSequence
class Select2GenericForeignKeyModelField(QuerySetSequenceModelField):
"""
Select2GenericForeignKeyModelField class.
Field that generate automatically the view for the
QuerySetSequenceSelect2 widget
"""
def __init__(self, *args, **kwargs):
"""
Initialize Select2GenericForeignKeyModelField.
:param args:
:param model_choice:
[(Model, 'filter_by', [('forwardfield_name', 'filter_by')]), ],
List of tuples, for each select2 widget. Model is the model to
query, 'filter_by' the attribute of the model to apply the filter.
The list in the tuple is optional, its to forward a field from the
form to the widget.
:param field_id: Optional name instead of the automatic one
:param kwargs:
"""
model_choice = kwargs.pop('model_choice', None)
field_id = kwargs.pop('field_id', None)
self.field_id = field_id if field_id else id(self)
if model_choice:
self.model_choice = model_choice
models_queryset = [model[0].objects.all()
for model in model_choice]
kwargs['queryset'] = QuerySetSequence(*models_queryset)
super(Select2GenericForeignKeyModelField, self).__init__(*args, **kwargs)
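# Hypothetical usage (model names and filter fields are illustrative assumptions):
#   field = Select2GenericForeignKeyModelField(
#       model_choice=[(Author, 'name'), (Book, 'title')],
#       required=False,
#   )
# Each tuple queries one model and filters it on the given attribute, as described in the
# docstring above; an optional third element forwards form fields to the widget.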
def as_url(self, form):
"""Return url."""
url_name = '{}_autocomp_{}'.format(form.__name__, self.field_id)
forward_fields = {
forward_tuple[0]
for field in self.model_choice if len(field) > 2
for forward_tuple in field[2]
}
# a set of the fields to forward.
# it checks if the 3rd index of the list exists
self.widget = QuerySetSequenceSelect2(
url=url_name, forward=forward_fields
)
# generate the class to work with multiple gfk
# (can't work on instance level)
auto_view = type(
'Autoview{}{}'.format(form.__name__, self.field_id),
(Select2QuerySetSequenceAutoView,),
{'model_choice': self.model_choice}
) # send to the view the model and filter list
return url(
r'^{}_{}_autocomp$'.format(form.__name__, self.field_id),
auto_view.as_view(),
name=url_name
)
|
desktop/core/ext-py/python-ldap-2.3.13/Tests/Lib/ldap/schema/test_tokenizer.py | kokosing/hue | 5,079 | 11112103 | <gh_stars>1000+
import ldap.schema
from ldap.schema.tokenizer import split_tokens,extract_tokens
testcases_split_tokens = (
(" BLUBBER DI BLUBB ", ["BLUBBER", "DI", "BLUBB"]),
("BLUBBER DI BLUBB",["BLUBBER","DI","BLUBB"]),
("BLUBBER DI BLUBB ",["BLUBBER","DI","BLUBB"]),
("BLUBBER DI 'BLUBB' ",["BLUBBER","DI","BLUBB"]),
("BLUBBER ( DI ) 'BLUBB' ",["BLUBBER","(","DI",")","BLUBB"]),
("BLUBBER(DI)",["BLUBBER","(","DI",")"]),
("BLUBBER ( DI)",["BLUBBER","(","DI",")"]),
("BLUBBER ''",["BLUBBER",""]),
("( BLUBBER (DI 'BLUBB'))",["(","BLUBBER","(","DI","BLUBB",")",")"]),
("BLUBB (DA$BLAH)",['BLUBB',"(","DA","BLAH",")"]),
("BLUBB ( DA $ BLAH )",['BLUBB',"(","DA","BLAH",")"]),
("BLUBB (DA$ BLAH)",['BLUBB',"(","DA","BLAH",")"]),
("BLUBB (DA $BLAH)",['BLUBB',"(","DA","BLAH",")"]),
("BLUBB 'DA$BLAH'",['BLUBB',"DA$BLAH"]),
("BLUBB DI 'BLU B B ER' DA 'BLAH' ",['BLUBB','DI','BLU B B ER','DA','BLAH']),
("BLUBB DI 'BLU B B ER' DA 'BLAH' LABER",['BLUBB','DI','BLU B B ER','DA','BLAH','LABER']),
("BLUBBER DI 'BLU'BB ER' DA 'BLAH' ", ["BLUBBER", "DI", "BLU'BB ER", "DA", "BLAH"]), # for Oracle
("BLUBB DI 'BLU B B ER'MUST 'BLAH' ",['BLUBB','DI','BLU B B ER','MUST','BLAH']) # for Oracle
)
for t,r in testcases_split_tokens:
l = ldap.schema.tokenizer.split_tokens(t,{'MUST':None})
if l!=r:
print 'String:',repr(t)
print '=>',l
print 'differs from',r
|
bcs-ui/backend/templatesets/legacy_apps/configuration/migrations/0034_auto_20200106_1455.py | laodiu/bk-bcs | 599 | 11112118 | <reponame>laodiu/bk-bcs<filename>bcs-ui/backend/templatesets/legacy_apps/configuration/migrations/0034_auto_20200106_1455.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.5 on 2020-01-06 06:55
from __future__ import unicode_literals
from django.db import migrations, models
import backend.templatesets.legacy_apps.configuration.models.mixins
class Migration(migrations.Migration):
dependencies = [
('configuration', '0033_auto_20191103_2342'),
]
operations = [
migrations.CreateModel(
name='Ingress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creator', models.CharField(max_length=32, verbose_name='创建者')),
('updator', models.CharField(max_length=32, verbose_name='更新者')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('deleted_time', models.DateTimeField(blank=True, null=True)),
('config', models.TextField(verbose_name='配置信息')),
('name', models.CharField(default='', max_length=255, verbose_name='名称')),
],
options={
'ordering': ('created',),
'abstract': False,
},
bases=(
models.Model,
backend.templatesets.legacy_apps.configuration.models.mixins.MConfigMapAndSecretMixin,
),
),
migrations.AlterField(
model_name='resourcefile',
name='resource_name',
field=models.CharField(
choices=[
('Deployment', 'Deployment'),
('Service', 'Service'),
('ConfigMap', 'ConfigMap'),
('Secret', 'Secret'),
('Ingress', 'Ingress'),
('StatefulSet', 'StatefulSet'),
('DaemonSet', 'DaemonSet'),
('Job', 'Job'),
('HPA', 'HPA'),
('ServiceAccount', 'ServiceAccount'),
('ClusterRole', 'ClusterRole'),
('ClusterRoleBinding', 'ClusterRoleBinding'),
('PodDisruptionBudget', 'PodDisruptionBudget'),
('StorageClass', 'StorageClass'),
('PersistentVolume', 'PersistentVolume'),
('PersistentVolumeClaim', 'PersistentVolumeClaim'),
],
max_length=32,
),
),
]
|
examples/ConsPortfolioModel/example_ConsPortfolioModel.py | AMonninger/HARK | 264 | 11112119 | # %%
"""
Example implementations of HARK.ConsumptionSaving.ConsPortfolioModel
"""
from copy import copy
from time import time
import matplotlib.pyplot as plt
import numpy as np
from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle
from HARK.ConsumptionSaving.ConsPortfolioModel import (
PortfolioConsumerType,
init_portfolio,
)
from HARK.utilities import plot_funcs
# %%
# Make and solve an example portfolio choice consumer type
print("Now solving an example portfolio choice problem; this might take a moment...")
MyType = PortfolioConsumerType()
MyType.cycles = 0
t0 = time()
MyType.solve()
t1 = time()
MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)]
MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)]
print(
"Solving an infinite horizon portfolio choice problem took "
+ str(t1 - t0)
+ " seconds."
)
# %%
# Plot the consumption and risky-share functions
print("Consumption function over market resources:")
plot_funcs(MyType.cFunc[0], 0.0, 20.0)
print("Risky asset share as a function of market resources:")
print("Optimal (blue) versus Theoretical Limit (orange)")
plt.xlabel("Normalized Market Resources")
plt.ylabel("Portfolio Share")
plt.ylim(0.0, 1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plot_funcs(
[
MyType.ShareFunc[0],
lambda m: MyType.ShareLimit * np.ones_like(m),
],
0.0,
200.0,
)
# %%
# Now simulate this consumer type
MyType.track_vars = ["cNrm", "Share", "aNrm", "t_age"]
MyType.T_sim = 100
MyType.initialize_sim()
MyType.simulate()
# %%
print("\n\n\n")
print("For derivation of the numerical limiting portfolio share")
print("as market resources approach infinity, see")
print(
"http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/"
)
# %%
""
# Make another example type, but this one optimizes risky portfolio share only
# on the discrete grid of values implicitly chosen by RiskyCount, using explicit
# value maximization.
init_discrete_share = init_portfolio.copy()
init_discrete_share["DiscreteShareBool"] = True
# Have to actually construct value function for this to work
init_discrete_share["vFuncBool"] = True
# %%
# Make and solve a discrete portfolio choice consumer type
print("Now solving a discrete choice portfolio problem; this might take a minute...")
DiscreteType = PortfolioConsumerType(**init_discrete_share)
DiscreteType.cycles = 0
t0 = time()
DiscreteType.solve()
t1 = time()
DiscreteType.cFunc = [
DiscreteType.solution[t].cFuncAdj for t in range(DiscreteType.T_cycle)
]
DiscreteType.ShareFunc = [
DiscreteType.solution[t].ShareFuncAdj for t in range(DiscreteType.T_cycle)
]
print(
"Solving an infinite horizon discrete portfolio choice problem took "
+ str(t1 - t0)
+ " seconds."
)
# %%
# Plot the consumption and risky-share functions
print("Consumption function over market resources:")
plot_funcs(DiscreteType.cFunc[0], 0.0, 50.0)
print("Risky asset share as a function of market resources:")
print("Optimal (blue) versus Theoretical Limit (orange)")
plt.xlabel("Normalized Market Resources")
plt.ylabel("Portfolio Share")
plt.ylim(0.0, 1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plot_funcs(
[DiscreteType.ShareFunc[0], lambda m: DiscreteType.ShareLimit * np.ones_like(m)],
0.0,
200.0,
)
# %%
print("\n\n\n")
# %%
""
# Make another example type, but this one can only update their risky portfolio
# share in any particular period with 15% probability.
init_sticky_share = init_portfolio.copy()
init_sticky_share["AdjustPrb"] = 0.15
# %%
# Make and solve a discrete portfolio choice consumer type
print(
'Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...'
)
StickyType = PortfolioConsumerType(**init_sticky_share)
StickyType.cycles = 0
t0 = time()
StickyType.solve()
t1 = time()
StickyType.cFuncAdj = [
StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)
]
StickyType.cFuncFxd = [
StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)
]
StickyType.ShareFunc = [
StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)
]
print(
"Solving an infinite horizon sticky portfolio choice problem took "
+ str(t1 - t0)
+ " seconds."
)
# %%
# Plot the consumption and risky-share functions
print(
"Consumption function over market resources when the agent can adjust his portfolio:"
)
plot_funcs(StickyType.cFuncAdj[0], 0.0, 50.0)
# %%
print(
"Consumption function over market resources when the agent CAN'T adjust, by current share:"
)
M = np.linspace(0.0, 50.0, 200)
for s in np.linspace(0.0, 1.0, 21):
C = StickyType.cFuncFxd[0](M, s * np.ones_like(M))
plt.plot(M, C)
plt.xlim(0.0, 50.0)
plt.ylim(0.0, None)
plt.show()
# %%
print("Risky asset share function over market resources (when possible to adjust):")
print("Optimal (blue) versus Theoretical Limit (orange)")
plt.xlabel("Normalized Market Resources")
plt.ylabel("Portfolio Share")
plt.ylim(0.0, 1.0)
plot_funcs(
[StickyType.ShareFunc[0], lambda m: StickyType.ShareLimit * np.ones_like(m)],
0.0,
200.0,
)
# %%
""
# Make another example type, but this one has *age-varying* perceptions of risky asset returns.
# Begin by making a lifecycle dictionary, but adjusted for the portfolio choice model.
init_age_varying_risk_perceptions = copy(init_lifecycle)
init_age_varying_risk_perceptions["RiskyCount"] = init_portfolio["RiskyCount"]
init_age_varying_risk_perceptions["ShareCount"] = init_portfolio["ShareCount"]
init_age_varying_risk_perceptions["aXtraMax"] = init_portfolio["aXtraMax"]
init_age_varying_risk_perceptions["aXtraCount"] = init_portfolio["aXtraCount"]
init_age_varying_risk_perceptions["aXtraNestFac"] = init_portfolio["aXtraNestFac"]
init_age_varying_risk_perceptions["BoroCnstArt"] = init_portfolio["BoroCnstArt"]
init_age_varying_risk_perceptions["CRRA"] = init_portfolio["CRRA"]
init_age_varying_risk_perceptions["DiscFac"] = init_portfolio["DiscFac"]
# %%
init_age_varying_risk_perceptions["RiskyAvg"] = [1.08] * init_lifecycle["T_cycle"]
init_age_varying_risk_perceptions["RiskyStd"] = list(
np.linspace(0.20, 0.30, init_lifecycle["T_cycle"])
)
init_age_varying_risk_perceptions["RiskyAvgTrue"] = 1.08
init_age_varying_risk_perceptions["RiskyStdTrue"] = 0.20
AgeVaryingRiskPercType = PortfolioConsumerType(**init_age_varying_risk_perceptions)
AgeVaryingRiskPercType.cycles = 1
# %%
# Solve the agent type with age-varying risk perceptions
# print('Now solving a portfolio choice problem with age-varying risk perceptions...')
t0 = time()
AgeVaryingRiskPercType.solve()
AgeVaryingRiskPercType.cFunc = [
AgeVaryingRiskPercType.solution[t].cFuncAdj
for t in range(AgeVaryingRiskPercType.T_cycle)
]
AgeVaryingRiskPercType.ShareFunc = [
AgeVaryingRiskPercType.solution[t].ShareFuncAdj
for t in range(AgeVaryingRiskPercType.T_cycle)
]
t1 = time()
print(
"Solving a "
+ str(AgeVaryingRiskPercType.T_cycle)
+ " period portfolio choice problem with age-varying risk perceptions took "
+ str(t1 - t0)
+ " seconds."
)
# %%
# Plot the consumption and risky-share functions
print("Consumption function over market resources in each lifecycle period:")
plot_funcs(AgeVaryingRiskPercType.cFunc, 0.0, 20.0)
print("Risky asset share function over market resources in each lifecycle period:")
plot_funcs(AgeVaryingRiskPercType.ShareFunc, 0.0, 200.0)
# %% [markdown]
# The code below tests the mathematical limits of the model.
# %%
# Create a grid of market resources for the plots
mMin = 0 # Minimum ratio of assets to income to plot
mMax = 5 * 1e2 # Maximum ratio of assets to income to plot
mPts = 1000 # Number of points to plot
eevalgrid = np.linspace(0, mMax, mPts) # range of values of assets for the plot
# Number of points that will be used to approximate the risky distribution
risky_count_grid = [5, 50, 200]
# Plot by ages (time periods) at which to plot. We will use the default life-cycle calibration.
ages = [2, 4, 6, 8]
# Create lifecycle dictionary with portfolio choice parameters
merton_dict = copy(init_lifecycle)
merton_dict["RiskyCount"] = init_portfolio["RiskyCount"]
merton_dict["ShareCount"] = init_portfolio["ShareCount"]
merton_dict["aXtraMax"] = init_portfolio["aXtraMax"]
merton_dict["aXtraCount"] = init_portfolio["aXtraCount"]
merton_dict["aXtraNestFac"] = init_portfolio["aXtraNestFac"]
merton_dict["BoroCnstArt"] = init_portfolio["BoroCnstArt"]
merton_dict["CRRA"] = init_portfolio["CRRA"]
merton_dict["DiscFac"] = init_portfolio["DiscFac"]
merton_dict["RiskyAvgTrue"] = 1.08
merton_dict["RiskyStdTrue"] = 0.20
# Create a function to compute the Merton-Samuelson limiting portfolio share.
def RiskyShareMertSamLogNormal(RiskPrem, CRRA, RiskyVar):
return RiskPrem / (CRRA * RiskyVar)
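# Illustrative arithmetic (Rfree and CRRA values here are assumptions for the example only):
# with RiskyAvg = 1.08, Rfree = 1.03 and CRRA = 5, the risk premium is 0.05 and
# RiskyVar = 0.20**2 = 0.04, so the Merton-Samuelson share is 0.05 / (5 * 0.04) = 0.25.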
# %% Calibration and solution
for rcount in risky_count_grid:
# Create a new dictionary and replace the number of points that
# approximate the risky return distribution
# Create new dictionary copying the default
merton_dict["RiskyCount"] = rcount
# Create and solve agent
agent = PortfolioConsumerType(**merton_dict)
agent.solve()
# Compute the analytical Merton-Samuelson limiting portfolio share
RiskyVar = agent.RiskyStd ** 2
RiskPrem = agent.RiskyAvg - agent.Rfree
MS_limit = RiskyShareMertSamLogNormal(RiskPrem, agent.CRRA, RiskyVar)
# Now compute the limiting share numerically, using the approximated
# distribution
agent.update_ShareLimit()
NU_limit = agent.ShareLimit
plt.figure()
for a in ages:
plt.plot(
eevalgrid, agent.solution[a].ShareFuncAdj(eevalgrid), label="t = %i" % (a)
)
plt.axhline(
NU_limit, c="k", ls="-.", label="Exact limit as $m\\rightarrow \\infty$."
)
plt.axhline(
MS_limit, c="k", ls="--", label="M&S Limit without returns discretization."
)
plt.ylim(0, 1.05)
plt.xlim(eevalgrid[0] + 1, eevalgrid[-1])
plt.xscale("log")
plt.legend()
plt.title(
"Risky Portfolio Share by Age\n Risky distribution with {points} equiprobable points".format(
points=rcount
)
)
plt.xlabel("Wealth (m)")
plt.ioff()
plt.draw()
# %%
# %%
|
isaacgymenvs/rl_games/common/interval_summary_writer.py | ZaneZh/IsaacGymEnvs | 193 | 11112151 | <gh_stars>100-1000
import time
class IntervalSummaryWriter:
"""
Summary writer wrapper designed to reduce the size of tf.events files.
It will prevent the learner from writing the summaries more often than a specified interval, i.e. if the
current interval is 20 seconds and we wrote our last summary for a particular summary key at 01:00, all summaries
until 01:20 for that key will be ignored.
The interval is adaptive: it will approach 1/200th of the total training time, but no less than interval_sec_min
and no greater than interval_sec_max.
This was created to facilitate really big training runs, such as with Population-Based training, where summary
folders reached tens of gigabytes.
"""
def __init__(self, summary_writer, cfg):
self.experiment_start = time.time()
# prevents noisy summaries when experiments are restarted
self.defer_summaries_sec = cfg.get('defer_summaries_sec', 5)
self.interval_sec_min = cfg.get('summaries_interval_sec_min', 5)
self.interval_sec_max = cfg.get('summaries_interval_sec_max', 300)
self.last_interval = self.interval_sec_min
# interval between summaries will be close to this fraction of the total training time,
# i.e. for a run that lasted 200 minutes we write one summary every minute.
self.summaries_relative_step = 1.0 / 200
self.writer = summary_writer
self.last_write_for_tag = dict()
def _calc_interval(self):
"""Write summaries more often in the beginning of the run."""
if self.last_interval >= self.interval_sec_max:
return self.last_interval
seconds_since_start = time.time() - self.experiment_start
interval = seconds_since_start * self.summaries_relative_step
interval = min(interval, self.interval_sec_max)
interval = max(interval, self.interval_sec_min)
self.last_interval = interval
return interval
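# Example of the adaptive interval with the default bounds (5 s min, 300 s max):
# 10 minutes into training, 600 * (1/200) = 3 s is clamped up to 5 s; after ~28 hours,
# 100000 * (1/200) = 500 s is clamped down to 300 s.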
def add_scalar(self, tag, value, step, *args, **kwargs):
if step == 0:
# removes faulty summaries that appear after the experiment restart
# print('Skip summaries with step=0')
return
seconds_since_start = time.time() - self.experiment_start
if seconds_since_start < self.defer_summaries_sec:
return
last_write = self.last_write_for_tag.get(tag, 0)
seconds_since_last_write = time.time() - last_write
interval = self._calc_interval()
if seconds_since_last_write >= interval:
self.writer.add_scalar(tag, value, step, *args, **kwargs)
self.last_write_for_tag[tag] = time.time()
def __getattr__(self, attr):
return getattr(self.writer, attr) |
Stephanie/Modules/weather_report_module.py | JeremyARussell/stephanie-va | 866 | 11112156 | import datetime
import pyowm
from Stephanie.Modules.base_module import BaseModule
from Stephanie.local_libs.numbers_format import NumberService
class WeatherReportModule(BaseModule):
def __init__(self, *args):
super(WeatherReportModule, self).__init__(*args)
self.api_key = self.get_configuration("open_weather_map_api_key")
if self.api_key:
self.owm = pyowm.OWM(self.api_key)
else:
return False
self.city = self.get_configuration(section="USER", key="city")
self.num_service = NumberService()
# def weather_information(self):
# self.assistant.say("What would you like to know about?")
# query = self.assistant.listen().decipher()
def weather_report_weekly(self):
temp_unit = 'celsius'
# some problem with self.city variable
forecast = self.owm.daily_forecast(self.city)
fore = forecast.get_forecast()
location = fore.get_location().get_name()
weather_report = self.get_weather_report_weekly(forecast, location, temp_unit, report="weekly")
return weather_report
def weather_report_today(self):
temp_unit = 'celsius'
report = 'current'
cw = self.owm.weather_at_place(self.city)
loc = cw.get_location().get_name()
weather = cw.get_weather()
weather_report = self.get_weather_report(weather, loc, temp_unit, report)
return weather_report
def weather_report_tomorrow(self):
temp_unit = 'celsius'
report = 'tomorrow'
forecast = self.owm.daily_forecast(self.city)
fore = forecast.get_forecast()
loc = fore.get_location().get_name()
tomorrow = pyowm.timeutils.tomorrow()
weather = forecast.get_weather_at(tomorrow)
weather_report = self.get_weather_report(weather, loc, temp_unit, report)
return weather_report
def get_weather_report(self, weather, loc, temp_unit='celsius', report='current'):
weather_report = 'Server Down.'
wind = weather.get_wind()
wind_speed = self.num_service.parseMagnitude(wind["speed"])
humi = self.num_service.parseMagnitude(weather.get_humidity())
if weather.get_clouds() > 0:
clou = self.num_service.parseMagnitude(weather.get_clouds())
else:
clou = "zero"
stat = weather.get_status()
detstat = weather.get_detailed_status()
if report == 'current':
temp = weather.get_temperature(temp_unit)
temp_max = self.num_service.parseMagnitude(temp['temp_max'])
temp_min = self.num_service.parseMagnitude(temp['temp_min'])
curr_temp = self.num_service.parseMagnitude(temp['temp'])
weather_report = "Weather at " + loc + ". Today is " + stat + ". There is a chance of " \
+ detstat + ". Now Temperature is " + curr_temp + " degree " \
+ temp_unit + ". Humidity " + humi + " percent. Wind Speed " \
+ wind_speed + ". with cloud cover " + clou + " percent."
elif report == 'tomorrow':
temp = weather.get_temperature(temp_unit)
temp_morn = self.num_service.parseMagnitude(temp['morn'])
temp_day = self.num_service.parseMagnitude(temp['day'])
temp_night = self.num_service.parseMagnitude(temp['night'])
weather_report = "Weather at " + loc + ". Tomorrow will be " + stat + ". There will be a chance of " \
+ detstat + ". Temperature in the morning " + temp_morn + " degree " \
+ temp_unit + ". Days Temperature will be " + temp_day + " degree " \
+ temp_unit + ". and Temperature at night will be " + temp_night + " degree " \
+ temp_unit + ". Humidity " + humi + " percent. Wind Speed " \
+ wind_speed + ". with clouds cover " + clou + " percent."
return weather_report
def get_weather_report_weekly(self, forecast, loc, temp_unit='celsius', report='current'):
weather_report = "Weather forecast for next week at " + loc + ". "
rainy_days = len(forecast.when_rain())
if rainy_days > 0:
rainy_days_str = "Rainy Days are. "
for d in range(rainy_days):
rain_day = forecast.when_rain()[d].get_reference_time()
date_str = self.format_time_stamp(rain_day)
rainy_days_str += date_str + ". "
weather_report += rainy_days_str
date_str = ''
most_rainy = forecast.most_rainy()
if most_rainy:
weather_report += "You will observe heavy rain on. "
ref_time = most_rainy.get_reference_time()
date_str = self.format_time_stamp(ref_time)
weather_report += date_str + ". "
date_str = ''
sunny_days = len(forecast.when_sun())
if sunny_days > 0:
sunny_days_str = "Sunny Days are. "
for d in range(sunny_days):
sunny_day = forecast.when_sun()[d].get_reference_time()
date_str = self.format_time_stamp(sunny_day)
sunny_days_str += date_str + ". "
weather_report += sunny_days_str
date_str = ''
most_hot = forecast.most_hot()
if most_hot:
weather_report += "You will feel heat on. "
ref_time = most_hot.get_reference_time()
date_str = self.format_time_stamp(ref_time)
weather_report += date_str + ". "
date_str = ''
most_windy = forecast.most_windy()
if most_windy:
weather_report += "Most windy day will be. "
ref_time = most_windy.get_reference_time()
date_str = self.format_time_stamp(ref_time)
weather_report += date_str + ". "
date_str = ''
most_humid = forecast.most_humid()
if most_humid:
weather_report += "Most humid day will be. "
ref_time = most_humid.get_reference_time()
date_str = self.format_time_stamp(ref_time)
weather_report += date_str + ". "
date_str = ''
most_cold = forecast.most_cold()
if most_cold:
weather_report += "Coolest day will be. "
ref_time = most_cold.get_reference_time()
date_str = self.format_time_stamp(ref_time)
weather_report += date_str + ". "
date_str = ''
return weather_report
@staticmethod
def format_time_stamp(unix_time):
return datetime.datetime.fromtimestamp(unix_time).strftime("%B %d")
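# For example, a Unix timestamp that falls on 15 May (local time) is rendered as
# "May 15" by the "%B %d" format used above.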
|
hn/__init__.py | movermeyer/HackerNewsAPI | 132 | 11112165 | <filename>hn/__init__.py<gh_stars>100-1000
"""
Python API for Hacker News.
@author <NAME>
@email <EMAIL>
"""
__title__ = 'hackernews'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Karan Goel'
from .hn import HN, Story
|
Configuration/AlCa/python/autoCondModifiers.py | malbouis/cmssw | 852 | 11112208 | ##
## Append for 0T conditions
##
from Configuration.StandardSequences.CondDBESSource_cff import GlobalTag as essource
connectionString = essource.connect.value()
# method called in autoCond
def autoCond0T(autoCond):
ConditionsFor0T = ','.join( ['RunInfo_0T_v1_mc', "RunInfoRcd", connectionString, "", "2020-07-01 12:00:00.000"] )
GlobalTags0T = {}
for key,val in autoCond.items():
if "phase" in key: # restrict to phase1 upgrade GTs
GlobalTags0T[key+"_0T"] = (autoCond[key], ConditionsFor0T)
autoCond.update(GlobalTags0T)
return autoCond
def autoCondHLTHI(autoCond):
GlobalTagsHLTHI = {}
# emulate hybrid ZeroSuppression on the VirginRaw data of 2015
FullPedestalsForHLTHI = ','.join( ['SiStripFullPedestals_GR10_v1_hlt', "SiStripPedestalsRcd", connectionString, "", "2021-03-11 12:00:00.000"] )
MenuForHLTHI = ','.join( ['L1Menu_CollisionsHeavyIons2015_v5_uGT_xml', "L1TUtmTriggerMenuRcd", connectionString, "", "2021-03-11 12:00:00.000"] )
for key,val in autoCond.items():
if key == 'run2_hlt_relval': # modification of HLT relval GT
GlobalTagsHLTHI['run2_hlt_hi'] = (autoCond[key], FullPedestalsForHLTHI, MenuForHLTHI)
autoCond.update(GlobalTagsHLTHI)
return autoCond
def autoCondDD4HEP(autoCond):
GlobalTagsDDHEP = {}
# substitute the DDD geometry tags with DD4HEP ones
CSCRECODIGI_Geometry_dd4hep = ','.join( ['CSCRECODIGI_Geometry_120DD4hepV1' , "CSCRecoDigiParametersRcd", connectionString, "", "2021-09-28 12:00:00.000"] )
CSCRECO_Geometry_dd4hep = ','.join( ['CSCRECO_Geometry_120DD4hepV1' , "CSCRecoGeometryRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
DTRECO_Geometry_dd4hep = ','.join( ['DTRECO_Geometry_120DD4hepV1' , "DTRecoGeometryRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
GEMRECO_Geometry_dd4hep = ','.join( ['GEMRECO_Geometry_120DD4hepV1' , "GEMRecoGeometryRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
XMLFILE_Geometry_dd4hep = ','.join( ['XMLFILE_Geometry_121DD4hepV1_Extended2021_mc', "GeometryFileRcd" , connectionString, "Extended", "2021-09-28 12:00:00.000"] )
HCALParameters_Geometry_dd4hep = ','.join( ['HCALParameters_Geometry_120DD4hepV1' , "HcalParametersRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
TKRECO_Geometry_dd4hep = ','.join( ['TKRECO_Geometry_121DD4hepV1' , "IdealGeometryRecord" , connectionString, "", "2021-09-28 12:00:00.000"] )
CTRECO_Geometry_dd4hep = ','.join( ['CTRECO_Geometry_120DD4hepV1' , "PCaloTowerRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
EBRECO_Geometry_dd4hep = ','.join( ['EBRECO_Geometry_120DD4hepV1' , "PEcalBarrelRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
EERECO_Geometry_dd4hep = ','.join( ['EERECO_Geometry_120DD4hepV1' , "PEcalEndcapRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
EPRECO_Geometry_dd4hep = ','.join( ['EPRECO_Geometry_120DD4hepV1' , "PEcalPreshowerRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
HCALRECO_Geometry_dd4hep = ','.join( ['HCALRECO_Geometry_120DD4hepV1' , "PHcalRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
TKParameters_Geometry_dd4hep = ','.join( ['TKParameters_Geometry_120DD4hepV1' , "PTrackerParametersRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
ZDCRECO_Geometry_dd4hep = ','.join( ['ZDCRECO_Geometry_120DD4hepV1' , "PZdcRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
RPCRECO_Geometry_dd4hep = ','.join( ['RPCRECO_Geometry_120DD4hepV1' , "RPCRecoGeometryRcd" , connectionString, "", "2021-09-28 12:00:00.000"] )
for key,val in autoCond.items():
if key == 'phase1_2021_realistic': # modification of the DD4HEP relval GT
GlobalTagsDDHEP['phase1_2021_dd4hep'] = (autoCond[key],
CSCRECODIGI_Geometry_dd4hep,
CSCRECO_Geometry_dd4hep,
DTRECO_Geometry_dd4hep,
GEMRECO_Geometry_dd4hep,
XMLFILE_Geometry_dd4hep,
HCALParameters_Geometry_dd4hep,
TKRECO_Geometry_dd4hep,
CTRECO_Geometry_dd4hep,
EBRECO_Geometry_dd4hep,
EERECO_Geometry_dd4hep,
EPRECO_Geometry_dd4hep,
HCALRECO_Geometry_dd4hep,
TKParameters_Geometry_dd4hep,
ZDCRECO_Geometry_dd4hep,
RPCRECO_Geometry_dd4hep)
autoCond.update(GlobalTagsDDHEP)
return autoCond
|
flow-python/examples/warehouse/detection_memd/onnx_model.py | chentyjpm/MegFlow | 303 | 11112217 | # MegFlow is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2019-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#!/usr/bin/env python
# coding=utf-8
from loguru import logger
import onnxruntime
import cv2
import numpy as np
def load_onnx_model(onnx_path):
onnx_session = onnxruntime.InferenceSession(onnx_path)
return onnx_session
def get_output_name(onnx_session):
output_name = []
for node in onnx_session.get_outputs():
output_name.append(node.name)
return output_name
def transform(image, target_shape=(960, 960)):
image_height, image_width, _ = image.shape
ratio_h = target_shape[1] * 1.0 / image_height
ratio_w = target_shape[0] * 1.0 / image_width
image = cv2.resize(image, target_shape)
return image, ratio_h, ratio_w
def is_overlap_v1(rect1, rect2, iou_threshold):
xx1 = max(rect1[0], rect2[0])
yy1 = max(rect1[1], rect2[1])
xx2 = min(rect1[2], rect2[2])
yy2 = min(rect1[3], rect2[3])
dx = max(0, xx2 - xx1 + 1)
dy = max(0, yy2 - yy1 + 1)
i = dx * dy
u = (rect1[2] - rect1[0] + 1) * (rect1[3] - rect1[1] + 1) + (
rect2[2] - rect2[0] + 1) * (rect2[3] - rect2[1] + 1) - i
ov = i / u
return ov >= iou_threshold
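# Worked IoU example for the overlap test above (numbers are illustrative):
# rect1 = (0, 0, 9, 9) and rect2 = (5, 5, 14, 14) intersect in a 5 x 5 patch, each box covers
# 10 x 10 = 100 pixels (inclusive coordinates), so IoU = 25 / (100 + 100 - 25) ~= 0.14 and the
# boxes are kept as separate detections at raw_nms's default threshold of 0.3.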
def raw_nms(boxes, iou_threshold=0.3):
if 0 == len(boxes):
return []
rects = list(boxes)
for i in range(len(rects)):
rects[i] = list(rects[i])
rects[i].append(i)
rects.sort(key=lambda x: x[4], reverse=True)
rect_valid = [True for i in range(len(rects))]
for i in range(len(rects)):
if rect_valid[i]:
j = i + 1
while j < len(rect_valid):
if is_overlap_v1(rects[i], rects[j], iou_threshold):
rect_valid[j] = False
j = j + 1
return [x[5] for i, x in enumerate(rects) if rect_valid[i]]
def onnx_inference(onnx_session, num_classes, image, topk_candidates=1000):
output_name = get_output_name(onnx_session)
image, ratio_h, ratio_w = transform(image)
image = image.astype(np.float32)
image = np.expand_dims(image.transpose((2, 0, 1)), 0)
scores, boxes = onnx_session.run(output_name, input_feed={"input": image})
keep = scores.max(axis=1) > 0.1
scores = scores[keep]
boxes = boxes[keep]
scores = scores.flatten()
# Keep top k top scoring indices only.
num_topk = min(topk_candidates, len(boxes))
# torch.sort is actually faster than .topk (at least on GPUs)
topk_idxs = np.argsort(scores)
scores = scores[topk_idxs][-num_topk:]
topk_idxs = topk_idxs[-num_topk:]
# filter out the proposals with low confidence score
shift_idxs = topk_idxs // num_classes
classes = topk_idxs % num_classes
boxes = boxes[shift_idxs]
boxes[:, 0] /= ratio_w
boxes[:, 1] /= ratio_h
boxes[:, 2] /= ratio_w
boxes[:, 3] /= ratio_h
return boxes, scores, classes
def run(onnx_session, image, class_names, score_thrs, nms_thr=0.6):
num_classes = len(class_names)
import time
t0 = time.time()
boxes, scores, cls_idxs = onnx_inference(onnx_session, num_classes, image)
cost = time.time() - t0
logger.info(f'memd inference: {cost}s')
assert len(boxes) == len(scores) and len(boxes) == len(cls_idxs)
if isinstance(score_thrs, float):
keep = scores > max(score_thrs, 0.2)
else:
score_thrs = np.asarray(score_thrs)
keep = scores > np.maximum(score_thrs[cls_idxs], 0.2)
pred_boxes = np.concatenate(
[boxes, scores[:, np.newaxis], cls_idxs[:, np.newaxis]], axis=1)
pred_boxes = pred_boxes[keep]
all_boxes = []
for cls_idx in range(len(class_names)):
keep_per_cls = pred_boxes[:, -1] == cls_idx
if keep_per_cls.sum() > 0:
pred_boxes_per_cls = pred_boxes[keep_per_cls].astype(np.float32)
keep_idx = raw_nms(pred_boxes_per_cls[:, :5], nms_thr)
for idx in keep_idx:
all_boxes.append(pred_boxes_per_cls[idx])
return all_boxes
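# Hypothetical usage sketch (file paths and class names are illustrative assumptions):
#   session = load_onnx_model("detector.onnx")
#   frame = cv2.imread("frame.jpg")
#   detections = run(session, frame, class_names=["person", "car"], score_thrs=0.3)
#   # each detection is [x1, y1, x2, y2, score, class_index] in original image coordinates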
|
burp/blackboxprotobuf/__init__.py | nccgroup/blackboxprotobuf | 261 | 11112239 | <gh_stars>100-1000
import os
import sys
import inspect
# Add correct directory to sys.path
_BASE_DIR = os.path.abspath(
os.path.dirname(inspect.getfile(inspect.currentframe())) + "../../../"
)
sys.path.insert(0, _BASE_DIR + "/lib/")
sys.path.insert(0, _BASE_DIR + "/burp/deps/six/")
sys.path.insert(0, _BASE_DIR + "/burp/deps/protobuf/python/")
# extend_path looks for other 'blackboxprotobuf' modules in the sys.path and
# adds them to __path__
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# Hack to fix loading protobuf libraries within Jython. See https://github.com/protocolbuffers/protobuf/issues/7776
def fix_protobuf():
import six
u = six.u
def new_u(s):
if s == r"[\ud800-\udfff]":
# Don't match anything
return "$^"
else:
return u(s)
six.u = new_u
fix_protobuf()
# mirror what we do in lib so we can use blackboxprotobuf.<function>
from blackboxprotobuf.lib.api import *
|
tests/mockapp/migrations/0005_mockperson_birthdate.py | fluxility/drf-haystack | 201 | 11112251 | <reponame>fluxility/drf-haystack
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-16 07:05
from __future__ import unicode_literals
from django.db import migrations, models
import tests.mockapp.models
class Migration(migrations.Migration):
dependencies = [
('mockapp', '0002_mockperson'),
]
operations = [
migrations.AddField(
model_name='mockperson',
name='birthdate',
field=models.DateField(default=tests.mockapp.models.get_random_date, null=True),
),
]
|
Chapter02/callCalculator.pyw | houdinii/Qt5-Python-GUI-Programming-Cookbook | 131 | 11112273 | import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoCalculator import *
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.pushButtonPlus.clicked.connect(self.addtwonum)
self.ui.pushButtonSubtract.clicked.connect(self.subtracttwonum)
self.ui.pushButtonMultiply.clicked.connect(self.multiplytwonum)
self.ui.pushButtonDivide.clicked.connect(self.dividetwonum)
self.show()
def addtwonum(self):
if len(self.ui.lineEditFirstNumber.text())!=0:
a=int(self.ui.lineEditFirstNumber.text())
else:
a=0
if len(self.ui.lineEditSecondNumber.text())!=0:
b=int(self.ui.lineEditSecondNumber.text())
else:
b=0
sum=a+b
self.ui.labelResult.setText("Addition: " +str(sum))
def subtracttwonum(self):
if len(self.ui.lineEditFirstNumber.text())!=0:
a=int(self.ui.lineEditFirstNumber.text())
else:
a=0
if len(self.ui.lineEditSecondNumber.text())!=0:
b=int(self.ui.lineEditSecondNumber.text())
else:
b=0
diff=a-b
self.ui.labelResult.setText("Substraction: " +str(diff))
def multiplytwonum(self):
if len(self.ui.lineEditFirstNumber.text())!=0:
a=int(self.ui.lineEditFirstNumber.text())
else:
a=0
if len(self.ui.lineEditSecondNumber.text())!=0:
b=int(self.ui.lineEditSecondNumber.text())
else:
b=0
mult=a*b
self.ui.labelResult.setText("Multiplication: " +str(mult))
def dividetwonum(self):
if len(self.ui.lineEditFirstNumber.text())!=0:
a=int(self.ui.lineEditFirstNumber.text())
else:
a=0
if len(self.ui.lineEditSecondNumber.text())!=0:
b=int(self.ui.lineEditSecondNumber.text())
else:
b=0
division=round(a/b,2) if b!=0 else "undefined (division by zero)"
self.ui.labelResult.setText("Division: " +str(division))
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
|
tests/model_fields/test_floatfield.py | ni-ning/django | 61,676 | 11112290 | <filename>tests/model_fields/test_floatfield.py
from django.db import transaction
from django.test import TestCase
from .models import FloatModel
class TestFloatField(TestCase):
def test_float_validates_object(self):
instance = FloatModel(size=2.5)
# Try setting float field to unsaved object
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Set value to valid and save
instance.size = 2.5
instance.save()
self.assertTrue(instance.id)
# Set field to object on saved instance
instance.size = instance
msg = (
'Tried to update field model_fields.FloatModel.size with a model '
'instance, %r. Use a value compatible with FloatField.'
) % instance
with transaction.atomic():
with self.assertRaisesMessage(TypeError, msg):
instance.save()
# Try setting field to object on retrieved object
obj = FloatModel.objects.get(pk=instance.id)
obj.size = obj
with self.assertRaisesMessage(TypeError, msg):
obj.save()
def test_invalid_value(self):
tests = [
(TypeError, ()),
(TypeError, []),
(TypeError, {}),
(TypeError, set()),
(TypeError, object()),
(TypeError, complex()),
(ValueError, 'non-numeric string'),
(ValueError, b'non-numeric byte-string'),
]
for exception, value in tests:
with self.subTest(value):
msg = "Field 'size' expected a number but got %r." % (value,)
with self.assertRaisesMessage(exception, msg):
FloatModel.objects.create(size=value)
|
corehq/messaging/smsbackends/http/tests/test_models.py | akashkj/commcare-hq | 471 | 11112292 | <reponame>akashkj/commcare-hq
from corehq.util.urlvalidate.urlvalidate import PossibleSSRFAttempt
from datetime import datetime
from unittest.mock import patch, ANY
from django.test import SimpleTestCase
from corehq.apps.sms.models import SMS, OUTGOING
from corehq.util.urlvalidate.test.mockipinfo import hostname_resolving_to_ips
from ..models import SQLHttpBackend
from .. import models
class TestHttpBackend(SimpleTestCase):
@patch.object(models, 'urlopen')
def test_sends_without_error(self, mock_urlopen):
message = self._create_message(phone_number='1234567890', text='Hello World')
backend = self._create_backend(url='http://www.dimagi.com')
backend.send(message)
mock_urlopen.assert_called_with('http://www.dimagi.com?message=Hello+World&number=1234567890',
context=ANY, timeout=ANY)
@hostname_resolving_to_ips('malicious.address', ['127.0.0.1'])
@patch.object(SMS, 'save') # mocked to avoid the database
def test_throws_error_when_url_is_ssrf(self, mock_save):
message = self._create_message()
backend = self._create_backend(url='http://malicious.address')
with self.assertRaises(PossibleSSRFAttempt):
backend.send(message)
def _create_backend(self, url='http://www.dimagi.com',
message_param='message', number_param='number', method='GET'):
return SQLHttpBackend(extra_fields={
'url': url,
'message_param': message_param,
'number_param': number_param,
'include_plus': False,
'method': method
})
def _create_message(self,
domain='test_domain',
phone_number='1234567890',
direction=OUTGOING,
date=None,
text='Hello World'):
if not date:
date = datetime(2021, 5, 15)
return SMS(
domain=domain,
phone_number=phone_number,
direction=direction,
date=date,
text=text
)
|
security_monkey/tests/auditors/rds/test_rds_snapshot.py | boladmin/security_monkey | 4,258 | 11112299 | # Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.tests.auditors.rds.test_rds_snapshot
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @monkeysecurity
"""
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey.auditors.rds.rds_snapshot import RDSSnapshotAuditor
from security_monkey.watchers.rds.rds_snapshot import RDSSnapshot, RDSSnapshotItem
from security_monkey.datastore import Account, AccountType
from security_monkey import db
class RDSSnapshotAuditorTestCase(SecurityMonkeyTestCase):
def pre_test_setup(self):
RDSSnapshotAuditor(accounts=['TEST_ACCOUNT']).OBJECT_STORE.clear()
account_type_result = AccountType(name='AWS')
db.session.add(account_type_result)
db.session.commit()
# main
account = Account(identifier="123456789123", name="TEST_ACCOUNT",
account_type_id=account_type_result.id, notes="TEST_ACCOUNT",
third_party=False, active=True)
# friendly
account2 = Account(identifier="222222222222", name="TEST_ACCOUNT_TWO",
account_type_id=account_type_result.id, notes="TEST_ACCOUNT_TWO",
third_party=False, active=True)
# third party
account3 = Account(identifier="333333333333", name="TEST_ACCOUNT_THREE",
account_type_id=account_type_result.id, notes="TEST_ACCOUNT_THREE",
third_party=True, active=True)
db.session.add(account)
db.session.add(account2)
db.session.add(account3)
db.session.commit()
def test_check_internet_accessible(self):
config0 = {'Attributes': { 'restore': ['all'] } }
config1 = {'Attributes': { 'restore': [] } }
rsa = RDSSnapshotAuditor(accounts=['TEST_ACCOUNT'])
rsa.prep_for_audit()
item = RDSSnapshotItem(config=config0)
rsa.check_internet_accessible(item)
self.assertEqual(len(item.audit_issues), 1)
self.assertEqual(item.audit_issues[0].score, 10)
self.assertEqual(item.audit_issues[0].issue, 'Internet Accessible')
self.assertEqual(item.audit_issues[0].notes, 'Entity: [account:all] Actions: ["restore"]')
item = RDSSnapshotItem(config=config1)
rsa.check_internet_accessible(item)
self.assertEqual(len(item.audit_issues), 0)
def test_check_friendly(self):
config0 = {'Attributes': { 'restore': ["222222222222"] } }
rsa = RDSSnapshotAuditor(accounts=['TEST_ACCOUNT'])
rsa.prep_for_audit()
item = RDSSnapshotItem(config=config0)
rsa.check_friendly_cross_account(item)
self.assertEqual(len(item.audit_issues), 1)
self.assertEqual(item.audit_issues[0].score, 0)
self.assertEqual(item.audit_issues[0].issue, 'Friendly Cross Account')
self.assertEqual(item.audit_issues[0].notes, 'Account: [222222222222/TEST_ACCOUNT_TWO] Entity: [account:222222222222] Actions: ["restore"]')
def test_check_thirdparty(self):
config0 = {'Attributes': { 'restore': ["333333333333"] } }
rsa = RDSSnapshotAuditor(accounts=['TEST_ACCOUNT'])
rsa.prep_for_audit()
item = RDSSnapshotItem(config=config0)
rsa.check_thirdparty_cross_account(item)
self.assertEqual(len(item.audit_issues), 1)
self.assertEqual(item.audit_issues[0].score, 0)
self.assertEqual(item.audit_issues[0].issue, 'Thirdparty Cross Account')
self.assertEqual(item.audit_issues[0].notes, 'Account: [333333333333/TEST_ACCOUNT_THREE] Entity: [account:333333333333] Actions: ["restore"]')
def test_check_unknown(self):
config0 = {'Attributes': { 'restore': ["444444444444"] } }
rsa = RDSSnapshotAuditor(accounts=['TEST_ACCOUNT'])
rsa.prep_for_audit()
item = RDSSnapshotItem(config=config0)
rsa.check_unknown_cross_account(item)
self.assertEqual(len(item.audit_issues), 1)
self.assertEqual(item.audit_issues[0].score, 10)
self.assertEqual(item.audit_issues[0].issue, 'Unknown Access')
self.assertEqual(item.audit_issues[0].notes, 'Entity: [account:444444444444] Actions: ["restore"]') |
rbac/proxy.py | eldorplus/simple-rbac | 219 | 11112312 | <filename>rbac/proxy.py<gh_stars>100-1000
from __future__ import absolute_import
import functools
import collections
__all__ = ["dummy_factory", "model_role_factory", "model_resource_factory",
"RegistryProxy"]
# identity tuple
identity = collections.namedtuple("identity", ["type", "cls", "id"])
role_identity = functools.partial(identity, "role-model")
resource_identity = functools.partial(identity, "resource-model")
def GetFullName(m):
return "%s.%s" % (m.__module__, m.__name__)
def DummyFactory(acl, obj):
return obj
# inline functions
getfullname = GetFullName
dummy_factory = DummyFactory
def _model_identity_factory(obj, identity_maker, identity_adder):
if not hasattr(obj, "id"):
return obj
if isinstance(obj, type):
# make a identity tuple for the "class"
identity = identity_maker(getfullname(obj), None)
# register into access control list
identity_adder(identity)
else:
# make a identity tuple for the "instance" and the "class"
class_fullname = getfullname(obj.__class__)
identity = identity_maker(class_fullname, obj.id)
identity_type = identity_maker(class_fullname, None)
# register into access control list
identity_adder(identity, parents=[identity_type])
return identity
def model_role_factory(acl, obj):
"""A factory to create a identity tuple from a model class or instance."""
return _model_identity_factory(obj, role_identity, acl.add_role)
def model_resource_factory(acl, obj):
"""A factory to create a identity tuple from a model class or instance."""
return _model_identity_factory(obj, resource_identity, acl.add_resource)
class RegistryProxy(object):
"""A proxy of the access control list.
This proxy could use two factory function to create the role identity
object and the resource identity object automatic.
A example for the factory function:
>>> def role_factory(acl, input_role):
>>> role = ("my-role", str(input_role))
>>> acl.add_role(role)
>>> return role
"""
def __init__(self, acl, role_factory=dummy_factory,
resource_factory=model_resource_factory):
self.acl = acl
self.make_role = functools.partial(role_factory, self.acl)
self.make_resource = functools.partial(resource_factory, self.acl)
def add_role(self, role, parents=[]):
role = self.make_role(role)
parents = [self.make_role(parent) for parent in parents]
return self.acl.add_role(role, parents)
def add_resource(self, resource, parents=[]):
resource = self.make_resource(resource)
parents = [self.make_resource(parent) for parent in parents]
return self.acl.add_resource(resource, parents)
def allow(self, role, operation, resource, assertion=None):
role = self.make_role(role)
resource = self.make_resource(resource)
return self.acl.allow(role, operation, resource, assertion)
def deny(self, role, operation, resource, assertion=None):
role = self.make_role(role)
resource = self.make_resource(resource)
return self.acl.deny(role, operation, resource, assertion)
def is_allowed(self, role, operation, resource, **assertion_kwargs):
role = self.make_role(role)
resource = self.make_resource(resource)
return self.acl.is_allowed(role, operation,
resource, **assertion_kwargs)
def is_any_allowed(self, roles, operation, resource, **assertion_kwargs):
roles = [self.make_role(role) for role in roles]
resource = self.make_resource(resource)
return self.acl.is_any_allowed(roles, operation,
resource, **assertion_kwargs)
def __getattr__(self, attr):
return getattr(self.acl, attr)
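
# Usage sketch (illustrative, not part of the original module). It wraps the plain
# registry shipped with simple-rbac (assumed importable as rbac.acl.Registry) and
# uses a made-up ``Post`` model class with an ``id`` attribute so that
# model_resource_factory can build identity tuples for it.
if __name__ == "__main__":
    from rbac.acl import Registry

    class Post(object):
        id = None                      # class-level attribute so hasattr(Post, "id") is True

        def __init__(self, id):
            self.id = id

    acl = RegistryProxy(Registry())
    acl.add_role("staff")
    acl.add_resource(Post)             # the class itself becomes a resource identity
    acl.allow("staff", "edit", Post)
    print(acl.is_allowed("staff", "edit", Post))     # True
    print(acl.is_allowed("staff", "delete", Post))   # falsy: no matching rule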
|
jupyterlab_sql/connection_url.py | mwiewior/jupyterlab-sql | 385 | 11112315 | import sqlalchemy.engine.url
def is_sqlite(url):
backend = _to_sqlalchemy_url(url).get_backend_name()
return backend == "sqlite"
def is_mysql(url):
backend = _to_sqlalchemy_url(url).get_backend_name()
return backend == "mysql"
def has_database(url):
database = _to_sqlalchemy_url(url).database
# database is either None or an empty string, depending on
# whether the URL contains a trailing slash.
return bool(database)
def _to_sqlalchemy_url(url):
return sqlalchemy.engine.url.make_url(url)
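
# Usage sketch (illustrative, not part of the original module): the helpers only
# parse the URL string via SQLAlchemy, no database connection is opened.
if __name__ == "__main__":
    print(is_sqlite("sqlite:///tmp/example.db"))                   # True
    print(is_mysql("mysql+pymysql://user:secret@localhost/shop"))  # True
    print(has_database("postgresql://localhost/mydb"))             # True
    print(has_database("postgresql://localhost/"))                 # False: trailing slash, no database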
|
torchelie/datasets/debug.py | Vermeille/Torchelie | 117 | 11112332 | import os
from typing import Optional, Callable
from typing_extensions import Literal
import torch
import torchvision.transforms as TF
from torch.utils.data import Dataset
from torchvision.datasets import ImageFolder
from torchvision.datasets.utils import download_and_extract_archive
__all__ = ['ColoredColumns', 'ColoredRows', 'Imagenette', 'Imagewoof']
class ColoredColumns(Dataset):
"""
    A dataset of procedurally generated images of columns randomly colorized.
Args:
*size (int): size of images
transform (transforms or None): the image transforms to apply to the
generated pictures
"""
def __init__(self, *size, transform=None) -> None:
super(ColoredColumns, self).__init__()
self.size = size
self.transform = transform if transform is not None else (lambda x: x)
def __len__(self):
return 10000
def __getitem__(self, i):
cols = torch.randint(0, 255, (3, 1, self.size[1]))
expanded = cols.expand(3, *self.size).float()
img = TF.ToPILImage()(expanded / 255)
return self.transform(img), 0
class ColoredRows(Dataset):
"""
    A dataset of procedurally generated images of rows randomly colorized.
Args:
*size (int): size of images
transform (transforms or None): the image transforms to apply to the
generated pictures
"""
def __init__(self, *size, transform=None) -> None:
super(ColoredRows, self).__init__()
self.size = size
self.transform = transform if transform is not None else (lambda x: x)
def __len__(self):
return 10000
def __getitem__(self, i):
rows = torch.randint(0, 255, (3, self.size[0], 1))
expanded = rows.expand(3, *self.size).float()
img = TF.ToPILImage()(expanded / 255)
return self.transform(img), 0
class Imagenette(ImageFolder):
"""
Imagenette by <NAME> ( https://github.com/fastai/imagenette ).
Args:
root (str): root directory
        train (bool): if False, use the validation split
transform (Callable): image transforms
download (bool): if True and root empty, download the dataset
        version (str): which resolution to download ('full', '320px', '160px')
"""
def __init__(self,
root: str,
train: bool,
transform: Optional[Callable] = None,
download: bool = False,
version: Literal['full', '320px', '160px'] = '320px'):
size = ({
'full': 'imagenette2',
'320px': 'imagenette2-320',
'160px': 'imagenette2-160'
})[version]
split = 'train' if train else 'val'
if not self._check_integrity(f'{root}/{size}') and download:
download_and_extract_archive(
f'https://s3.amazonaws.com/fast-ai-imageclas/{size}.tgz',
root,
remove_finished=True)
super().__init__(f'{root}/{size}/{split}', transform=transform)
def _check_integrity(self, path):
return os.path.exists(os.path.expanduser(path))
class Imagewoof(ImageFolder):
"""
Imagewoof by <NAME> ( https://github.com/fastai/imagenette ).
Args:
root (str): root directory
        train (bool): if False, use the validation split
transform (Callable): image transforms
download (bool): if True and root empty, download the dataset
        version (str): which resolution to download ('full', '320px', '160px')
"""
def __init__(self,
root: str,
train: bool,
transform: Optional[Callable] = None,
download: bool = False,
version: Literal['full', '320px', '160px'] = '320px'):
size = ({
'full': 'imagewoof2',
'320px': 'imagewoof2-320',
'160px': 'imagewoof2-160'
})[version]
split = 'train' if train else 'val'
if not self._check_integrity(f'{root}/{size}') and download:
download_and_extract_archive(
f'https://s3.amazonaws.com/fast-ai-imageclas/{size}.tgz',
root,
remove_finished=True)
super().__init__(f'{root}/{size}/{split}', transform=transform)
def _check_integrity(self, path):
return os.path.exists(os.path.expanduser(path))
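
# Usage sketch (illustrative, not part of the original module): fetching the
# 160px Imagenette split requires network access to download the archive.
if __name__ == "__main__":
    ds = Imagenette("./data", train=True, download=True, version="160px",
                    transform=TF.Compose([TF.Resize((160, 160)), TF.ToTensor()]))
    img, label = ds[0]
    print(img.shape, label, len(ds.classes))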
|
src/chapter-12/test_caching.py | luizyao/pytest-chinese-doc | 283 | 11112354 | <reponame>luizyao/pytest-chinese-doc<filename>src/chapter-12/test_caching.py
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Author: luizyao (<EMAIL>)
Created Date: 2019-11-30 20:35:07
-----
Modified: 2019-11-30 20:36:29
Modified By: luizyao (<EMAIL>)
-----
THIS PROGRAM IS FREE SOFTWARE, IS LICENSED UNDER MIT.
A short and simple permissive license with conditions
only requiring preservation of copyright and license notices.
Copyright © 2019 <NAME>
-----
HISTORY:
Date By Comments
---------- -------- ---------------------------------------------------------
'''
# content of test_caching.py
import pytest
def expensive_computation():
print("running expensive computation...")
@pytest.fixture
def mydata(request):
    # Read the previously cached value, if any
val = request.config.cache.get("example/value", None)
if val is None:
expensive_computation()
val = 42
        # Nothing cached yet, so store the freshly computed value
request.config.cache.set("example/value", val)
return val
def test_function(mydata):
assert mydata == 23
|
examples/plot_hide.py | ipa-maa/UpSetPlot | 175 | 11112388 | """
======================================
Hiding subsets based on size or degree
======================================
This illustrates the use of ``min_subset_size``, ``max_subset_size``,
``min_degree`` or ``max_degree``.
"""
from matplotlib import pyplot as plt
from upsetplot import generate_counts, plot
example = generate_counts()
plot(example, show_counts=True)
plt.suptitle('Nothing hidden')
plt.show()
##########################################################################
plot(example, show_counts=True, min_subset_size=100)
plt.suptitle('Small subsets hidden')
plt.show()
##########################################################################
plot(example, show_counts=True, max_subset_size=500)
plt.suptitle('Large subsets hidden')
plt.show()
##########################################################################
plot(example, show_counts=True, min_degree=2)
plt.suptitle('Degree <2 hidden')
plt.show()
##########################################################################
plot(example, show_counts=True, max_degree=2)
plt.suptitle('Degree >2 hidden')
plt.show()
|
archai/algos/gumbelsoftmax/gs_exp_runner.py | cclauss/archai | 344 | 11112429 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Type
from overrides import overrides
from archai.nas.exp_runner import ExperimentRunner
from archai.nas.arch_trainer import TArchTrainer
from archai.nas.finalizers import Finalizers
from .gs_model_desc_builder import GsModelDescBuilder
from .gs_arch_trainer import GsArchTrainer
from .gs_finalizers import GsFinalizers
class GsExperimentRunner(ExperimentRunner):
@overrides
def model_desc_builder(self)->GsModelDescBuilder:
return GsModelDescBuilder()
@overrides
def trainer_class(self)->TArchTrainer:
return GsArchTrainer
@overrides
def finalizers(self)->Finalizers:
return GsFinalizers()
|
examples/select_loop.py | webcoast-dk/mitogen | 1,526 | 11112441 | <filename>examples/select_loop.py
#
# This demonstrates using a nested select.Select() to simultaneously watch for
# in-progress events generated by a bunch of function calls, and the completion
# of those function calls.
#
# We start 5 children and run a function in each of them in parallel. The
# function writes the numbers 1..5 to a Sender before returning. The master
# reads the numbers from each child as they are generated, and exits the loop
# when the last function returns.
#
from __future__ import absolute_import
from __future__ import print_function
import time
import mitogen
import mitogen.select
def count_to(sender, n, wait=0.333):
for x in range(n):
sender.send(x)
time.sleep(wait)
@mitogen.main()
def main(router):
# Start 5 subprocesses and give them made up names.
contexts = {
'host%d' % (i,): router.local()
for i in range(5)
}
# Used later to recover hostname. A future Mitogen will provide a better
# way to get app data references back out of its IO primitives, for now you
# need to do it manually.
hostname_by_context_id = {
context.context_id: hostname
for hostname, context in contexts.items()
}
# I am a select that holds the receivers that will receive the function
# call results. Selects are one-shot by default, which means each receiver
# is removed from them as a result arrives. Therefore it means the last
# function has completed when bool(calls_sel) is False.
calls_sel = mitogen.select.Select()
# I receive the numbers as they are counted.
status_recv = mitogen.core.Receiver(router)
# Start the function calls
for hostname, context in contexts.items():
calls_sel.add(
context.call_async(
count_to,
sender=status_recv.to_sender(),
n=5,
wait=0.333
)
)
# Create a select subscribed to the function call result Select, and to the
# number-counting receiver. Any message arriving on any child of this
# Select will wake it up -- be it a message arriving on the status
# receiver, or any message arriving on any of the function call result
# receivers.
# Once last call is completed, calls_sel will be empty since it's
# oneshot=True (the default), causing __bool__ to be False
both_sel = mitogen.select.Select([status_recv, calls_sel], oneshot=False)
# Internally selects store a strong reference from Receiver->Select that
# will keep the Select alive as long as the receiver is alive. If a
# receiver or select otherwise 'outlives' some parent select, attempting to
# re-add it to a new select will raise an error. In all cases it's
# desirable to call Select.close(). This can be done as a context manager.
with calls_sel, both_sel:
while calls_sel:
try:
msg = both_sel.get(timeout=60.0)
except mitogen.core.TimeoutError:
print("No update in 60 seconds, something's broke")
break
hostname = hostname_by_context_id[msg.src_id]
if msg.receiver is status_recv: # https://mitogen.readthedocs.io/en/stable/api.html#mitogen.core.Message.receiver
# handle a status update
print('Got status update from %s: %s' % (hostname, msg.unpickle()))
elif msg.receiver is calls_sel: # subselect
# handle a function call result.
try:
assert None == msg.unpickle()
print('Task succeeded on %s' % (hostname,))
except mitogen.core.CallError as e:
print('Task failed on host %s: %s' % (hostname, e))
if calls_sel:
print('Some tasks did not complete.')
else:
print('All tasks completed.')
|
wtfml/logger/__init__.py | Mo5mami/wtfml | 283 | 11112461 | from .logger import logger
|
PyFin/tests/Math/Distributions/testDistribution.py | rpatil524/Finance-Python | 325 | 11112468 | # -*- coding: utf-8 -*-
u"""
Created on 2015-7-23
@author: cheng.li
"""
import unittest
import math
import copy
import tempfile
import pickle
import os
from PyFin.Math.Distributions import InverseCumulativeNormal
from PyFin.Math.Distributions import NormalDistribution
from PyFin.Math.Distributions import CumulativeNormalDistribution
average = 1.0
sigma = 2.0
def gaussian(x):
normFact = sigma * math.sqrt(2.0 * math.pi)
dx = x - average
return math.exp(-dx * dx / (2.0 * sigma * sigma)) / normFact
def gaussianDerivative(x):
normFact = sigma * sigma * sigma * math.sqrt(2.0 * math.pi)
dx = x - average
return -dx * math.exp(-dx * dx / (2.0 * sigma * sigma)) / normFact
class TestDistribution(unittest.TestCase):
def testNormal(self):
invCumStandardNormal = InverseCumulativeNormal()
check = invCumStandardNormal(0.5)
self.assertAlmostEqual(check, 0.0, 10, "inverse cumulative of the standard normal at 0.5 is {0:f}"
"\n instead of zero: something is wrong!".format(check))
normal = NormalDistribution(average, sigma)
cum = CumulativeNormalDistribution(average, sigma)
invCum = InverseCumulativeNormal(average, sigma)
invCumAcc = InverseCumulativeNormal(average, sigma, fullAccuracy=True)
numberOfStandardDeviation = 6
xMin = average - numberOfStandardDeviation * sigma
xMax = average + numberOfStandardDeviation * sigma
N = 100001
h = (xMax - xMin) / (N - 1)
x = [xMin + i * h for i in range(N)]
y = [gaussian(v) for v in x]
yd = [gaussianDerivative(v) for v in x]
temp = [normal(v) for v in x]
for i, (expected, calculated) in enumerate(zip(y, temp)):
self.assertAlmostEqual(expected, calculated, 15, "at index {0:d}\n"
"Expected: {1:f}\n"
"Calculated: {2:f}".format(i, expected, calculated))
temp = [cum(v) for v in x]
temp = [invCum(v) for v in temp]
for i, (expected, calculated) in enumerate(zip(x, temp)):
self.assertAlmostEqual(expected, calculated, 7, "at index {0:d}\n"
"Expected gaussian: {1:f}\n"
"Calculated Gaussian: {2:f}".format(i, expected,
calculated))
temp = [cum(v) for v in x]
temp = [invCumAcc(v) for v in temp]
for i, (expected, calculated) in enumerate(zip(x, temp)):
self.assertAlmostEqual(expected, calculated, 7, "at index {0:d}\n"
"Expected gaussian: {1:.9f}\n"
"Calculated Gaussian: {2:.9f}".format(i, expected,
calculated))
temp = [cum.derivative(v) for v in x]
for i, (expected, calculated) in enumerate(zip(y, temp)):
self.assertAlmostEqual(expected, calculated, 15, "at index {0:d}\n"
"Expected: {1:f}\n"
"Calculated: {2:f}".format(i, expected, calculated))
temp = [normal.derivative(v) for v in x]
for i, (expected, calculated) in enumerate(zip(yd, temp)):
self.assertAlmostEqual(expected, calculated, 15, "at index {0:d}\n"
"Expected: {1:f}\n"
"Calculated: {2:f}".format(i, expected, calculated))
# test nan value returning
self.assertTrue(math.isnan(invCum(-0.5)))
def testNormalDistributionCopy(self):
norm = NormalDistribution(average, sigma)
copied = copy.deepcopy(norm)
self.assertEqual(norm, copied)
def testNormalDistributionPickle(self):
benchmark_norm = NormalDistribution(average, sigma)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(benchmark_norm, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_norm = pickle.load(f2)
self.assertEqual(benchmark_norm, pickled_norm)
os.unlink(f.name)
def testCumulativeNormalDistribution(self):
norm = CumulativeNormalDistribution(average, sigma)
copied = copy.deepcopy(norm)
self.assertEqual(norm, copied)
def testCumulativeNormalDistributionPickle(self):
benchmark_norm = CumulativeNormalDistribution(average, sigma)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(benchmark_norm, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_norm = pickle.load(f2)
self.assertEqual(benchmark_norm, pickled_norm)
os.unlink(f.name)
def testInverseCumulativeNormal(self):
norm = InverseCumulativeNormal(average, sigma, True)
copied = copy.deepcopy(norm)
self.assertEqual(norm, copied)
def testInverseCumulativeNormalPickle(self):
benchmark_norm = InverseCumulativeNormal(average, sigma)
f = tempfile.NamedTemporaryFile('w+b', delete=False)
pickle.dump(benchmark_norm, f)
f.close()
with open(f.name, 'rb') as f2:
pickled_norm = pickle.load(f2)
self.assertEqual(benchmark_norm, pickled_norm) |
buildroot/support/testing/tests/package/test_prosody.py | rbrenton/hassos | 349 | 11112480 | <filename>buildroot/support/testing/tests/package/test_prosody.py
from tests.package.test_lua import TestLuaBase
class TestProsody(TestLuaBase):
def lua_dependencies_test(self):
self.module_test('bit') # luabitop
self.module_test('lfs') # luafilesystem
self.module_test('lxp') # luaexpat
self.module_test('socket') # luasocket
self.module_test('ssl') # luasec
def prosody_test(self):
# prosody was launched as service
cmd = "prosodyctl status"
output, exit_code = self.emulator.run(cmd)
self.assertEqual(exit_code, 0)
self.assertIn("Prosody is running", output[0])
class TestProsodyLua51(TestProsody):
config = TestLuaBase.config + \
"""
BR2_PACKAGE_LUA=y
BR2_PACKAGE_LUA_5_1=y
BR2_PACKAGE_PROSODY=y
"""
def test_run(self):
self.login()
self.version_test('Lua 5.1')
self.g_version_test('Lua 5.1')
self.lua_dependencies_test()
self.prosody_test()
class TestProsodyLuajit(TestProsody):
config = TestLuaBase.config + \
"""
BR2_PACKAGE_LUAJIT=y
BR2_PACKAGE_PROSODY=y
"""
def test_run(self):
self.login()
self.version_test('LuaJIT 2')
self.g_version_test('Lua 5.1')
self.lua_dependencies_test()
self.prosody_test()
|
051-100/061-rotate-list.py | bbram10/leetcode-master | 134 | 11112525 | <filename>051-100/061-rotate-list.py
"""
STATEMENT
Given a list, rotate the list to the right by k places, where k is non-negative.
CLARIFICATIONS
- Is the list given as an array? No, a linked list.
- Each node in the linked list is an object of a custom data structure, right? Yes.
Define them.
- Can k be more than the length of the list? Sure.
- Do I return the head of the updated list? Yes.
EXAMPLES
1->2->3->4->5->None, 2 => 4->5->1->2->3->NULL
COMMENTS
- If k is much longer than the list length, we should not update the list unneccessarily,
so k should be updated to the k modulo list length.
- If k is 0, return the original list.
"""
def rotateRight(head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if head is None:
return head
temp = head
list_length = 1
while temp.next is not None:
temp = temp.next
list_length += 1
k = k%list_length
if k == 0:
return head
prev, curr = head, head
count = 1
while count <= k:
curr = curr.next
count += 1
while curr.next is not None:
prev = prev.next
curr = curr.next
to_return = prev.next
prev.next = None
curr.next = head
return to_return
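
# Minimal demo (illustrative): LeetCode normally supplies ListNode, so a tiny
# stand-in is defined here only to make the sketch self-contained.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, val):
            self.val = val
            self.next = None

    head = ListNode(1)
    tail = head
    for v in [2, 3, 4, 5]:
        tail.next = ListNode(v)
        tail = tail.next

    node = rotateRight(head, 2)
    out = []
    while node is not None:
        out.append(node.val)
        node = node.next
    print(out)  # [4, 5, 1, 2, 3]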
|
rootpy/utils/hook.py | masonproffitt/rootpy | 146 | 11112529 | <reponame>masonproffitt/rootpy<gh_stars>100-1000
from __future__ import absolute_import
import types
import sys
from .inject_closure import inject_closure_values
from . import log; log = log[__name__]
__all__ = [
'super_overridden',
'uses_super',
'classhook',
'appendclass',
]
# The below code is here for reference:
# How to hook anything you want..
# TODO(pwaller): Delete this if no-one needs it after a month or two.
"""
from .. import QROOT
HOOKED_CLASSES = {}
TObject_meta = type(QROOT.TObject)
orig_meta_getattribute = TObject_meta.__getattribute__
def new_meta_getattribute(cls, name):
#print cls, name
if cls in HOOKED_CLASSES:
hook = HOOKED_METHODS.get((cls, name), None)
if hook:
hook(orig_getattribute)
return orig_meta_getattribute(cls, name)
TObject_meta.__getattribute__ = new_meta_getattribute
orig_getattribute = QROOT.TObject.__getattribute__
def new_getattribute(cls, name):
x = orig_getattribute(cls, name)
return x
QROOT.TObject.__getattribute__ = new_getattribute
"""
INTERESTING = (
types.FunctionType, types.MethodType,
property, staticmethod, classmethod)
def super_overridden(cls):
"""
This decorator just serves as a reminder that the super function behaves
differently. It doesn't actually do anything, that happens inside
``classhook.hook_class``.
"""
cls.__rootpy_have_super_overridden = True
return cls
def uses_super(func):
"""
Check if the function/property/classmethod/staticmethod uses the `super` builtin
"""
if isinstance(func, property):
return any(uses_super(f) for f in (func.fget, func.fset, func.fdel) if f)
elif isinstance(func, (staticmethod, classmethod)):
if sys.version_info >= (2, 7):
func = func.__func__
elif isinstance(func, staticmethod):
func = func.__get__(True)
else: # classmethod
func = func.__get__(True).im_func
if sys.version_info[0] >= 3:
return 'super' in func.__code__.co_names
return 'super' in func.func_code.co_names
class classhook(object):
"""
Interpose the `hook` classes' methods onto the target `classes`.
Note, it is also necessary to decorate these classes with @super_overridden
to indicate at the usage site that the super method may behave differently
than you expect.
The trick is that we want the hook function to call `super(ClassBeingHooked, self)`,
but there are potentially multiple ClassesBeingHooked. Therefore, instead
you must write `super(MyHookClass, self)` and the super method is replaced
at hook-time through bytecode modification with another one which does the
right thing.
Example usage:
@classhook(ROOT.TH1)
@super_overridden
class ChangeBehaviour(object):
def Draw(self, *args):
# Call the original draw function
result = super(ChangeBehaviour, self).Draw(*args)
# do something with the result here
return result
"""
def overridden_super(self, target, realclass):
class rootpy_overridden_super(super):
def __init__(self, cls, *args):
if cls is target:
cls = realclass
super(rootpy_overridden_super, self).__init__(cls, *args)
return rootpy_overridden_super
def __init__(self, *classes):
self.classes = classes
def hook_class(self, cls, hook):
# Attach a new class type with the original methods on it so that
# super() works as expected.
hookname = "_rootpy_{0}_OrigMethods".format(cls.__name__)
newcls = type(hookname, (), {})
cls.__bases__ = (newcls,) + cls.__bases__
# For every function-like (or property), replace `cls`'s methods
for key, value in hook.__dict__.items():
if not isinstance(value, INTERESTING):
continue
# Save the original methods onto the newcls which has been
# injected onto our bases, so that the originals can be called with
# super().
orig_method = getattr(cls, key, None)
if orig_method:
setattr(newcls, key, orig_method)
#newcls.__dict__[key] = orig_method
newmeth = value
if uses_super(newmeth):
assert getattr(hook, "__rootpy_have_super_overridden", None), (
"Hook class {0} is not decorated with @super_overridden! "
"See the ``hook`` module to understand why this must be "
"the case for all classes overridden with @classhook"
.format(hook))
# Make super behave as though the class hierarchy is what we'd
# like.
newsuper = self.overridden_super(hook, cls)
newmeth = inject_closure_values(value, super=newsuper)
setattr(cls, key, newmeth)
def __call__(self, hook):
"""
Hook the decorated class onto all `classes`.
"""
for cls in self.classes:
self.hook_class(cls, hook)
return hook
class appendclass(object):
"""
Append the methods/properties of `appender` onto `classes`. The methods
being appended must not exist on any of the target classes.
"""
def __init__(self, *classes):
self.classes = classes
def __call__(self, appender):
for appendee in self.classes:
for key, value in appender.__dict__.items():
if not isinstance(value, INTERESTING):
continue
assert not hasattr(appendee, key), (
"Don't override existing methods with appendclass")
assert not uses_super(value), ("Don't use the super class with "
"@appendclass, use @classhook instead")
setattr(appendee, key, value)
continue
return appender
|
csgomenumaker/component/debug.py | citrusCS/csgo-menu-maker | 152 | 11112584 | from .. import menu
from ..param import *
from .component import *
from . import generic
name_space(
"debug",
name="Debug",
description=(
"Debugging options that show different data on-screen, such as fps,"
" position, traces, etc."
)
)
@Component("showpos", "show_pos")
class ShowPos(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show Position"),
Desc("Show the player's position in the top left of the screen."),
Override("convar", "cl_showpos")
)
@Component("showfps", "show_fps")
class ShowFPS(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show FPS"),
Desc("Show the FPS and map name in the top left of the screen."),
Override("convar", "cl_showfps")
)
@Component("netgraph", "net_graph")
class NetGraph(generic.ChoiceVarBinary):
params = ParamObj(
Name("Net Graph"),
Desc("Show network stats and frames per second."),
Override("convar", "net_graph")
)
@Component("entityreport", "entity_report")
class EntityReport(generic.ChoiceVarBinary):
params = ParamObj(
Name("Entity Report"),
Desc("Show a list of all entities in the scene."),
Override("convar", "cl_entityreport"),
flags=["cheat"]
)
@Component("drawwireframe", "draw_wireframe")
class DrawWireframe(generic.ChoiceVarBinary):
params = ParamObj(
Name("Draw Wireframe"),
Desc("Draw a wireframe over the scene."),
Override("convar", "mat_wireframe"),
flags=["cheat"]
)
@Component("showevents", "show_events")
class ShowEvents(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show Events"),
Desc("Show entity event firing info in the top right of the screen."),
Override("convar", "cl_showevents"),
flags=["cheat"]
)
@Component("visualizetraces", "visualize_traces")
class VisualizeTraces(generic.ChoiceVarBinary):
params = ParamObj(
Name("Visualize Traces"),
Desc("Show raycast visualizations as lines."),
Override("convar", "r_visualizetraces"),
flags=["cheat"]
)
@Component("showbudget", "show_budget")
class ShowBudget(generic.Choice):
params = ParamObj(
Name("Show Render Budget"),
Desc(
"Show information about the current render budget, which tracks"
" the amounts of time each stage of rendering takes."
),
Override(
"choices",
[
{
"name": "-showbudget",
"commands": [
"-showbudget"
]
},
{
"name": "+showbudget",
"commands": [
"+showbudget"
]
}
]
),
flags=["cheat"]
)
@Component("drawskeleton", "draw_skeleton")
class DrawSkeleton(generic.ChoiceVarBinary):
params = ParamObj(
Name("Draw Skeletons"),
Desc("Draw bone entity skeletons in wireframe form."),
Override("convar", "enable_skeleton_draw"),
flags=["cheat"]
)
@Component("debugmenu", "debug_menu")
class DebugMenu(generic.FireableCmd):
params = ParamObj(
Name("Debug Menu"),
Desc("Open/Close the debug menu."),
Override("concmd", "debugsystemui"),
flags=["cheat", "needs_fireable"]
)
@Component("lockpvs", "lock_pvs")
class LockPVS(generic.ChoiceVarBinary):
params = ParamObj(
Name("Lock PVS"),
Desc(
"Lock/Unlock the PVS (Partially Visible Set) of polygons."
),
Override("convar", "r_lockpvs"),
flags=["cheat"]
)
@Component(
"drawvguitree",
"draw_vgui_tree",
"vguitree",
"vgui_tree",
"vguidrawtree",
"vgui_draw_tree"
)
class DrawVGUITree(generic.ChoiceVarBinary):
params = ParamObj(
Name("Draw VGUI Tree"),
Desc("Draw a tree of all VGUI widgets and their info."),
Override("convar", "vgui_draw_tree")
)
@Component("showsound", "show_sound")
class ShowSound(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show Sound"),
Desc(
"Show a list of currently playing sounds and their info in the top"
" right of the screen."
),
Override("convar", "snd_show"),
flags=["cheat"]
)
@Component("showlagcompensation", "show_lag_compensation")
class ShowLagCompensation(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show Lag Compensation"),
Desc("Show a lag compensated hitboxes, clientside."),
Override("convar", "sv_showlagcompensation")
)
@Component("showbullethits", "show_bullet_hits")
class ShowBulletHits(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show Bullet Hits"),
Desc("Show bullet hits as red cylinders when they hit an entity."),
Override("convar", "sv_showbullethits"),
flags=["replicated"]
)
@Component("showimpacts", "show_impacts")
class ShowImpacts(generic.ChoiceVarBinary):
params = ParamObj(
Name("Show Impacts"),
Desc("Show impacts as red/blue boxes wherever the bullet hits."),
Override("convar", "sv_showimpacts"),
flags=["replicated"]
)
name_space()
|
lgcn/train_lgcn.py | LLLjun/learn-to-cluster | 620 | 11112598 | <filename>lgcn/train_lgcn.py<gh_stars>100-1000
from __future__ import division
from collections import OrderedDict
import torch
from mmcv.runner import Runner, obj_from_dict
from mmcv.parallel import MMDataParallel
from lgcn.datasets import build_dataset, build_dataloader
from lgcn.online_evaluation import online_evaluate
def batch_processor(model, data, train_mode):
assert train_mode
pred, loss = model(data, return_loss=True)
log_vars = OrderedDict()
_, _, _, gtmat = data
acc, p, r = online_evaluate(gtmat, pred)
log_vars['loss'] = loss.item()
log_vars['accuracy'] = acc
log_vars['precision'] = p
log_vars['recall'] = r
outputs = dict(loss=loss, log_vars=log_vars, num_samples=len(gtmat))
return outputs
def train_lgcn(model, cfg, logger):
# prepare data loaders
for k, v in cfg.model['kwargs'].items():
setattr(cfg.train_data, k, v)
dataset = build_dataset(cfg.train_data)
data_loaders = [
build_dataloader(dataset,
cfg.batch_size_per_gpu,
cfg.workers_per_gpu,
train=True,
shuffle=True)
]
# train
if cfg.distributed:
raise NotImplementedError
else:
_single_train(model, data_loaders, cfg)
def build_optimizer(model, optimizer_cfg):
"""Build optimizer from configs.
"""
if hasattr(model, 'module'):
model = model.module
optimizer_cfg = optimizer_cfg.copy()
paramwise_options = optimizer_cfg.pop('paramwise_options', None)
assert paramwise_options is None
return obj_from_dict(optimizer_cfg, torch.optim,
dict(params=model.parameters()))
def _single_train(model, data_loaders, cfg):
if cfg.gpus > 1:
        raise NotImplementedError
# put model on gpus
model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda()
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
runner = Runner(model, batch_processor, optimizer, cfg.work_dir,
cfg.log_level)
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
cfg.checkpoint_config, cfg.log_config)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
|
tests/db/test_password.py | Yurzs/boto | 5,079 | 11112608 | # Copyright (c) 2010 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import unittest
import logging
import time
log= logging.getLogger('password_property_test')
log.setLevel(logging.DEBUG)
class PasswordPropertyTest(unittest.TestCase):
"""Test the PasswordProperty"""
def tearDown(self):
cls=self.test_model()
for obj in cls.all(): obj.delete()
def hmac_hashfunc(self):
import hmac
def hashfunc(msg):
return hmac.new('mysecret', msg)
return hashfunc
def test_model(self,hashfunc=None):
from boto.utils import Password
from boto.sdb.db.model import Model
from boto.sdb.db.property import PasswordProperty
import hashlib
class MyModel(Model):
password=PasswordProperty(hashfunc=hashfunc)
return MyModel
def test_custom_password_class(self):
from boto.utils import Password
from boto.sdb.db.model import Model
from boto.sdb.db.property import PasswordProperty
import hmac, hashlib
myhashfunc = hashlib.md5
## Define a new Password class
class MyPassword(Password):
hashfunc = myhashfunc #hashlib.md5 #lambda cls,msg: hmac.new('mysecret',msg)
## Define a custom password property using the new Password class
class MyPasswordProperty(PasswordProperty):
data_type=MyPassword
type_name=MyPassword.__name__
## Define a model using the new password property
class MyModel(Model):
password=MyPasswordProperty()#hashfunc=hashlib.md5)
obj = MyModel()
obj.password = '<PASSWORD>'
expected = myhashfunc('bar').hexdigest() #hmac.new('mysecret','bar').hexdigest()
log.debug("\npassword=%s\nexpected=%s" % (obj.password, expected))
self.assertTrue(obj.password == '<PASSWORD>' )
obj.save()
id= obj.id
time.sleep(5)
obj = MyModel.get_by_id(id)
self.assertEquals(obj.password, '<PASSWORD>')
self.assertEquals(str(obj.password), expected)
#hmac.new('mysecret','bar').hexdigest())
def test_aaa_default_password_property(self):
cls = self.test_model()
obj = cls(id='passwordtest')
obj.password = '<PASSWORD>'
self.assertEquals('foo', obj.password)
obj.save()
time.sleep(5)
obj = cls.get_by_id('passwordtest')
self.assertEquals('foo', obj.password)
def test_password_constructor_hashfunc(self):
import hmac
myhashfunc=lambda msg: hmac.new('mysecret', msg)
cls = self.test_model(hashfunc=myhashfunc)
obj = cls()
obj.password='<PASSWORD>'
expected = myhashfunc('hello').hexdigest()
self.assertEquals(obj.password, 'hello')
self.assertEquals(str(obj.password), expected)
obj.save()
id = obj.id
time.sleep(5)
obj = cls.get_by_id(id)
log.debug("\npassword=%s" % obj.password)
self.assertTrue(obj.password == 'hello')
if __name__ == '__main__':
import sys, os
curdir = os.path.dirname( os.path.abspath(__file__) )
srcroot = curdir + "/../.."
sys.path = [ srcroot ] + sys.path
logging.basicConfig()
log.setLevel(logging.INFO)
suite = unittest.TestLoader().loadTestsFromTestCase(PasswordPropertyTest)
unittest.TextTestRunner(verbosity=2).run(suite)
import boto
|
scripts/ios/relpath.py | Son-Le-Goog/nimbus | 2,787 | 11112619 | <gh_stars>1000+
#!/usr/bin/env python
# encoding: utf-8
"""
relpath.py
A method for calculating the relative path from one directory to another.
Originally written by <NAME>:
http://code.activestate.com/recipes/208993-compute-relative-path-from-one-directory-to-anothe/
Date: July 6, 2003
Modifications Copyright 2011 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
def pathsplit(p, rest=[]):
(h,t) = os.path.split(p)
if len(h) < 1: return [t]+rest
if len(t) < 1: return [h]+rest
return pathsplit(h,[t]+rest)
def commonpath(l1, l2, common=[]):
if len(l1) < 1: return (common, l1, l2)
if len(l2) < 1: return (common, l1, l2)
if l1[0] != l2[0]: return (common, l1, l2)
return commonpath(l1[1:], l2[1:], common+[l1[0]])
def relpath(p1, p2):
(common,l1,l2) = commonpath(pathsplit(p1), pathsplit(p2))
p = []
if len(l1) > 0:
p = [ '../' * len(l1) ]
p = p + l2
return os.path.join( *p )
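
# Quick sanity check (illustrative, assumes POSIX-style absolute paths):
if __name__ == "__main__":
    print(relpath("/a/b/c/d", "/a/b/c1/d1"))  # ../../c1/d1
    print(relpath("/a/b", "/a/b/c/d"))        # c/d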
|
Exec/gravity_tests/hse_convergence_general/hse.py | MargotF/Castro | 178 | 11112653 | #!/bin/env python
import argparse
import numpy as np
import matplotlib.pyplot as plt
import yt
def doit(outfile="hse.png"):
low = yt.load("flame_wave_128_plt01600")
med = yt.load("flame_wave_256_plt03200")
high = yt.load("flame_wave_512_plt06400")
fig, ax = plt.subplots(1,1)
for ds, l in [(low, "128"), (med, "256"), (high, "512")]:
ray = ds.ray((0, 0, 0), (2.56e3, 0, 0))
isrt = np.argsort(ray["x"])
ax.plot(ray["x"][isrt], ray["magvel"][isrt], label=l)
ax.legend(frameon=False)
ax.set_yscale("log")
fig.tight_layout()
fig.savefig(outfile)
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("outfile", type=str, nargs="?", default="hse.png")
args = p.parse_args()
doit(outfile=args.outfile)
|
alipay/aop/api/FileItem.py | articuly/alipay-sdk-python-all | 213 | 11112666 | <filename>alipay/aop/api/FileItem.py
# -*- coding: utf-8 -*-
'''
Created on 2017-12-20
@author: liuqun
'''
from alipay.aop.api.util.CommonUtils import get_mime_type
class FileItem(object):
def __init__(self, file_name=None, file_content=None, mime_type=None):
self._file_name = file_name
self._file_content = file_content
self._mime_type = mime_type
def get_file_name(self):
return self._file_name
def get_file_content(self):
return self._file_content
def get_mime_type(self):
if not self._mime_type:
self._mime_type = get_mime_type(self.get_file_content())
return self._mime_type
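
# Usage sketch (illustrative, not part of the original SDK file): when mime_type
# is omitted it is inferred from the file content via CommonUtils.get_mime_type;
# here it is passed explicitly so the example stays self-contained.
if __name__ == "__main__":
    item = FileItem(file_name="report.csv",
                    file_content=b"id,amount\n1,9.99\n",
                    mime_type="text/csv")
    print(item.get_file_name(), item.get_mime_type())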
|
codigo/Live43/exemplo_2.py | cassiasamp/live-de-python | 572 | 11112668 | <reponame>cassiasamp/live-de-python
from sys import stdout
class PrintMock:
def __enter__(self):
self.old_print = stdout.write
self.file = open('log.txt', 'a')
stdout.write = self.log
def log(self, arg):
self.file.write('{}'.format(arg))
def __exit__(self, type, value, traceback):
        stdout.write = self.old_print
        self.file.close()
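
# Usage sketch (illustrative, not part of the original example): while the
# context manager is active, everything written to stdout is appended to
# log.txt instead of being shown on screen.
if __name__ == "__main__":
    with PrintMock():
        print('captured: this line ends up in log.txt')
    print('back to normal: this line is shown on the terminal')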
|
src/pretix/base/migrations/0049_checkin.py | pajowu/pretix | 1,248 | 11112669 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-08 16:47
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pretixbase', '0048_auto_20161129_1330'),
('pretixdroid', '0002_auto_20161208_1644'),
]
state_operations = [
migrations.CreateModel(
name='Checkin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(auto_now_add=True)),
('position', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pretixdroid_checkins', to='pretixbase.OrderPosition')),
],
),
]
operations = [
migrations.SeparateDatabaseAndState(state_operations=state_operations)
]
|
recipes/Python/578465_longest_common_substring/recipe-578465.py | tdiprima/code | 2,023 | 11112672 | <reponame>tdiprima/code
#!/usr/bin/env python3.2
import numpy as np
def longest_common_substring(src, dst) :
    c = np.zeros((len(src), len(dst)), dtype=int)  # np.int was removed in recent NumPy releases
z = 0
src_m = None
dst_m = None
for i in range(len(src)) :
for j in range(len(dst)) :
if src[i] == dst[j] :
if i == 0 or j == 0 :
c[i,j] = 1
else :
c[i, j] = c[i-1, j-1] + 1
if c[i, j] > z :
z = c[i, j]
if c[i, j] == z :
src_m = (i-z+1, i+1)
dst_m = (j-z+1, j+1)
else :
c[i, j] = 0
return src_m, dst_m
>>> a = """Lorem ipsum dolor sit amet consectetur adipiscing
elit Ut id nisl quis lacus lobortis egestas id nec turpis""".split()
>>> b = """Lorem ipsum lobortis dolor sit adipiscing elit dolor
amet consectetur Ut id nisl quis lacus egestas id nec turpis""".split()
>>> src_m, dst_m = longest_common_substring(a, b)
>>> print(src_m[0], src_m[1])
8 13
>>> print(a[src_m[0]:src_m[1]])
['Ut', 'id', 'nisl', 'quis', 'lacus']
>>> print(dst_m[0], dst_m[1])
10 15
>>> print(b[dst_m[0]:dst_m[1]])
['Ut', 'id', 'nisl', 'quis', 'lacus']
|
suzieq/db/parquet/migratedb.py | foobug/suzieq | 487 | 11112677 |
from typing import Callable, Union
import pandas as pd
from pandas.core.computation.ops import UndefinedVariableError
def get_migrate_fn(table_name: str, from_vers: str,
to_vers: str) -> Union[Callable, None]:
"""Return a migration function if one is present for the table specified
:param table_name: str, Name of the table for which we need the converter
:param from_vers: str, Version number from which the conversion is needed
:param to_vers: str, Version number to which the conversion is needed
:returns: Routine to invoke to convert the data, or None
:rtype: Callable or None
"""
conversion_dict = {
'bgp-1.0-2.0': _convert_bgp_vers_1_to_2
}
return conversion_dict.get(f'{table_name}-{from_vers}-{to_vers}', None)
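
# Usage sketch (illustrative): look up the registered BGP 1.0 -> 2.0 converter
# and run it over a previously read dataframe; ``bgp_df`` is a hypothetical
# pandas DataFrame that must carry the 'sqvers' column.
#
#     migrate = get_migrate_fn('bgp', '1.0', '2.0')
#     if migrate is not None:
#         bgp_df = migrate(bgp_df)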
def _convert_bgp_vers_1_to_2(df: pd.DataFrame) -> pd.DataFrame:
"""Convert BGP schema from version 1.0 to version 2.0
The dataframe MUST contain the sqvers column
"""
def set_community(x):
communities = []
if (x.afi != 'l2vpn'):
return communities
if x.evpnSendCommunity == 'extendedAndStandard':
communities = ['standard', 'extended']
elif x.evpnSendCommunity == 'extended':
communities = ['extended']
elif x.evpnSendCommunity == 'standard':
communities = ['standard']
return communities
converted_df = pd.DataFrame()
for pfx in ['v4', 'v6', 'evpn']:
try:
newdf = df.query(f'sqvers == "1.0" and {pfx}Enabled').reset_index()
except UndefinedVariableError:
newdf = pd.DataFrame()
if not newdf.empty:
if pfx == 'evpn':
newdf['afi'] = 'l2vpn'
newdf['safi'] = 'evpn'
else:
newdf['safi'] = 'unicast'
if pfx == 'v4':
newdf['afi'] = 'ipv4'
else:
newdf['afi'] = 'ipv6'
newdf = newdf.rename(columns={
f'{pfx}PfxRx': 'pfxRx',
f'{pfx}PfxTx': 'pfxTx',
f'{pfx}IngressRmap': 'ingressRmap',
f'{pfx}EgressRmap': 'egressRmap',
f'{pfx}defaultsent': 'defOriginate',
})
newdf['afisAdvOnly'] = [[] for _ in range(len(newdf))]
newdf['afisRcvOnly'] = [[] for _ in range(len(newdf))]
newdf['communityTypes'] = [[] for _ in range(len(newdf))]
converted_df = pd.concat([converted_df, newdf])
if not converted_df.empty:
converted_df['afisAdvOnly'] += converted_df.apply(
lambda x: ['ipv4 unicast']
if (x.v4Advertised and not x.v4Received) else [], axis=1)
converted_df['afisAdvOnly'] += converted_df.apply(
lambda x: ['ipv6 unicast']
if (x.v6Advertised and not x.v6Received) else [], axis=1)
converted_df['afisAdvOnly'] += converted_df.apply(
lambda x: ['l2vpn evpn']
if (x.evpnAdvertised and not x.evpnReceived) else [], axis=1)
converted_df['afisRcvOnly'] += converted_df.apply(
lambda x: ['ipv4 unicast']
if (not x.v4Advertised and x.v4Received) else [], axis=1)
converted_df['afisRcvOnly'] += converted_df.apply(
lambda x: ['ipv6 unicast']
if (not x.v6Advertised and x.v6Received) else [], axis=1)
converted_df['afisRcvOnly'] += converted_df.apply(
lambda x: ['l2vpn evpn']
if (not x.evpnAdvertised and x.evpnReceived) else [], axis=1)
converted_df['communityTypes'] += converted_df.apply(set_community,
axis=1)
converted_df['sqvers'] = '2.0'
unconverted_df = df.query('sqvers != "1.0"').reset_index()
final_df = pd.concat([converted_df, unconverted_df])
return final_df.reset_index(drop=True)
else:
return df
|
app/magtape/test/test_routes.py | gitter-badger/magtape | 140 | 11112683 | <reponame>gitter-badger/magtape<filename>app/magtape/test/test_routes.py<gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2020 T-Mobile, USA, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
import json
import sys
import unittest
from unittest.mock import patch
sys.path.append("./app/magtape/")
from magtape import magtape
class TestRoutes(unittest.TestCase):
def setUp(self):
self.app = magtape.app.test_client()
self.app.testing = True
self.k8s_events_enabled = "FALSE"
def tearDown(self):
pass
def test_healthz(self):
"""Method to test webhook /healthz route"""
result = self.app.get("/healthz")
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data)["health"], "ok")
self.assertEqual(json.loads(result.data)["pod_name"], "magtape-abc1234")
@patch("magtape.magtape.build_response_message", return_value="")
def test_webhook_all_pass(self, magtape_build_response_message_function):
"""Method to test webhook with all fail response from OPA sidecar"""
with open("./testing/deployments/test-deploy01.json") as json_file:
request_object_json = json.load(json_file)
result = self.app.post(
"/",
data=json.dumps(request_object_json),
headers={"Content-Type": "application/json"},
)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data)["response"]["allowed"], True)
@patch("magtape.magtape.k8s_events_enabled", "FALSE")
@patch(
"magtape.magtape.build_response_message",
return_value='[FAIL] HIGH - Found privileged Security Context for container "test-deploy02" (MT2001), [FAIL] LOW - Liveness Probe missing for container "test-deploy02" (MT1001), [FAIL] LOW - Readiness Probe missing for container "test-deploy02" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container "test-deploy02" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container "test-deploy02" (MT1004)',
)
def test_webhook_all_fail(self, build_response_message_function):
"""Method to test webhook with all fail response from OPA sidecar"""
with open("./testing/deployments/test-deploy02.json") as json_file:
request_object_json = json.load(json_file)
result = self.app.post(
"/",
data=json.dumps(request_object_json),
headers={"Content-Type": "application/json"},
)
self.assertEqual(result.status_code, 200)
self.assertEqual(json.loads(result.data)["response"]["allowed"], False)
self.assertEqual(
json.loads(result.data)["response"]["status"]["message"],
'[FAIL] HIGH - Found privileged Security Context for container "test-deploy02" (MT2001), [FAIL] LOW - Liveness Probe missing for container "test-deploy02" (MT1001), [FAIL] LOW - Readiness Probe missing for container "test-deploy02" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container "test-deploy02" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container "test-deploy02" (MT1004)',
)
if __name__ == "__main__":
unittest.main()
|
tartiflette/coercers/outputs/object_coercer.py | matt-koevort/tartiflette | 530 | 11112698 | from typing import Any, Dict, List
from tartiflette.coercers.outputs.common import complete_object_value
from tartiflette.coercers.outputs.null_coercer import null_coercer_wrapper
__all__ = ("object_coercer",)
@null_coercer_wrapper
async def object_coercer(
result: Any,
info: "ResolveInfo",
execution_context: "ExecutionContext",
field_nodes: List["FieldNode"],
path: "Path",
object_type: "GraphQLObjectType",
) -> Dict[str, Any]:
"""
Computes the value of an object.
:param result: resolved value
:param info: information related to the execution and the resolved field
:param execution_context: instance of the query execution context
:param field_nodes: AST nodes related to the resolved field
:param path: the path traveled until this resolver
:param object_type: the GraphQLObjectType instance of the object
:type result: Any
:type info: ResolveInfo
:type execution_context: ExecutionContext
:type field_nodes: List[FieldNode]
:type path: Path
:type object_type: GraphQLObjectType
:return: the computed value
:rtype: Dict[str, Any]
"""
# pylint: disable=unused-argument
return await complete_object_value(
result, info, execution_context, field_nodes, path, object_type
)
|
src/sh/run_all_python_tests.py | vb-wayne/paragraph | 111 | 11112703 | #! /usr/bin/env python3
import os
import sys
import unittest
def main():
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "python"))
testsuite = unittest.TestLoader().discover(os.path.join(project_dir, "test"))
lib_results = unittest.TextTestRunner(verbosity=3).run(testsuite)
print("%d tests run in test/, %d failures, %d errors" %
(lib_results.testsRun, len(lib_results.failures), len(lib_results.errors)))
if len(lib_results.failures) > 0:
sys.exit(1)
if len(lib_results.errors) > 0:
sys.exit(1)
if __name__ == '__main__':
main()
|
meshrcnn/utils/projtransform.py | MAYURGAIKWAD/meshrcnn | 1,028 | 11112704 | <gh_stars>1000+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
class ProjectiveTransform(object):
"""
Projective Transformation in PyTorch:
Follows a similar design to skimage.ProjectiveTransform
https://github.com/scikit-image/scikit-image/blob/master/skimage/transform/_geometric.py#L494
The implementation assumes batched representations,
so every tensor is assumed to be of shape batch x dim1 x dim2 x etc.
"""
def __init__(self, matrix=None):
if matrix is None:
# default to an identity transform
matrix = torch.eye(3).view(1, 3, 3)
if matrix.ndim != 3 and matrix.shape[-1] != 3 and matrix.shape[-2] != 3:
raise ValueError("Shape of transformation matrix should be Bx3x3")
self.params = matrix
@property
def _inv_matrix(self):
return torch.inverse(self.params)
def _apply_mat(self, coords, matrix):
"""
Applies matrix transformation
Input:
coords: FloatTensor of shape BxNx2
matrix: FloatTensor of shape Bx3x3
Returns:
new_coords: FloatTensor of shape BxNx2
"""
if coords.shape[0] != matrix.shape[0]:
raise ValueError("Mismatch in the batch dimension")
if coords.ndim != 3 or coords.shape[-1] != 2:
raise ValueError("Input tensors should be of shape BxNx2")
# append 1s, shape: BxNx2 -> BxNx3
src = torch.cat(
[
coords,
torch.ones(
(coords.shape[0], coords.shape[1], 1), device=coords.device, dtype=torch.float32
),
],
dim=2,
)
dst = torch.bmm(matrix, src.transpose(1, 2)).transpose(1, 2)
# rescale to homogeneous coordinates
dst[:, :, 0] /= dst[:, :, 2]
dst[:, :, 1] /= dst[:, :, 2]
return dst[:, :, :2]
def __call__(self, coords):
"""Apply forward transformation.
Input:
coords: FloatTensor of shape BxNx2
Output:
            coords: FloatTensor of shape BxNx2
"""
return self._apply_mat(coords, self.params)
def inverse(self, coords):
"""Apply inverse transformation.
Input:
coords: FloatTensor of shape BxNx2
Output:
coords: FloatTensor of shape BxNx2
"""
return self._apply_mat(coords, self._inv_matrix)
def estimate(self, src, dst, method="svd"):
"""
Estimates the matrix to transform src to dst.
Input:
src: FloatTensor of shape BxNx2
dst: FloatTensor of shape BxNx2
method: Specifies the method to solve the linear system
"""
if src.shape != dst.shape:
raise ValueError("src and dst tensors but be of same shape")
if src.ndim != 3 or src.shape[-1] != 2:
raise ValueError("Input should be of shape BxNx2")
device = src.device
batch = src.shape[0]
# Center and normalize image points for better numerical stability.
try:
src_matrix, src = _center_and_normalize_points(src)
dst_matrix, dst = _center_and_normalize_points(dst)
except ZeroDivisionError:
self.params = torch.zeros((batch, 3, 3), device=device)
return False
xs = src[:, :, 0]
ys = src[:, :, 1]
xd = dst[:, :, 0]
yd = dst[:, :, 1]
rows = src.shape[1]
# params: a0, a1, a2, b0, b1, b2, c0, c1, (c3=1)
A = torch.zeros((batch, rows * 2, 9), device=device, dtype=torch.float32)
A[:, :rows, 0] = xs
A[:, :rows, 1] = ys
A[:, :rows, 2] = 1
A[:, :rows, 6] = -xd * xs
A[:, :rows, 7] = -xd * ys
A[:, rows:, 3] = xs
A[:, rows:, 4] = ys
A[:, rows:, 5] = 1
A[:, rows:, 6] = -yd * xs
A[:, rows:, 7] = -yd * ys
A[:, :rows, 8] = xd
A[:, rows:, 8] = yd
if method == "svd":
A = A.cpu() # faster computation in cpu
# Solve for the nullspace of the constraint matrix.
_, _, V = torch.svd(A, some=False)
V = V.transpose(1, 2)
H = torch.ones((batch, 9), device=device, dtype=torch.float32)
H[:, :-1] = -V[:, -1, :-1] / V[:, -1, -1].view(-1, 1)
H = H.reshape(batch, 3, 3)
# H[:, 2, 2] = 1.0
elif method == "least_sqr":
A = A.cpu() # faster computation in cpu
# Least square solution
x, _ = torch.solve(-A[:, :, -1].view(-1, 1), A[:, :, :-1])
H = torch.cat([-x, torch.ones((1, 1), dtype=x.dtype, device=device)])
H = H.reshape(3, 3)
elif method == "inv":
# x = inv(A'A)*A'*b
invAtA = torch.inverse(torch.mm(A[:, :-1].t(), A[:, :-1]))
Atb = torch.mm(A[:, :-1].t(), -A[:, -1].view(-1, 1))
x = torch.mm(invAtA, Atb)
H = torch.cat([-x, torch.ones((1, 1), dtype=x.dtype, device=device)])
H = H.reshape(3, 3)
else:
raise ValueError("method {} undefined".format(method))
# De-center and de-normalize
self.params = torch.bmm(torch.bmm(torch.inverse(dst_matrix), H), src_matrix)
return True
def _center_and_normalize_points(points):
"""Center and normalize points.
The points are transformed in a two-step procedure that is expressed
as a transformation matrix. The matrix of the resulting points is usually
better conditioned than the matrix of the original points.
Center the points, such that the new coordinate system has its
origin at the centroid of the image points.
Normalize the points, such that the mean distance from the points
to the origin of the coordinate system is sqrt(2).
Inputs:
points: FloatTensor of shape BxNx2 of the coordinates of the image points.
Outputs:
matrix: FloatTensor of shape Bx3x3 of the transformation matrix to obtain
the new points.
new_points: FloatTensor of shape BxNx2 of the transformed image points.
References
----------
.. [1] <NAME>. "In defense of the eight-point algorithm."
Pattern Analysis and Machine Intelligence, IEEE Transactions on 19.6
(1997): 580-593.
"""
device = points.device
centroid = torch.mean(points, 1, keepdim=True)
rms = torch.sqrt(torch.sum((points - centroid) ** 2.0, dim=(1, 2)) / points.shape[1])
norm_factor = torch.sqrt(torch.tensor([2.0], device=device)) / rms
matrix = torch.zeros((points.shape[0], 3, 3), dtype=torch.float32, device=device)
matrix[:, 0, 0] = norm_factor
matrix[:, 0, 2] = -norm_factor * centroid[:, 0, 0]
matrix[:, 1, 1] = norm_factor
matrix[:, 1, 2] = -norm_factor * centroid[:, 0, 1]
matrix[:, 2, 2] = 1.0
# matrix = torch.tensor(
# [
# [norm_factor, 0.0, -norm_factor * centroid[0]],
# [0.0, norm_factor, -norm_factor * centroid[1]],
# [0.0, 0.0, 1.0],
# ], device=device, dtype=torch.float32)
pointsh = torch.cat(
[
points,
torch.ones((points.shape[0], points.shape[1], 1), device=device, dtype=torch.float32),
],
dim=2,
)
new_pointsh = torch.bmm(matrix, pointsh.transpose(1, 2)).transpose(1, 2)
new_points = new_pointsh[:, :, :2]
new_points[:, :, 0] /= new_pointsh[:, :, 2]
new_points[:, :, 1] /= new_pointsh[:, :, 2]
return matrix, new_points
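# A minimal self-check sketch (all tensor shapes and values below are
# illustrative assumptions, not fixtures from the repo): build a known batched
# translation homography, map random points through it, then recover the
# transform with estimate().
if __name__ == "__main__":
    torch.manual_seed(0)
    src = torch.rand(2, 8, 2) * 100.0      # batch of 2, 8 points each
    gt = torch.eye(3).repeat(2, 1, 1)      # start from identity homographies
    gt[:, 0, 2] = 5.0                      # translate x by +5
    gt[:, 1, 2] = -3.0                     # translate y by -3
    known = ProjectiveTransform(gt)
    dst = known(src)
    recovered = ProjectiveTransform()
    ok = recovered.estimate(src, dst, method="svd")
    # the round-trip error should be small when the estimate succeeds
    print(ok, (recovered(src) - dst).abs().max().item())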
|
projects/FastRetri/fastretri/config.py | NTU-ROSE/fast-reid | 2,194 | 11112735 | # encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
"""
def add_retri_config(cfg):
_C = cfg
_C.TEST.RECALLS = [1, 2, 4, 8, 16, 32]
|
linebot/models/imagemap.py | naotokuwa/line-bot-sdk-python | 1,563 | 11112740 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.imagemap module."""
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
from .send_messages import SendMessage
class ImagemapSendMessage(SendMessage):
"""ImagemapSendMessage.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
Imagemaps are images with one or more links. You can assign one link for the entire image
or multiple links which correspond to different regions of the image.
"""
def __init__(self, base_url=None, alt_text=None, base_size=None,
video=None, actions=None, **kwargs):
"""__init__ method.
:param str base_url: Base URL of image.
HTTPS
:param str alt_text: Alternative text
:param base_size: Width and height of base image
:type base_size: :py:class:`linebot.models.imagemap.BaseSize`
:param video: Video in imagemap message
:type video: :py:class:`linebot.models.imagemap.Video`
:param actions: Action when tapped
:type actions: list[T <= :py:class:`linebot.models.imagemap.ImagemapAction`]
:param kwargs:
"""
super(ImagemapSendMessage, self).__init__(**kwargs)
self.type = 'imagemap'
self.base_url = base_url
self.alt_text = alt_text
self.base_size = self.get_or_new_from_json_dict(
base_size, BaseSize
)
self.video = self.get_or_new_from_json_dict(
video, Video
)
new_actions = []
if actions:
for action in actions:
action_obj = self.get_or_new_from_json_dict_with_types(
action, {
'uri': URIImagemapAction,
'message': MessageImagemapAction
}
)
if action_obj:
new_actions.append(action_obj)
self.actions = new_actions
class BaseSize(Base):
"""BaseSize.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
"""
def __init__(self, width=None, height=None, **kwargs):
"""__init__ method.
:param int width: Width of base image (set to 1040px)
:param int height: Height of base image(set to the height
that corresponds to a width of 1040px
:param kwargs:
"""
super(BaseSize, self).__init__(**kwargs)
self.width = width
self.height = height
class ImagemapAction(with_metaclass(ABCMeta, Base)):
"""ImagemapAction.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
"""
def __init__(self, **kwargs):
"""__init__ method.
:param kwargs:
"""
super(ImagemapAction, self).__init__(**kwargs)
self.type = None
class URIImagemapAction(ImagemapAction):
"""URIImagemapAction.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
"""
def __init__(self, link_uri=None, area=None, **kwargs):
"""__init__ method.
:param str link_uri: Webpage URL
:param area: Defined tappable area
:type area: :py:class:`linebot.models.imagemap.ImagemapArea`
:param kwargs:
"""
super(URIImagemapAction, self).__init__(**kwargs)
self.type = 'uri'
self.link_uri = link_uri
self.area = self.get_or_new_from_json_dict(area, ImagemapArea)
class MessageImagemapAction(ImagemapAction):
"""MessageImagemapAction.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
"""
def __init__(self, text=None, area=None, **kwargs):
"""__init__ method.
:param str text: Message to send
:param area: Defined tappable area
:type area: :py:class:`linebot.models.imagemap.ImagemapArea`
:param kwargs:
"""
super(MessageImagemapAction, self).__init__(**kwargs)
self.type = 'message'
self.text = text
self.area = self.get_or_new_from_json_dict(area, ImagemapArea)
class ImagemapArea(Base):
"""ImagemapArea.
https://developers.line.biz/en/reference/messaging-api/#imagemap-area-object
Defines the size of the full imagemap with the width as 1040px.
The top left is used as the origin of the area.
"""
def __init__(self, x=None, y=None, width=None, height=None, **kwargs):
"""__init__ method.
:param int x: Horizontal position of the tappable area
:param int y: Vertical position of the tappable area
:param int width: Width of the tappable area
:param int height: Height of the tappable area
:param kwargs:
"""
super(ImagemapArea, self).__init__(**kwargs)
self.x = x
self.y = y
self.width = width
self.height = height
class Video(Base):
"""Video.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
Defines the properties of the video object in imagemap.
"""
def __init__(self, original_content_url=None, preview_image_url=None,
area=None, external_link=None, **kwargs):
"""__init__ method.
:param str original_content_url: URL of the video file
:param str preview_image_url: URL of the preview image
:param area: Defined video area
:type area: :py:class:`linebot.models.imagemap.ImagemapArea`
:param external_link: Defined video external link
:type external_link: :py:class:`linebot.models.imagemap.ExternalLink`
:param kwargs:
"""
super(Video, self).__init__(**kwargs)
self.original_content_url = original_content_url
self.preview_image_url = preview_image_url
self.area = self.get_or_new_from_json_dict(area, ImagemapArea)
self.external_link = self.get_or_new_from_json_dict(external_link, ExternalLink)
class ExternalLink(Base):
"""ExternalLink.
https://developers.line.biz/en/reference/messaging-api/#imagemap-message
Defines URL and label of external link in video.
"""
def __init__(self, link_uri=None, label=None, **kwargs):
"""__init__ method.
:param str link_uri: Webpage URL
:param str label: Label
:param kwargs:
"""
super(ExternalLink, self).__init__(**kwargs)
self.link_uri = link_uri
self.label = label
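# A minimal construction sketch: the URLs, dimensions and texts below are
# placeholder assumptions chosen only to show how the pieces fit together.
if __name__ == '__main__':
    imagemap_message = ImagemapSendMessage(
        base_url='https://example.com/imagemap',
        alt_text='Imagemap sample',
        base_size=BaseSize(width=1040, height=1040),
        actions=[
            URIImagemapAction(
                link_uri='https://example.com/',
                area=ImagemapArea(x=0, y=0, width=520, height=1040),
            ),
            MessageImagemapAction(
                text='hello',
                area=ImagemapArea(x=520, y=0, width=520, height=1040),
            ),
        ],
    )
    print(imagemap_message.base_url, len(imagemap_message.actions))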
|
torch/optim/rprop.py | Hacky-DH/pytorch | 60,067 | 11112748 | import torch
from . import _functional as F
from .optimizer import Optimizer
class Rprop(Optimizer):
r"""Implements the resilient backpropagation algorithm.
.. math::
\begin{aligned}
&\rule{110mm}{0.4pt} \\
&\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta)
\text{ (objective)}, \\
&\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
\text{ (step sizes)} \\
&\textbf{initialize} : g^0_{prev} \leftarrow 0,
\: \eta_0 \leftarrow \text{lr (learning rate)} \\
&\rule{110mm}{0.4pt} \\
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
&\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
&\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\
&\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\
&\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
\Gamma_{max}) \\
&\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\
&\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
\Gamma_{min}) \\
&\hspace{10mm} \textbf{else} \: \\
&\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\
&\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\
&\hspace{5mm}g_{prev} \leftarrow g_t \\
&\rule{110mm}{0.4pt} \\[-1.ex]
&\bf{return} \: \theta_t \\[-1.ex]
&\rule{110mm}{0.4pt} \\[-1.ex]
\end{aligned}
For further details regarding the algorithm we refer to the paper
`A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that
are multiplicative increase and decrease factors
(default: (0.5, 1.2))
step_sizes (Tuple[float, float], optional): a pair of minimal and
maximal allowed step sizes (default: (1e-6, 50))
"""
def __init__(self, params, lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50)):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 < etas[0] < 1.0 < etas[1]:
raise ValueError("Invalid eta values: {}, {}".format(etas[0], etas[1]))
defaults = dict(lr=lr, etas=etas, step_sizes=step_sizes)
super(Rprop, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params = []
grads = []
prevs = []
step_sizes = []
for p in group['params']:
if p.grad is None:
continue
params.append(p)
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Rprop does not support sparse gradients')
grads.append(grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['prev'] = torch.zeros_like(p, memory_format=torch.preserve_format)
state['step_size'] = grad.new().resize_as_(grad).fill_(group['lr'])
prevs.append(state['prev'])
step_sizes.append(state['step_size'])
etaminus, etaplus = group['etas']
step_size_min, step_size_max = group['step_sizes']
state['step'] += 1
F.rprop(params,
grads,
prevs,
step_sizes,
step_size_min=step_size_min,
step_size_max=step_size_max,
etaminus=etaminus,
etaplus=etaplus)
return loss
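# A small self-contained sketch on a toy quadratic objective (the initial
# parameter values and the iteration count are arbitrary illustrations):
if __name__ == "__main__":
    param = torch.nn.Parameter(torch.tensor([2.0, -3.0]))
    optimizer = Rprop([param], lr=1e-2)
    for _ in range(100):
        optimizer.zero_grad()
        loss = (param ** 2).sum()
        loss.backward()
        optimizer.step()
    print(param.detach())  # should move close to the minimizer at the origin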
|
semtorch/models/archs/backbones/mobilenet.py | WaterKnight1998/SemTorch | 145 | 11112778 | """MobileNet and MobileNetV2."""
import torch.nn as nn
from .registry import BACKBONE_REGISTRY
from ...modules import _ConvBNReLU, _DepthwiseConv, InvertedResidual
from ...config import cfg
__all__ = ['MobileNet', 'MobileNetV2']
class MobileNet(nn.Module):
def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d):
super(MobileNet, self).__init__()
multiplier = cfg.MODEL.BACKBONE_SCALE
conv_dw_setting = [
[64, 1, 1],
[128, 2, 2],
[256, 2, 2],
[512, 6, 2],
[1024, 2, 2]]
input_channels = int(32 * multiplier) if multiplier > 1.0 else 32
features = [_ConvBNReLU(3, input_channels, 3, 2, 1, norm_layer=norm_layer)]
for c, n, s in conv_dw_setting:
out_channels = int(c * multiplier)
for i in range(n):
stride = s if i == 0 else 1
features.append(_DepthwiseConv(input_channels, out_channels, stride, norm_layer))
input_channels = out_channels
self.last_inp_channels = int(1024 * multiplier)
features.append(nn.AdaptiveAvgPool2d(1))
self.features = nn.Sequential(*features)
self.classifier = nn.Linear(int(1024 * multiplier), num_classes)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0), x.size(1)))
return x
class MobileNetV2(nn.Module):
def __init__(self, num_classes=1000, norm_layer=nn.BatchNorm2d):
super(MobileNetV2, self).__init__()
output_stride = cfg.MODEL.OUTPUT_STRIDE
self.multiplier = cfg.MODEL.BACKBONE_SCALE
if output_stride == 32:
dilations = [1, 1]
elif output_stride == 16:
dilations = [1, 2]
elif output_stride == 8:
dilations = [2, 4]
else:
raise NotImplementedError
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1]]
# building first layer
input_channels = int(32 * self.multiplier) if self.multiplier > 1.0 else 32
# last_channels = int(1280 * multiplier) if multiplier > 1.0 else 1280
self.conv1 = _ConvBNReLU(3, input_channels, 3, 2, 1, relu6=True, norm_layer=norm_layer)
# building inverted residual blocks
self.planes = input_channels
self.block1 = self._make_layer(InvertedResidual, self.planes, inverted_residual_setting[0:1],
norm_layer=norm_layer)
self.block2 = self._make_layer(InvertedResidual, self.planes, inverted_residual_setting[1:2],
norm_layer=norm_layer)
self.block3 = self._make_layer(InvertedResidual, self.planes, inverted_residual_setting[2:3],
norm_layer=norm_layer)
self.block4 = self._make_layer(InvertedResidual, self.planes, inverted_residual_setting[3:5],
dilations[0], norm_layer=norm_layer)
self.block5 = self._make_layer(InvertedResidual, self.planes, inverted_residual_setting[5:],
dilations[1], norm_layer=norm_layer)
self.last_inp_channels = self.planes
# building last several layers
# features = list()
# features.append(_ConvBNReLU(input_channels, last_channels, 1, relu6=True, norm_layer=norm_layer))
# features.append(nn.AdaptiveAvgPool2d(1))
# self.features = nn.Sequential(*features)
#
# self.classifier = nn.Sequential(
# nn.Dropout2d(0.2),
# nn.Linear(last_channels, num_classes))
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.zeros_(m.bias)
def _make_layer(self, block, planes, inverted_residual_setting, dilation=1, norm_layer=nn.BatchNorm2d):
features = list()
for t, c, n, s in inverted_residual_setting:
out_channels = int(c * self.multiplier)
stride = s if dilation == 1 else 1
features.append(block(planes, out_channels, stride, t, dilation, norm_layer))
planes = out_channels
for i in range(n - 1):
features.append(block(planes, out_channels, 1, t, norm_layer=norm_layer))
planes = out_channels
self.planes = planes
return nn.Sequential(*features)
def forward(self, x):
x = self.conv1(x)
x = self.block1(x)
c1 = self.block2(x)
c2 = self.block3(c1)
c3 = self.block4(c2)
c4 = self.block5(c3)
# x = self.features(x)
# x = self.classifier(x.view(x.size(0), x.size(1)))
return c1, c2, c3, c4
@BACKBONE_REGISTRY.register()
def mobilenet_v1(norm_layer=nn.BatchNorm2d):
return MobileNet(norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def mobilenet_v2(norm_layer=nn.BatchNorm2d):
return MobileNetV2(norm_layer=norm_layer)
|
tests/syntax/def_name_is_parameter_and_global.py | matan-h/friendly | 287 | 11112850 |
"""Should raise SyntaxError: name 'x' is parameter and global
"""
def f(x):
global x
|
data_collection/gazette/spiders/base/dosp.py | itepifanio/querido-diario | 268 | 11112864 | import base64
import datetime
import json
import dateparser
import scrapy
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class DospGazetteSpider(BaseGazetteSpider):
allowed_domains = ["dosp.com.br", "imprensaoficialmunicipal.com.br"]
# Must be defined into child classes
code = None
start_date = None
def start_requests(self):
FORMAT_DATE = "%Y-%m-%d"
target_date = self.start_date
end_date = datetime.date.today()
while target_date <= end_date:
            from_date = target_date.strftime(FORMAT_DATE)
target_date = target_date + datetime.timedelta(weeks=1)
to_date = target_date.strftime(FORMAT_DATE)
yield scrapy.Request(
f"https://dosp.com.br/api/index.php/dioedata.js/{self.code}/{from_data}/{to_date}?callback=dioe"
)
def parse(self, response):
# The response are in a javascript format, then needs some clean up
data = json.loads(response.text[6:-2])
for item in data["data"]:
code = item["iddo"]
code = str(code).encode("ascii")
pdf_code = base64.b64encode(code).decode("ascii")
file_url = f"https://dosp.com.br/exibe_do.php?i={pdf_code}"
edition_number = item["edicao_do"]
date = dateparser.parse(item["data"]).date()
yield Gazette(
date=date,
file_urls=[file_url],
edition_number=edition_number,
power="executive_legislative",
)
|
scripts/19-reshelve.py | jmviz/xd | 179 | 11112866 | #!/usr/bin/env python3
# Usage: $0 [-c <corpus>] <regex> <pubid>
#
# rewrites receipts.tsv and fills in any blanks based on regex
#
# git mv all .xd with pubid of <src> to have a pubid of <dest> (simple file rename)
#
#
import re
from xdfile import utils, metadatabase as metadb, catalog
def main():
args = utils.get_args()
all_receipts = metadb.xd_receipts_header
receipts = metadb.xd_receipts_rows()
rids = set() # set of ReceiptId
for r in receipts:
oldpubid = ""
oldpubid = utils.parse_pubid(r.xdid or '')
newpubid = catalog.find_pubid("|".join((str(x) for x in r)))
d = r._asdict()
if newpubid and newpubid != oldpubid:
seqnum = utils.parse_seqnum(r.xdid or r.SourceFilename)
if seqnum:
newxdid = newpubid + seqnum
utils.info("changing xdid from '%s' to '%s'" % (r.xdid, newxdid))
d["xdid"] = newxdid
else:
utils.info("no date or number in xdid, not reshelving")
all_receipts += metadb.xd_receipts_row(**d)
open(metadb.RECEIPTS_TSV, 'w').write(all_receipts)
main()
|
clinica/iotools/converters/oasis_to_bids/oasis_to_bids_cli.py | Raelag0112/clinica | 135 | 11112868 | import click
from clinica.iotools.converters import cli_param
@click.command(name="oasis-to-bids")
@cli_param.dataset_directory
@cli_param.clinical_data_directory
@cli_param.bids_directory
def cli(
dataset_directory: str,
clinical_data_directory: str,
bids_directory: str,
) -> None:
"""OASIS to BIDS converter.
Convert the imaging and clinical data of OASIS (http://oasis-brains.org/), located in DATASET_DIRECTORY and
CLINICAL_DATA_DIRECTORY respectively, to a BIDS dataset in the target BIDS_DIRECTORY.
"""
from clinica.iotools.converters.oasis_to_bids.oasis_to_bids import OasisToBids
oasis_to_bids = OasisToBids()
oasis_to_bids.convert_images(dataset_directory, bids_directory)
oasis_to_bids.convert_clinical_data(clinical_data_directory, bids_directory)
if __name__ == "__main__":
cli()
|
omega_miya/database/model/bot_self.py | rinrini001/omega-miya | 120 | 11112881 | """
@Author : Ailitonia
@Date : 2021/05/23 19:32
@FileName : bot_self.py
@Project : nonebot2_miya
@Description : BotSelf Table Model
@GitHub : https://github.com/Ailitonia
@Software : PyCharm
"""
from omega_miya.database.database import BaseDB
from omega_miya.database.class_result import Result
from omega_miya.database.tables import BotSelf
from datetime import datetime
from sqlalchemy.future import select
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
class DBBot(object):
def __init__(self, self_qq: int):
self.self_qq = self_qq
async def id(self) -> Result.IntResult:
async_session = BaseDB().get_async_session()
async with async_session() as session:
async with session.begin():
try:
session_result = await session.execute(
select(BotSelf.id).where(BotSelf.self_qq == self.self_qq)
)
bot_table_id = session_result.scalar_one()
result = Result.IntResult(error=False, info='Success', result=bot_table_id)
except NoResultFound:
result = Result.IntResult(error=True, info='NoResultFound', result=-1)
except MultipleResultsFound:
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
async def exist(self) -> bool:
result = await self.id()
return result.success()
async def upgrade(self, status: int = 0, info: str = None) -> Result.IntResult:
async_session = BaseDB().get_async_session()
async with async_session() as session:
try:
async with session.begin():
try:
                        # If a record already exists, update its stored info
session_result = await session.execute(
select(BotSelf).where(BotSelf.self_qq == self.self_qq)
)
exist_bot = session_result.scalar_one()
exist_bot.status = status
if info:
exist_bot.info = info
exist_bot.updated_at = datetime.now()
result = Result.IntResult(error=False, info='Success upgraded', result=0)
except NoResultFound:
                        # Otherwise add a new record to the table
new_bot = BotSelf(self_qq=self.self_qq, status=status, info=info,
created_at=datetime.now())
session.add(new_bot)
result = Result.IntResult(error=False, info='Success added', result=0)
await session.commit()
except MultipleResultsFound:
await session.rollback()
result = Result.IntResult(error=True, info='MultipleResultsFound', result=-1)
except Exception as e:
await session.rollback()
result = Result.IntResult(error=True, info=repr(e), result=-1)
return result
|
examples/pyro/pyro_logregression.py | PGM-Lab/InferPy | 140 | 11112894 | ### Setting up
import torch
import pyro
from pyro.distributions import Normal, Binomial
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam
from pyro.contrib.autoguide import AutoDiagonalNormal
d = 2
N = 1000
### 17
### Model definition ###
def log_reg(x_data=None, y_data=None):
w = pyro.sample("w", Normal(torch.zeros(d), torch.ones(d)))
w0 = pyro.sample("w0", Normal(0., 1.))
with pyro.plate("map", N):
x = pyro.sample("x", Normal(torch.zeros(d), 2).to_event(1), obs=x_data)
logits = (w0 + x @ torch.FloatTensor(w)).squeeze(-1)
y = pyro.sample("pred", Binomial(logits = logits), obs=y_data)
return x,y
qmodel = AutoDiagonalNormal(log_reg)
### 37
#### Sample from prior model
sampler = pyro.condition(log_reg, data={"w0": 0, "w": [2,1]})
x_train, y_train = sampler()
#### Inference
optim = Adam({"lr": 0.1})
svi = SVI(log_reg, qmodel, optim, loss=Trace_ELBO(), num_samples=10)
num_iterations = 10000
pyro.clear_param_store()
for j in range(num_iterations):
# calculate the loss and take a gradient step
loss = svi.step(x_train, y_train)
print("[iteration %04d] loss: %.4f" % (j + 1, loss / len(x_train)))
#### Usage of the inferred model
# Print the parameters
w_post = qmodel()["w"]
w0_post = qmodel()["w0"]
print(w_post, w0_post)
# Sample from the posterior
sampler_post = pyro.condition(log_reg, data={"w0": w0_post, "w": w_post})
x_gen, y_gen = sampler_post()
print(x_gen, y_gen)
##### Plot the results
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 5))
for c in [0, 1]:
x_gen_c = x_gen[y_gen.flatten() == c, :]
plt.plot(x_gen_c[:, 0], x_gen_c[:, 1], 'bx' if c == 0 else 'rx')
plt.show()
### 90
|
src/python/twitter/common/python/base.py | zhouyijiaren/commons | 1,143 | 11112903 | from __future__ import absolute_import
from pex.base import *
|
examples/wide_form_violinplot.py | amirhosseindavoody/seaborn | 8,852 | 11112917 | """
Violinplot from a wide-form dataset
===================================
_thumb: .6, .45
"""
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style="whitegrid")
# Load the example dataset of brain network correlations
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Pull out a specific subset of networks
used_networks = [1, 3, 4, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Compute the correlation matrix and average over networks
corr_df = df.corr().groupby(level="network").mean()
corr_df.index = corr_df.index.astype(int)
corr_df = corr_df.sort_index().T
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 6))
# Draw a violinplot with a narrower bandwidth than the default
sns.violinplot(data=corr_df, palette="Set3", bw=.2, cut=1, linewidth=1)
# Finalize the figure
ax.set(ylim=(-.7, 1.05))
sns.despine(left=True, bottom=True)
|
mmfashion/models/backbones/__init__.py | RyanJiang0416/mmfashion | 952 | 11112922 |
from .resnet import ResNet
from .vgg import Vgg
__all__ = ['Vgg', 'ResNet']
|
tf_base/src/network/lenetem.py | borgr/active_learning_coreset | 181 | 11112965 | from network import Network
class LeNetEm(Network):
def setup(self):
(self.feed('data')
.reshape([-1,28,28,1], name='data_reshape')
.conv(5, 5, 20, 1, 1, padding='VALID', relu=False, name='conv1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(5, 5, 50, 1, 1, padding='VALID', relu=False, name='conv2')
.max_pool(2, 2, 2, 2, name='pool2')
.fc(500, name='ip1')
.fc(64, relu=False, name='ip2'))
|
colossalai/zero/sharded_param/sharded_tensor.py | RichardoLuo/ColossalAI | 1,630 | 11112972 | import torch
from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState
class ShardedTensor(StatefulTensor):
def __init__(self, tensor: torch.Tensor, state: TensorState = TensorState.HOLD) -> None:
r"""
A tensor sharded in multiple processes. Constructed from an existing torch.Tensor instance.
"""
assert tensor.requires_grad is False
super().__init__(tensor, state)
        # keep the shape, numel and dtype of the initial tensor.
self._origin_shape = tensor.shape
self._origin_numel = tensor.numel()
self._origin_dtype = tensor.dtype
self._is_sharded = False
@property
def dtype(self) -> torch.dtype:
assert self._payload.dtype == self._origin_dtype
return self._payload.dtype
@property
def origin_numel(self) -> int:
return self._origin_numel
@property
def origin_shape(self) -> int:
return self._origin_shape
@property
def is_sharded(self):
return self._is_sharded
@is_sharded.setter
def is_sharded(self, flag: bool):
self._is_sharded = flag
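# A minimal usage sketch (it assumes, as the dtype property above already does,
# that the StatefulTensor base class keeps the wrapped tensor as its payload):
if __name__ == '__main__':
    t = torch.zeros(4, 4)              # requires_grad defaults to False
    st = ShardedTensor(t)
    print(st.origin_shape, st.origin_numel, st.dtype, st.is_sharded)
    st.is_sharded = True               # would be flipped by a real shard operation
    print(st.is_sharded)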
|
compiler/bon.py | cheery/lever | 136 | 11112974 | """
binary object notation
A data-interchange format. Used here because it is simpler to
decode than other similar formats and can contain custom encodings.
"""
import struct
types = {}
decoder = {}
encoder = {}
def load(fd):
type_id = ord(fd.read(1))
return decoder[type_id](fd)
def dump(fd, obj):
type_id = types[type(obj)]
fd.write(chr(type_id))
return encoder[type_id](fd, obj)
def r32(fd):
a, b, c, d = fd.read(4)
return ord(a) << 24 | ord(b) << 16 | ord(c) << 8 | ord(d)
def w32(fd, value):
fd.write(''.join((
chr(value >> 24 & 255),
chr(value >> 16 & 255),
chr(value >> 8 & 255),
chr(value >> 0 & 255),
)))
def rlong(fd):
"http://en.wikipedia.org/wiki/Variable-length_quantity"
sign = +1
output = 0
ubyte = ord(fd.read(1))
if ubyte & 0x40 == 0x40:
sign = -1
ubyte &= 0xBF
while ubyte & 0x80:
output |= ubyte & 0x7F
output <<= 7
ubyte = ord(fd.read(1))
output |= ubyte
return output * sign
def wlong(fd, value):
"http://en.wikipedia.org/wiki/Variable-length_quantity"
output = []
if value < 0:
negative = True
value = -value
else:
negative = False
output.append(value & 0x7F)
while value > 0x7F:
value >>= 7
output.append(0x80 | value & 0x7F)
if output[-1] & 0x40 != 0:
output.append(0x80)
if negative:
output[-1] |= 0x40
fd.write(''.join(map(chr, reversed(output))))
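# Worked example, hand-traced against rlong/wlong above (comment only):
# wlong(300) writes the bytes 0x82 0x2C (7-bit groups 0000010 and 0101100 with a
# continuation bit on the first), and rlong reads them back to 300; for -300 the
# sign bit 0x40 is set in the leading byte, giving 0xC2 0x2C.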
types[int] = 0
types[long] = 0
decoder[0] = rlong
encoder[0] = wlong
def rdouble(fd):
return struct.unpack('!d', fd.read(8))[0]
def wdouble(fd, obj):
fd.write(struct.pack('!d', obj))
types[float] = 1
decoder[1] = rdouble
encoder[1] = wdouble
def rstring(fd):
length = r32(fd)
return fd.read(length).decode('utf-8')
def wstring(fd, obj):
w32(fd, len(obj))
fd.write(obj.encode('utf-8'))
types[unicode] = 2
decoder[2] = rstring
encoder[2] = wstring
def rlist(fd):
length = r32(fd)
sequence = []
for _ in range(length):
sequence.append(load(fd))
return sequence
def wlist(fd, obj):
w32(fd, len(obj))
for value in obj:
dump(fd, value)
types[list] = 3
types[tuple] = 3
decoder[3] = rlist
encoder[3] = wlist
def rdict(fd):
length = r32(fd)
dictionary = dict()
for _ in range(length):
key = load(fd)
val = load(fd)
dictionary[key] = val
return dictionary
def wdict(fd, obj):
w32(fd, len(obj))
for key, value in obj.iteritems():
dump(fd, key)
dump(fd, value)
types[dict] = 4
decoder[4] = rdict
encoder[4] = wdict
def rbytes(fd):
length = r32(fd)
return fd.read(length)
def wbytes(fd, obj):
w32(fd, len(obj))
fd.write(obj)
types[bytes] = 5
decoder[5] = rbytes
encoder[5] = wbytes
def rboolean(fd):
return fd.read(1) != '\x00'
def wboolean(fd, obj):
return fd.write('\x00\x01'[obj])
types[bool] = 6
decoder[6] = rboolean
encoder[6] = wboolean
def rnull(fd):
return None
def wnull(fd, obj):
pass
types[type(None)] = 7
decoder[7] = rnull
encoder[7] = wnull
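# A short round-trip sketch (assumes Python 2, which this module targets through
# its use of `unicode`, `long` and chr()-based byte writes, and uses StringIO
# purely as an illustrative in-memory file object):
if __name__ == '__main__':
    from StringIO import StringIO
    buf = StringIO()
    dump(buf, {u"name": u"demo", u"values": [1, -300, 3.5], u"ok": True})
    buf.seek(0)
    print load(buf)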
|
examples/plots/partial-dependence-plot-2D.py | samueljamesbell/scikit-optimize | 2,404 | 11113035 | """
===========================
Partial Dependence Plots 2D
===========================
Hvass-Labs Dec 2017
<NAME> 2020
.. currentmodule:: skopt
Simple example to show the new 2D plots.
"""
print(__doc__)
import numpy as np
from math import exp
from skopt import gp_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_histogram, plot_objective_2D, plot_objective
from skopt.utils import point_asdict
np.random.seed(123)
import matplotlib.pyplot as plt
#############################################################################
dim_learning_rate = Real(name='learning_rate', low=1e-6, high=1e-2, prior='log-uniform')
dim_num_dense_layers = Integer(name='num_dense_layers', low=1, high=5)
dim_num_dense_nodes = Integer(name='num_dense_nodes', low=5, high=512)
dim_activation = Categorical(name='activation', categories=['relu', 'sigmoid'])
dimensions = [dim_learning_rate,
dim_num_dense_layers,
dim_num_dense_nodes,
dim_activation]
default_parameters = [1e-4, 1, 64, 'relu']
def model_fitness(x):
learning_rate, num_dense_layers, num_dense_nodes, activation = x
fitness = ((exp(learning_rate) - 1.0) * 1000) ** 2 + \
(num_dense_layers) ** 2 + \
(num_dense_nodes/100) ** 2
fitness *= 1.0 + 0.1 * np.random.rand()
if activation == 'sigmoid':
fitness += 10
return fitness
print(model_fitness(x=default_parameters))
#############################################################################
search_result = gp_minimize(func=model_fitness,
dimensions=dimensions,
n_calls=30,
x0=default_parameters,
random_state=123
)
print(search_result.x)
print(search_result.fun)
#############################################################################
for fitness, x in sorted(zip(search_result.func_vals, search_result.x_iters)):
print(fitness, x)
#############################################################################
space = search_result.space
print(search_result.x_iters)
search_space = {name: space[name][1] for name in space.dimension_names}
print(point_asdict(search_space, default_parameters))
#############################################################################
print("Plotting now ...")
_ = plot_histogram(result=search_result, dimension_identifier='learning_rate',
bins=20)
plt.show()
#############################################################################
_ = plot_objective_2D(result=search_result,
dimension_identifier1='learning_rate',
dimension_identifier2='num_dense_nodes')
plt.show()
#############################################################################
_ = plot_objective_2D(result=search_result,
dimension_identifier1='num_dense_layers',
dimension_identifier2='num_dense_nodes')
plt.show()
#############################################################################
_ = plot_objective(result=search_result,
plot_dims=['num_dense_layers',
'num_dense_nodes'])
plt.show()
|
tests/slack_sdk/web/test_slack_response.py | priya1puresoftware/python-slack-sdk | 2,486 | 11113078 |
import unittest
from slack_sdk.web import WebClient
from slack_sdk.web.slack_response import SlackResponse
class TestSlackResponse(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# https://github.com/slackapi/python-slackclient/issues/559
def test_issue_559(self):
response = SlackResponse(
client=WebClient(token="<KEY>"),
http_verb="POST",
api_url="http://localhost:3000/api.test",
req_args={},
data={"ok": True, "args": {"hello": "world"}},
headers={},
status_code=200,
)
self.assertTrue("ok" in response.data)
self.assertTrue("args" in response.data)
self.assertFalse("error" in response.data)
# https://github.com/slackapi/python-slack-sdk/issues/1100
def test_issue_1100(self):
response = SlackResponse(
client=WebClient(token="<KEY>"),
http_verb="POST",
api_url="http://localhost:3000/api.test",
req_args={},
data=None,
headers={},
status_code=200,
)
with self.assertRaises(ValueError):
response["foo"]
foo = response.get("foo")
self.assertIsNone(foo)
# https://github.com/slackapi/python-slack-sdk/issues/1102
def test_issue_1102(self):
response = SlackResponse(
client=WebClient(token="<KEY>"),
http_verb="POST",
api_url="http://localhost:3000/api.test",
req_args={},
data={"ok": True, "args": {"hello": "world"}},
headers={},
status_code=200,
)
self.assertTrue("ok" in response)
self.assertTrue("foo" not in response)
|
samcli/lib/init/template_modifiers/xray_tracing_template_modifier.py | praneetap/aws-sam-cli | 2,285 | 11113131 |
"""
Class used to parse and update template when tracing is enabled
"""
import logging
from samcli.lib.init.template_modifiers.cli_template_modifier import TemplateModifier
LOG = logging.getLogger(__name__)
class XRayTracingTemplateModifier(TemplateModifier):
FIELD_NAME_FUNCTION_TRACING = "Tracing"
FIELD_NAME_API_TRACING = "TracingEnabled"
GLOBALS = "Globals:\n"
RESOURCE = "Resources:\n"
FUNCTION = " Function:\n"
TRACING_FUNCTION = " Tracing: Active\n"
API = " Api:\n"
TRACING_API = " TracingEnabled: True\n"
COMMENT = (
"# More info about Globals: "
"https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst\n"
)
def _add_new_field_to_template(self):
"""
Add new field to SAM template
"""
global_section_position = self._section_position(self.GLOBALS)
if global_section_position >= 0:
self._add_tracing_section(
global_section_position, self.FUNCTION, self.FIELD_NAME_FUNCTION_TRACING, self.TRACING_FUNCTION
)
self._add_tracing_section(global_section_position, self.API, self.FIELD_NAME_API_TRACING, self.TRACING_API)
else:
self._add_tracing_with_globals()
def _add_tracing_with_globals(self):
"""Adds Globals and tracing fields"""
resource_section_position = self._section_position(self.RESOURCE)
globals_section_data = [
self.COMMENT,
self.GLOBALS,
self.FUNCTION,
self.TRACING_FUNCTION,
self.API,
self.TRACING_API,
"\n",
]
self.template = (
self.template[:resource_section_position] + globals_section_data + self.template[resource_section_position:]
)
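    # For orientation, the Globals block assembled above from COMMENT, GLOBALS,
    # FUNCTION, TRACING_FUNCTION, API and TRACING_API renders roughly as follows
    # (shown only as a sketch; exact indentation comes from those constants):
    #
    #   Globals:
    #     Function:
    #       Tracing: Active
    #     Api:
    #       TracingEnabled: True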
def _add_tracing_section(
self,
global_section_position: int,
parent_section: str,
tracing_field_name: str,
tracing_field: str,
):
"""
Adds tracing into the designated field
Parameters
----------
        global_section_position : int
Position of the Globals field in the template
parent_section: str
Name of the parent section that the tracing field would be added.
tracing_field_name: str
Name of the tracing field, which will be used to check if it already exist
tracing_field: str
Name of the whole tracing field, which includes its name and value
"""
parent_section_position = self._section_position(parent_section, global_section_position)
if parent_section_position >= 0:
            field_position_function = self._field_position(parent_section_position, tracing_field_name)
            if field_position_function >= 0:
                self.template[field_position_function] = tracing_field
else:
self.template = self._add_fields_to_section(parent_section_position, [tracing_field])
else:
self.template = self._add_fields_to_section(global_section_position, [parent_section, tracing_field])
def _print_sanity_check_error(self):
link = (
"https://docs.aws.amazon.com/serverless-application-model/latest"
"/developerguide/sam-resource-function.html#sam-function-tracing"
)
message = f"Warning: Unable to add Tracing to the project. To learn more about Tracing visit {link}"
LOG.warning(message)
|
evosax/strategies/gld.py | RobertTLange/evosax | 102 | 11113136 |
import jax
import jax.numpy as jnp
import chex
from typing import Tuple
from ..strategy import Strategy
class GLD(Strategy):
def __init__(self, num_dims: int, popsize: int):
"""Gradientless Descent (Golovin et al., 2019)
Reference: https://arxiv.org/pdf/1911.06317.pdf"""
super().__init__(num_dims, popsize)
self.strategy_name = "GLD"
@property
def params_strategy(self) -> chex.ArrayTree:
"""Return default parameters of evolution strategy."""
return {
"init_min": 0.0,
"init_max": 0.0,
"radius_max": 0.05,
"radius_min": 0.001,
"radius_decay": 5,
}
def initialize_strategy(
self, rng: chex.PRNGKey, params: chex.ArrayTree
) -> chex.ArrayTree:
"""`initialize` the evolution strategy."""
initialization = jax.random.uniform(
rng,
(self.num_dims,),
minval=params["init_min"],
maxval=params["init_max"],
)
state = {
"mean": initialization,
}
return state
def ask_strategy(
self, rng: chex.PRNGKey, state: chex.ArrayTree, params: chex.ArrayTree
) -> Tuple[chex.Array, chex.ArrayTree]:
"""`ask` for new proposed candidates to evaluate next."""
# Sampling of N(0, 1) noise
z = jax.random.normal(
rng,
(self.popsize, self.num_dims),
)
# Exponentially decaying sigma scale
sigma_scale = params["radius_min"] + jnp.exp2(
-jnp.arange(self.popsize) / params["radius_decay"]
) * (params["radius_max"] - params["radius_min"])
sigma_scale = sigma_scale.reshape(-1, 1)
# print(state["best_member"].shape, (sigma_scale * z).shape)
x = state["best_member"] + sigma_scale * z
return x, state
def tell_strategy(
self,
x: chex.Array,
fitness: chex.Array,
state: chex.ArrayTree,
params: chex.ArrayTree,
) -> chex.ArrayTree:
"""`tell` update to ES state."""
# No state update needed - everything happens with best_member update
state["mean"] = state["best_member"]
return state
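# A hedged end-to-end sketch on a toy sphere objective. It assumes the evosax
# base Strategy exposes the usual `default_params`, `initialize`, `ask` and
# `tell` wrappers around the *_strategy methods above; the objective, population
# size and iteration count are arbitrary choices.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    strategy = GLD(num_dims=2, popsize=20)
    es_params = strategy.default_params
    es_state = strategy.initialize(rng, es_params)
    for _ in range(50):
        rng, rng_ask = jax.random.split(rng)
        x, es_state = strategy.ask(rng_ask, es_state, es_params)
        fitness = jnp.sum(x ** 2, axis=1)  # sphere function, minimized at 0
        es_state = strategy.tell(x, fitness, es_state, es_params)
    print(es_state["best_member"])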
|
minimongo/index.py | ricksore/minimongo | 144 | 11113147 | # -*- coding: utf-8 -*-
class Index(object):
"""A simple wrapper for arguments to
:meth:`pymongo.collection.Collection.ensure_index`."""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def __eq__(self, other):
"""Two indices are equal, when they have equal arguments.
>>> Index(42, foo='bar') == Index(42, foo='bar')
True
>>> Index(foo='bar') == Index(42, foo='bar')
False
"""
return self.__dict__ == other.__dict__
def ensure(self, collection):
"""Calls :meth:`pymongo.collection.Collection.ensure_index`
on the given `collection` with the stored arguments.
"""
return collection.ensure_index(*self._args, **self._kwargs)
|
src/hg/makeDb/genbank/src/lib/py/genbank/__init__.py | andypohl/kent | 171 | 11113152 |
import sys, os
# require 2.4 or newer
if (sys.version_info[0] <= 2) and (sys.version_info[1] < 4):
raise Exception("python 2.4 or newer required, using " + sys.version)
def gbAbsBinDir():
"get the absolute path to the bin directory"
return os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
def gbSetupPath():
"setup PATH to include gbRoot bin directories"
absBinDir = gbAbsBinDir()
mach = os.uname()[4]
if mach == "i686":
mach = "i386"
machBinPath = absBinDir + "/" + mach
if mach != "i386":
machBinPath += ":" + absBinDir + "/i386" # use on x86_64 for now
os.environ["PATH"] = machBinPath + ":" + os.environ["PATH"]
def gbSetupHgConf():
"setup the HGDB_CONF environment variable as needed"
# only set HGDB_CONF to one of the gbRoot/etc ones if we are user genbank
if os.environ["USER"] == "genbank":
host = os.uname()[1]
if host == "hgwbeta":
hgconf = "etc/.hg.mysqlbeta.conf"
elif host == "hgnfs1":
hgconf = "etc/.hg.mysqlrr.conf"
else:
hgconf = "etc/.hg.conf"
os.environ["HGDB_CONF"] = os.path.abspath(hgconf)
|
MDEQ-Vision/lib/config/models.py | ashwinipokle/deq | 548 | 11113159 | # Modified based on the HRNet repo.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from yacs.config import CfgNode as CN
MDEQ = CN()
MDEQ.FULL_STAGE = CN()
MDEQ.FULL_STAGE.NUM_MODULES = 1
MDEQ.FULL_STAGE.NUM_BRANCHES = 4
MDEQ.FULL_STAGE.NUM_BLOCKS = [1, 1, 1, 1]
MDEQ.FULL_STAGE.NUM_CHANNELS = [64, 128, 256, 512]
MDEQ.FULL_STAGE.BIG_KERNELS = [0, 0, 0, 0]
MDEQ.FULL_STAGE.HEAD_CHANNELS = [32, 64, 128, 256] # Only for classification
MDEQ.FULL_STAGE.FINAL_CHANSIZE = 2048 # Only for classification
MDEQ.FULL_STAGE.BLOCK = 'BASIC'
MDEQ.FULL_STAGE.FUSE_METHOD = 'SUM'
MODEL_EXTRAS = {
'mdeq': MDEQ
}
|
apps/venv/lib/python2.7/site-packages/MySQLdb/constants/CR.py | gmacchi93/serverInfoParaguay | 5,079 | 11113163 | """MySQL Connection Errors
Nearly all of these raise OperationalError. COMMANDS_OUT_OF_SYNC
raises ProgrammingError.
"""
MIN_ERROR = 2000
MAX_ERROR = 2999
UNKNOWN_ERROR = 2000
SOCKET_CREATE_ERROR = 2001
CONNECTION_ERROR = 2002
CONN_HOST_ERROR = 2003
IPSOCK_ERROR = 2004
UNKNOWN_HOST = 2005
SERVER_GONE_ERROR = 2006
VERSION_ERROR = 2007
OUT_OF_MEMORY = 2008
WRONG_HOST_INFO = 2009
LOCALHOST_CONNECTION = 2010
TCP_CONNECTION = 2011
SERVER_HANDSHAKE_ERR = 2012
SERVER_LOST = 2013
COMMANDS_OUT_OF_SYNC = 2014
NAMEDPIPE_CONNECTION = 2015
NAMEDPIPEWAIT_ERROR = 2016
NAMEDPIPEOPEN_ERROR = 2017
NAMEDPIPESETSTATE_ERROR = 2018
CANT_READ_CHARSET = 2019
NET_PACKET_TOO_LARGE = 2020
|
samples/RecurringTask.py | amih90/bacpypes | 240 | 11113177 | #!/usr/bin/env python
"""
This application demonstrates doing something at a regular interval.
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ArgumentParser
from bacpypes.core import run
from bacpypes.task import RecurringTask
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# PrairieDog
#
@bacpypes_debugging
class PrairieDog(RecurringTask):
def __init__(self, dog_number, interval):
if _debug: PrairieDog._debug("__init__ %r %r", dog_number, interval)
RecurringTask.__init__(self, interval)
# save the identity
self.dog_number = dog_number
# install it
self.install_task()
def process_task(self):
if _debug: PrairieDog._debug("process_task")
sys.stdout.write("%d woof!\n" % (self.dog_number,))
#
# __main__
#
def main():
# parse the command line arguments
parser = ArgumentParser(description=__doc__)
# add an argument for seconds per dog
parser.add_argument('seconds', metavar='N', type=int, nargs='+',
help='number of seconds for each dog',
)
# now parse the arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make some dogs
for i, sec in enumerate(args.seconds):
dog = PrairieDog(i, sec * 1000)
if _debug: _log.debug(" - dog: %r", dog)
_log.debug("running")
run()
_log.debug("fini")
if __name__ == "__main__":
main()
|
test/test_metrics/test_commons.py | wangjunyan305/homura | 102 | 11113211 | import torch
from homura.metrics import commons
# pred: [2, 0, 2, 2]
input = torch.tensor([[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[0, 0, 1]], dtype=torch.float)
target = torch.tensor([2, 0, 0, 1], dtype=torch.long)
def test_confusion_matrix():
cm = commons.confusion_matrix(input, target)
expected = torch.zeros(3, 3, dtype=torch.long)
expected[2, 2] = 1
expected[0, 0] = 1
expected[2, 0] = 1
expected[2, 1] = 1
assert all(cm.view(-1) == expected.view(-1))
def test_classwise_accuracy():
assert all(commons.classwise_accuracy(input, target) == torch.tensor([3 / 4, 3 / 4, 2 / 4]))
def test_true_positive():
# class 0: 1
# class 1: 0
# class 2: 1
assert all(commons.true_positive(input, target) == torch.tensor([1, 0, 1]).float())
def test_true_negative():
# class 0: 2
# class 1: 3
# class 2: 1
assert all(commons.true_negative(input, target) == torch.tensor([2, 3, 1]).float())
def test_false_positive():
# class 0: 0
# class 1: 0
# class 2: 2
assert all(commons.false_positive(input, target) == torch.tensor([0, 0, 2]).float())
def test_false_negative():
# class 0: 1
# class 1: 1
# class 2: 0
assert all(commons.false_negative(input, target) == torch.tensor([1, 1, 0]).float())
def test_precision():
# class 1 is nan
assert all(commons.precision(input, target)[[0, 2]] == torch.tensor([1 / 1, 1 / 3]))
def test_recall():
assert all(commons.recall(input, target) == torch.tensor([1 / 2, 0 / 1, 1 / 1]))
def test_specificity():
assert all(commons.specificity(input, target) == torch.tensor([2 / 2, 3 / 3, 1 / 3]))
def test_true_positive_2d():
# torch.randn(2, 3, 2, 2)
input = torch.tensor([[[[0.0146, 0.8026],
[0.5576, -2.3168]],
[[-1.1490, 0.6365],
[-1.1506, -0.6319]],
[[-0.4976, 0.8760],
[0.6989, -1.1562]]],
[[[-0.0541, -0.0892],
[-0.9677, 1.3331]],
[[1.7848, 1.0078],
[0.7506, -1.5101]],
[[-0.6134, 1.9541],
[1.1825, -0.5879]]]])
# argmax(dim=1)
target = torch.tensor([[[0, 2],
[2, 1]],
[[1, 2],
[2, 0]]])
assert all(commons.true_positive(input, target) == torch.tensor([2, 2, 4]).float())
def test_accuracy():
input = torch.tensor([[0.9159, -0.3400, -1.0952, 0.1969, 0.4769],
[-0.1677, 0.7205, 0.3802, -0.8408, 0.5447],
[0.1596, 0.0366, -1.3719, 1.6869, -0.2422]])
# argmax=[0, 1, 3], argmin=[2, 3, 2]
target = torch.tensor([0, 3, 0])
assert commons.accuracy(input, target) == torch.tensor([1 / 3])
assert commons.accuracy(input, target, top_k=3) == torch.tensor([2 / 3])
assert commons.accuracy(input, target, top_k=5) == torch.tensor([1.0])
|
tests/components/xiaomi_miio/__init__.py | MrDelik/core | 30,023 | 11113224 |
"""Tests for the Xiaomi Miio integration."""
TEST_MAC = "ab:cd:ef:gh:ij:kl"
|
src/robomaster/module.py | talb430/RoboMaster-SDK | 204 | 11113282 |
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import protocol
from . import logger
__all__ = ['Module']
# registered protocol dict.
registered_modules = {}
class _AutoRegisterModule(type):
""" help to automatically register Proto Class where ever they're defined """
def __new__(mcs, name, bases, attrs, **kw):
return super().__new__(mcs, name, bases, attrs, **kw)
def __init__(cls, name, bases, attrs, **kw):
super().__init__(name, bases, attrs, **kw)
key = attrs['_host']
if key in registered_modules.keys():
raise ValueError("Duplicate module class {0}".format(name))
registered_modules[key] = cls
class Module(metaclass=_AutoRegisterModule):
# V1 host byte, protocol.host2byte(type, index)
_host = 0
_client = None
_robot = None
def __init__(self, robot):
self._robot = robot
self._client = robot.client
@property
def client(self):
return self._client
def reset(self):
raise Exception("Module, reset function Not Implemented!")
def start(self):
pass
def stop(self):
pass
def get_version(self):
""" 获取模块版本号
:return:字符串,格式为:AA.BB.CC.DD
"""
proto = protocol.ProtoGetVersion()
msg = protocol.Msg(self.client.hostbyte, self._host, proto)
try:
resp_msg = self.client.send_sync_msg(msg)
if resp_msg is not None:
prot = resp_msg.get_proto()
version = "{0:02d}.{1:02d}.{2:02d}.{3:02d}".format(prot._aa, prot._bb, prot._cc, prot._dd)
return version
else:
logger.warning("Module: get_version, {0} failed.".format(self.__class__.__name__))
return None
except Exception as e:
logger.warning("Module: get_version, {0} exception {1}.".format(self.__class__.__name__, str(e)))
return None
def _send_sync_proto(self, proto, target=None):
if not self.client:
return False
if target:
msg = protocol.Msg(self._client.hostbyte, target, proto)
else:
msg = protocol.Msg(self._client.hostbyte, self._host, proto)
try:
resp_msg = self._client.send_sync_msg(msg)
if resp_msg:
proto = resp_msg.get_proto()
if proto._retcode == 0:
return True
else:
logger.warning("{0}: send_sync_proto, proto:{1}, retcode:{2} ".format(self.__class__.__name__,
proto,
proto._retcode))
return False
else:
logger.warning("{0}: send_sync_proto, proto:{1} resp_msg is None.".format(
self.__class__.__name__, proto))
return False
except Exception as e:
logger.warning("{0}: send_sync_proto, proto:{1}, exception:{2}".format(self.__class__.__name__, proto, e))
return False
def _send_async_proto(self, proto, target=None):
if not self.client:
return False
if target:
msg = protocol.Msg(self._client.hostbyte, target, proto)
else:
msg = protocol.Msg(self._client.hostbyte, self._host, proto)
try:
return self._client.send_async_msg(msg)
except Exception as e:
logger.warning("{0}: _send_async_proto, proto:{1}, exception:{2}".format(self.__class__.__name__, proto, e))
return False
|
spec/settings_spec.py | kfischer-okarin/mamba | 462 | 11113310 | # -*- coding: utf-8 -*-
from mamba import description, it, context, before
from expects import expect, have_property, equal, be_false, be_none
from mamba.settings import Settings
IRRELEVANT_SLOW_TEST_THRESHOLD = 'irrelevant slow test threeshold'
IRRELEVANT_ENABLE_CODE_COVERAGE = 'irrelevant enable code coverage'
IRRELEVANT_NO_COLOR = 'irrelevant no color'
with description(Settings) as self:
with before.each:
self.settings = Settings()
with context('when loading defaults'):
with it('has 75 millis as slow test threshold'):
expect(self.settings).to(have_property('slow_test_threshold', equal(0.075)))
with it('has code coverage disabled by default'):
expect(self.settings).to(have_property('enable_code_coverage', be_false))
with it('has no color disabled by default'):
expect(self.settings).to(have_property('no_color', be_false))
with it('has no tags included by default'):
expect(self.settings).to(have_property('tags', be_none))
with context('when setting custom values'):
with it('sets slow test threshold'):
self.settings.slow_test_threshold = IRRELEVANT_SLOW_TEST_THRESHOLD
expect(self.settings).to(have_property('slow_test_threshold', IRRELEVANT_SLOW_TEST_THRESHOLD))
with it('sets code coverage'):
self.settings.enable_code_coverage = IRRELEVANT_ENABLE_CODE_COVERAGE
expect(self.settings).to(have_property('enable_code_coverage', IRRELEVANT_ENABLE_CODE_COVERAGE))
with it('sets no color'):
self.settings.no_color = IRRELEVANT_NO_COLOR
expect(self.settings).to(have_property('no_color', IRRELEVANT_NO_COLOR))
|
Alfred/BilibiliSearch/Source/bilibiliSearch.py | Vespa314/bilibili-api | 1,460 | 11113326 | # -*- coding: utf-8 -*-
import sys
import re
import zlib
import urllib2
import xml.etree.ElementTree as et
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
class Feedback():
"""Feeback used by Alfred Script Filter
Usage:
fb = Feedback()
fb.add_item('Hello', 'World')
fb.add_item('Foo', 'Bar')
print fb
"""
def __init__(self):
self.feedback = et.Element('items')
def __repr__(self):
"""XML representation used by Alfred
Returns:
XML string
"""
return et.tostring(self.feedback)
def add_item(self, title, subtitle = "", arg = "", valid = "yes", autocomplete = "", icon = "icon.png"):
"""
Add item to alfred Feedback
Args:
title(str): the title displayed by Alfred
Keyword Args:
subtitle(str): the subtitle displayed by Alfred
arg(str): the value returned by alfred when item is selected
valid(str): whether or not the entry can be selected in Alfred to trigger an action
            autocomplete(str): the text to be inserted if an invalid item is selected. This is only used if 'valid' is 'no'
icon(str): filename of icon that Alfred will display
"""
item = et.SubElement(self.feedback, 'item', uid=str(len(self.feedback)), arg=arg, valid=valid, autocomplete=autocomplete)
_title = et.SubElement(item, 'title')
_title.text = title
_sub = et.SubElement(item, 'subtitle')
_sub.text = subtitle
_icon = et.SubElement(item, 'icon')
_icon.text = icon
query = '{query}'
url = "http://www.bilibili.com/search?keyword=%s&orderby=&formsubmit="%query
req = urllib2.Request(url = url);
content = urllib2.urlopen(req,timeout = 10).read();
content = zlib.decompress(content, 16+zlib.MAX_WBITS)
reg = r'<div class="r"><a href="http://www.bilibili.tv/video/av(\d+)/" target="_blank"><div class="t"><span>([^<]*)</span>([^<]*)</div></a>';
result = re.findall(reg,content,re.S)
fb = Feedback()
try:
for item in result:
avnum = item[0]
avtype = item[1]
title = item[2].strip()
fb.add_item(title,subtitle="%s : http://www.bilibili.tv/video/%s"%(avtype,avnum),arg=avnum)
except SyntaxError as e:
if ('EOF', 'EOL' in e.msg):
fb.add_item('...')
else:
fb.add_item('SyntaxError', e.msg)
except Exception as e:
fb.add_item(e.__class__.__name__,subtitle=e.message)
print fb
|
ioflo/base/deeding.py | BradyHammond/ioflo | 128 | 11113339 |
"""
deeding.py deed module
Backwards compatibility module
In the future, use the doing module instead
"""
from . import doing
# for backwards compatibility
deedify = doing.doify
class Deed(doing.Doer):
pass
class DeedParam(doing.DoerParam):
pass
class DeedSince(doing.DoerSince):
pass
class DeedLapse(doing.DoerLapse):
pass
|
RecoLocalTracker/SubCollectionProducers/python/TopBottomClusterInfoProducer_cfi.py | ckamtsikis/cmssw | 852 | 11113347 |
import FWCore.ParameterSet.Config as cms
topBottomClusterInfoProducer = cms.EDProducer("TopBottomClusterInfoProducer",
stripClustersOld = cms.InputTag("siStripClusters"),
pixelClustersOld = cms.InputTag("siPixelClusters"),
stripClustersNew = cms.InputTag("siStripClustersTop"),
pixelClustersNew = cms.InputTag("siPixelClustersTop"),
stripMonoHitsOld = cms.InputTag("siStripMatchedRecHits","rphiRecHit"),
stripStereoHitsOld = cms.InputTag("siStripMatchedRecHits","stereoRecHit"),
pixelHitsOld = cms.InputTag("siPixelRecHits"),
stripMonoHitsNew = cms.InputTag("siStripMatchedRecHitsTop","rphiRecHit"),
stripStereoHitsNew = cms.InputTag("siStripMatchedRecHitsTop","stereoRecHit"),
pixelHitsNew = cms.InputTag("siPixelRecHitsTop")
)
|
pliers/tests/converters/api/test_revai_converters.py | nickduran/pliers | 229 | 11113372 | <reponame>nickduran/pliers
from os.path import join
import pytest
from ...utils import get_test_data_path
from pliers.converters import RevAISpeechAPIConverter
from pliers.stimuli import AudioStim, ComplexTextStim, TextStim
AUDIO_DIR = join(get_test_data_path(), 'audio')
@pytest.mark.requires_payment
@pytest.mark.skipif("'REVAI_ACCESS_TOKEN' not in os.environ")
def test_revai_converter():
stim = AudioStim(join(AUDIO_DIR, 'obama_speech.wav'), onset=4.2)
conv = RevAISpeechAPIConverter()
assert conv.validate_keys()
out_stim = conv.transform(stim)
assert type(out_stim) == ComplexTextStim
text = [elem.text for elem in out_stim]
assert 'years' in text
assert 'together' in text
first_word = next(w for w in out_stim)
assert isinstance(first_word, TextStim)
assert first_word.duration > 0
assert first_word.onset > 4.2 and first_word.onset < 8.0
conv = RevAISpeechAPIConverter(access_token='bad<PASSWORD>')
assert not conv.validate_keys()
|
omega_miya/plugins/omega_plugins_manager/__init__.py | rinrini001/omega-miya | 120 | 11113419 | <reponame>rinrini001/omega-miya
"""
@Author : Ailitonia
@Date : 2021/09/12 14:02
@FileName : __init__.py
@Project : nonebot2_miya
@Description : Plugin manager
@GitHub : https://github.com/Ailitonia
@Software : PyCharm
"""
import pathlib
from nonebot import CommandGroup, get_loaded_plugins, logger
from nonebot.plugin.export import export
from nonebot.rule import to_me
from nonebot.permission import SUPERUSER
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import MessageEvent
from nonebot.adapters.cqhttp.message import MessageSegment
from omega_miya.database import DBPlugin
from omega_miya.utils.omega_plugin_utils import init_export, TextUtils
# Custom plugin usage text
__plugin_custom_name__ = 'Omega Plugins Manager'
__plugin_usage__ = r'''【OmegaPluginsManager 插件管理器】
插件管理器
仅限超级管理员使用
**Usage**
**SuperUser Only**
/OPM'''
# Init plugin export
init_export(export(), __plugin_custom_name__, __plugin_usage__)
# Register event matchers
OmegaPluginsManager = CommandGroup(
'OPM',
rule=to_me(),
permission=SUPERUSER,
priority=10,
block=True
)
enable_plugin = OmegaPluginsManager.command('enable', aliases={'启用插件'})
disable_plugin = OmegaPluginsManager.command('disable', aliases={'禁用插件'})
list_plugins = OmegaPluginsManager.command('list', aliases={'插件列表'})
# Override the default argument parser
@enable_plugin.args_parser
async def parse(bot: Bot, event: MessageEvent, state: T_State):
args = event.get_plaintext().strip()
if not args:
await enable_plugin.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args
if state[state["_current_key"]] == '取消':
await enable_plugin.finish('操作已取消')
@enable_plugin.handle()
async def handle_first_receive(bot: Bot, event: MessageEvent, state: T_State):
args = event.get_plaintext().strip()
if args:
state['plugin_name'] = args
else:
plugin_list = get_loaded_plugins()
        # Only list plugins that have matchers when prompting
plugin_msg = '\n'.join([x.name for x in plugin_list if len(x.matcher) > 0])
msg = f'当前已加载的插件有:\n\n{plugin_msg}'
await enable_plugin.send(msg)
@enable_plugin.got('plugin_name', prompt='请输入需要启用的插件名称:')
async def handle_enable_plugin(bot: Bot, event: MessageEvent, state: T_State):
plugin_name = state['plugin_name']
plugin_list = [x.name for x in get_loaded_plugins()]
if plugin_name not in plugin_list:
await enable_plugin.reject('没有这个插件, 请重新输入需要启用的插件名称:\n取消操作请发送【取消】')
result = await DBPlugin(plugin_name=plugin_name).update(enabled=1)
if result.success():
        logger.success(f'OPM | Successfully enabled plugin {plugin_name} by user {event.user_id}')
await enable_plugin.finish(f'启用插件 {plugin_name} 成功')
else:
logger.error(f'OPM | Failed to enable {plugin_name}, {result.info}')
await enable_plugin.finish(f'启用插件 {plugin_name} 失败, 详细信息请参见日志')
# Override the default argument parser
@disable_plugin.args_parser
async def parse(bot: Bot, event: MessageEvent, state: T_State):
args = event.get_plaintext().strip()
if not args:
await disable_plugin.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
state[state["_current_key"]] = args
if state[state["_current_key"]] == '取消':
await disable_plugin.finish('操作已取消')
@disable_plugin.handle()
async def handle_first_receive(bot: Bot, event: MessageEvent, state: T_State):
args = event.get_plaintext().strip()
if args:
state['plugin_name'] = args
else:
plugin_list = get_loaded_plugins()
        # Only list plugins that have matchers when prompting
plugin_msg = '\n'.join([x.name for x in plugin_list if len(x.matcher) > 0])
msg = f'当前已加载的插件有:\n\n{plugin_msg}'
await disable_plugin.send(msg)
@disable_plugin.got('plugin_name', prompt='请输入需要禁用的插件名称:')
async def handle_disable_plugin(bot: Bot, event: MessageEvent, state: T_State):
plugin_name = state['plugin_name']
plugin_list = [x.name for x in get_loaded_plugins()]
if plugin_name not in plugin_list:
await disable_plugin.reject('没有这个插件, 请重新输入需要禁用的插件名称:\n取消操作请发送【取消】')
result = await DBPlugin(plugin_name=plugin_name).update(enabled=0)
if result.success():
        logger.success(f'OPM | Successfully disabled plugin {plugin_name} by user {event.user_id}')
await disable_plugin.finish(f'禁用插件 {plugin_name} 成功')
else:
        logger.error(f'OPM | Failed to disable {plugin_name}, {result.info}')
await disable_plugin.finish(f'禁用插件 {plugin_name} 失败, 详细信息请参见日志')
# Show the status of all plugins
@list_plugins.handle()
async def handle_list_plugins(bot: Bot, event: MessageEvent, state: T_State):
    # Only show information for plugins that have matchers
plugin_list = [plugin.name for plugin in get_loaded_plugins() if len(plugin.matcher) > 0]
    # Get the enabled/disabled status of each plugin
enabled_plugins_result = await DBPlugin.list_plugins(enabled=1)
disabled_plugins_result = await DBPlugin.list_plugins(enabled=0)
if enabled_plugins_result.error or disabled_plugins_result.error:
logger.error(f'OPM | Getting plugins info failed, '
f'enabled result: {enabled_plugins_result}, disabled result: {disabled_plugins_result}')
await list_plugins.finish('获取插件信息失败, 详细信息请参见日志')
enabled_plugins = '\n'.join(x for x in enabled_plugins_result.result if x in plugin_list)
disabled_plugins = '\n'.join(x for x in disabled_plugins_result.result if x in plugin_list)
text = f'已启用的插件:\n{"="*12}\n{enabled_plugins}\n\n\n被禁用的插件:\n{"="*12}\n{disabled_plugins}'
text_img_result = await TextUtils(text=text).text_to_img()
if text_img_result.error:
logger.error(f'OPM | Generate plugins list image failed, {text_img_result.info}')
await list_plugins.finish('生成插件信息失败, 详细信息请参见日志')
img_seg = MessageSegment.image(file=pathlib.Path(text_img_result.result).as_uri())
await list_plugins.finish(img_seg)
|
examples/LCD/main.py | ccccmagicboy2022/pikascript | 228 | 11113439 | <filename>examples/LCD/main.py
from PikaObj import *
import PikaStdLib
import PikaPiZero
import STM32G0
lcd = PikaPiZero.LCD()
lcd.init()
lcd.clear('white')
mem = PikaStdLib.MemChecker()
key = PikaPiZero.KEY()
key.init()
time = STM32G0.Time()
h = 10
w = 10
x = 10
y = 10
x_last = x
y_last = y
is_update = 0
print('mem used max:')
mem.max()
lcd.fill(x, y, w, h, 'blue')
while True:
key_val = key.get()
if key_val != -1:
x_last = x
y_last = y
is_update = 1
if key_val == 0:
x = x + 5
if key_val == 1:
y = y - 5
if key_val == 2:
y = y + 5
if key_val == 3:
x = x - 5
if is_update:
is_update = 0
lcd.fill(x_last, y_last, w, h, 'white')
lcd.fill(x, y, w, h, 'blue')
|
tests/ut/python/dataset/test_random_lighting.py | PowerOlive/mindspore | 3,200 | 11113441 | <gh_stars>1000+
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomLighting op in DE
"""
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
from mindspore import log as logger
from util import visualize_list, diff_mse, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers
DATA_DIR = "../data/dataset/testImageNetData/train/"
MNIST_DATA_DIR = "../data/dataset/testMnistData"
GENERATE_GOLDEN = False
def test_random_lighting_py(alpha=1, plot=False):
"""
Feature: RandomLighting
Description: test RandomLighting python op
Expectation: equal results
"""
logger.info("Test RandomLighting python op")
# Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
ds_original = data.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512)
for idx, (image, _) in enumerate(ds_original.create_tuple_iterator(num_epochs=1, output_numpy=True)):
if idx == 0:
images_original = np.transpose(image, (0, 2, 3, 1))
else:
images_original = np.append(images_original, np.transpose(image, (0, 2, 3, 1)), axis=0)
# Random Lighting Adjusted Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
alpha = alpha if alpha is not None else 0.05
py_op = F.RandomLighting(alpha)
transforms_random_lighting = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
py_op,
F.ToTensor()])
ds_random_lighting = data.map(operations=transforms_random_lighting, input_columns="image")
ds_random_lighting = ds_random_lighting.batch(512)
for idx, (image, _) in enumerate(ds_random_lighting.create_tuple_iterator(num_epochs=1, output_numpy=True)):
if idx == 0:
images_random_lighting = np.transpose(image, (0, 2, 3, 1))
else:
images_random_lighting = np.append(images_random_lighting, np.transpose(image, (0, 2, 3, 1)), axis=0)
num_samples = images_original.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_random_lighting[i], images_original[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_original, images_random_lighting)
def test_random_lighting_py_md5():
"""
Feature: RandomLighting
Description: test RandomLighting python op with md5 comparison
Expectation: same MD5
"""
logger.info("Test RandomLighting python op with md5 comparison")
original_seed = config_get_set_seed(140)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations
transforms = [
F.Decode(),
F.Resize((224, 224)),
F.RandomLighting(1),
F.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
# Generate dataset
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=transform, input_columns=["image"])
# check results with md5 comparison
filename = "random_lighting_py_01_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore configuration
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_lighting_c(alpha=1, plot=False):
"""
Feature: RandomLighting
Description: test RandomLighting cpp op
Expectation: equal results from Mindspore and benchmark
"""
logger.info("Test RandomLighting cpp op")
# Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize((224, 224))]
ds_original = data.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512)
for idx, (image, _) in enumerate(ds_original.create_tuple_iterator(num_epochs=1, output_numpy=True)):
if idx == 0:
images_original = image
else:
images_original = np.append(images_original, image, axis=0)
# Random Lighting Adjusted Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
alpha = alpha if alpha is not None else 0.05
c_op = C.RandomLighting(alpha)
transforms_random_lighting = [C.Decode(), C.Resize((224, 224)), c_op]
ds_random_lighting = data.map(operations=transforms_random_lighting, input_columns="image")
ds_random_lighting = ds_random_lighting.batch(512)
for idx, (image, _) in enumerate(ds_random_lighting.create_tuple_iterator(num_epochs=1, output_numpy=True)):
if idx == 0:
images_random_lighting = image
else:
images_random_lighting = np.append(images_random_lighting, image, axis=0)
num_samples = images_original.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_random_lighting[i], images_original[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_original, images_random_lighting)
def test_random_lighting_c_py(alpha=1, plot=False):
"""
Feature: RandomLighting
Description: test Random Lighting Cpp and Python Op
Expectation: equal results from Cpp and Python
"""
logger.info("Test RandomLighting Cpp and python Op")
# RandomLighting Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((200, 300))], input_columns=["image"])
python_op = F.RandomLighting(alpha)
c_op = C.RandomLighting(alpha)
transforms_op = mindspore.dataset.transforms.py_transforms.Compose([lambda img: F.ToPIL()(img.astype(np.uint8)),
python_op,
np.array])
ds_random_lighting_py = data.map(operations=transforms_op, input_columns="image")
ds_random_lighting_py = ds_random_lighting_py.batch(512)
for idx, (image, _) in enumerate(ds_random_lighting_py.create_tuple_iterator(num_epochs=1, output_numpy=True)):
if idx == 0:
images_random_lighting_py = image
else:
images_random_lighting_py = np.append(images_random_lighting_py, image, axis=0)
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((200, 300))], input_columns=["image"])
ds_images_random_lighting_c = data.map(operations=c_op, input_columns="image")
ds_random_lighting_c = ds_images_random_lighting_c.batch(512)
for idx, (image, _) in enumerate(ds_random_lighting_c.create_tuple_iterator(num_epochs=1, output_numpy=True)):
if idx == 0:
images_random_lighting_c = image
else:
images_random_lighting_c = np.append(images_random_lighting_c, image, axis=0)
num_samples = images_random_lighting_c.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_random_lighting_c[i], images_random_lighting_py[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_random_lighting_c, images_random_lighting_py, visualize_mode=2)
def test_random_lighting_invalid_params():
"""
Feature: RandomLighting
Description: test RandomLighting with invalid input parameters
Expectation: throw ValueError or TypeError
"""
logger.info("Test RandomLighting with invalid input parameters.")
with pytest.raises(ValueError) as error_info:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)),
C.RandomLighting(-2)], input_columns=["image"])
assert "Input alpha is not within the required interval of [0, 16777216]." in str(error_info.value)
with pytest.raises(TypeError) as error_info:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)),
C.RandomLighting('1')], input_columns=["image"])
err_msg = "Argument alpha with value 1 is not of type [<class 'float'>, <class 'int'>], but got <class 'str'>."
assert err_msg in str(error_info.value)
if __name__ == "__main__":
test_random_lighting_py()
test_random_lighting_py_md5()
test_random_lighting_c()
test_random_lighting_c_py()
test_random_lighting_invalid_params()
|
rls/envs/wrappers/vec.py | StepNeverStop/RLs | 371 | 11113489 | #!/usr/bin/env python3
# encoding: utf-8
from typing import Dict, List, Tuple, Union
class VECEnv:
def __init__(self, n, env_fn, config: Dict = {}):
self.idxs = list(range(n))
self._envs = [env_fn(idx, **config) for idx in self.idxs]
def run(self, attr: str, params: Union[Tuple, List, Dict] = dict(args=(), kwargs=dict()), idxs=None):
idxs = (idxs,) if isinstance(idxs, int) else idxs
idxs = self.idxs if idxs is None else idxs
rets = []
if isinstance(params, dict):
params = [params] * len(idxs)
        attrs = attr.split('.')
        for i, idx in enumerate(idxs):
            data = params[i]
            obj = self._envs[idx]
            for sub_attr in attrs:
                obj = getattr(obj, sub_attr)
if hasattr(obj, '__call__'):
ret = obj(*data.get('args', ()), **data.get('kwargs', {}))
else:
ret = obj
rets.append(ret)
return rets
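# Usage sketch (illustrative only -- `MyEnv`, `actions` and their methods are hypothetical,
# not part of rls):
#
#     envs = VECEnv(4, lambda idx, **kwargs: MyEnv(idx, **kwargs), config={'seed': 0})
#     obs = envs.run('reset')                                    # call reset() on every env
#     imgs = envs.run('render.rgb', idxs=[0, 2])                 # dotted path resolves nested attrs
#     rets = envs.run('step', params=[dict(args=(a,)) for a in actions])
#
# `params` may be a single dict applied to every index, or a list with one dict per index;
# each dict supplies 'args' and/or 'kwargs' for the resolved attribute when it is callable,
# and non-callable attributes are returned as-is.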
|
twistedcaldav/dropbox.py | backwardn/ccs-calendarserver | 462 | 11113490 | ##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Implements drop-box functionality. A drop box is an external attachment store.
"""
__all__ = [
"DropBoxHomeResource",
"DropBoxCollectionResource",
]
from twext.python.log import Logger
from txweb2 import responsecode
from txdav.xml import element as davxml
from txweb2.dav.http import ErrorResponse
from txweb2.dav.resource import DAVResource, TwistedACLInheritable
from txweb2.dav.util import joinURL
from twisted.internet.defer import inlineCallbacks, returnValue
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
log = Logger()
class DropBoxHomeResource (DAVResource):
"""
Drop box collection resource.
"""
def resourceType(self):
return davxml.ResourceType.dropboxhome # @UndefinedVariable
def isCollection(self):
return True
def http_PUT(self, request):
return responsecode.FORBIDDEN
@inlineCallbacks
def accessControlList(self, request, *args, **kwargs):
"""
Override this to give write proxies DAV:write-acl privilege so they can add attachments too.
"""
acl = (yield super(DropBoxHomeResource, self).accessControlList(request, *args, **kwargs))
if config.EnableProxyPrincipals:
owner = (yield self.ownerPrincipal(request))
newaces = tuple(acl.children)
newaces += (
# DAV:write-acl access for this principal's calendar-proxy-write users.
davxml.ACE(
davxml.Principal(davxml.HRef(joinURL(owner.principalURL(), "calendar-proxy-write/"))),
davxml.Grant(
davxml.Privilege(davxml.WriteACL()),
),
davxml.Protected(),
TwistedACLInheritable(),
),
)
returnValue(davxml.ACL(*newaces))
else:
returnValue(acl)
class DropBoxCollectionResource (DAVResource):
"""
Drop box resource.
"""
def resourceType(self):
return davxml.ResourceType.dropbox # @UndefinedVariable
def isCollection(self):
return True
def writeNewACEs(self, newaces):
"""
Write a new ACL to the resource's property store. We override this for calendar collections
and force all the ACEs to be inheritable so that all calendar object resources within the
calendar collection have the same privileges unless explicitly overridden. The same applies
to drop box collections as we want all resources (attachments) to have the same privileges as
the drop box collection.
@param newaces: C{list} of L{ACE} for ACL being set.
"""
# Add inheritable option to each ACE in the list
edited_aces = []
for ace in newaces:
if TwistedACLInheritable() not in ace.children:
children = list(ace.children)
children.append(TwistedACLInheritable())
edited_aces.append(davxml.ACE(*children))
else:
edited_aces.append(ace)
# Do inherited with possibly modified set of aces
return super(DropBoxCollectionResource, self).writeNewACEs(edited_aces)
def http_PUT(self, request):
return ErrorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "valid-drop-box"),
"Cannot store resources in dropbox",
)
|
influx-test/writerow.py | 1514louluo/influx-proxy | 130 | 11113491 | <reponame>1514louluo/influx-proxy<gh_stars>100-1000
import time
import requests
import multiprocessing as mlp
from mytime import mytime
def url(precision='ns'):
return 'http://localhost:7076/write?db=test&precision='+precision
def send(m, precision, number, init_time):
t = init_time.t_p(precision)
for i in range(number):
d = m + ' fd=0 ' + str(t+i)
r = requests.post(url(precision), d)
if i % 500 == 0:
time.sleep(1)
print(m +' '+precision+' '+str(i))
def main():
init_time = mytime(2015,1,1,0,0,0)
thread_list = []
thread_list.append(
mlp.Process(target=send, args=('cpu,pr=s', 's', 100000, init_time)))
thread_list.append(
mlp.Process(target=send, args=('cpu,pr=h', 'h', 88888, init_time)))
thread_list.append(
mlp.Process(target=send, args=('mem,pr=m', 'm', 130000, init_time)))
thread_list.append(
mlp.Process(target=send, args=('mem,pr=ns', 'ns', 77777, init_time)))
thread_list.append(
mlp.Process(target=send, args=('mms', 'ms', 111111, init_time)))
thread_list.append(
mlp.Process(target=send, args=('mus', 'us', 122222, init_time)))
return thread_list
if __name__ == '__main__':
thread_list = main()
for thr in thread_list:
thr.start()
for thr in thread_list:
thr.join()
print('write over')
|
components/isceobj/IsceProc/runRgoffset_ampcor.py | vincentschut/isce2 | 1,133 | 11113508 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Author: <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Comment: Adapted from InsarProc/runRgoffsetprf_ampcor.py
import logging
import sys
import isceobj
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from mroipac.ampcor.Ampcor import Ampcor
from isceobj import Constants as CN
logger = logging.getLogger('isce.isceProc.runRgoffset')
def runRgoffset(self):
infos = {}
for attribute in ['firstSampleAcrossPrf', 'firstSampleDownPrf', 'numberLocationAcrossPrf', 'numberLocationDownPrf']:
infos[attribute] = getattr(self._isce, attribute)
for attribute in ['sensorName', 'offsetSearchWindowSize']:
infos[attribute] = getattr(self, attribute)
stdWriter = self._stdWriter
refPol = self._isce.refPol
refScene = self._isce.refScene
imageSim = self._isce.simAmpImage
sceneid1, sceneid2 = self._isce.pairsToCoreg[0]
if sceneid1 != refScene:
sys.exit("runRgoffset: should have refScene here!")
#refScene should always be the first scene in each pair of pairsToCoreg (reference strategy)
pairRef = None #pair with refScene in it
for pair in self._isce.selectedPairs:
if refScene == pair[0]:
# refScene is first scene of pair (=> band 0 of imageAmp)
bandRef = 0
pairRef = pair
break
if refScene == pair[1]:
# refScene is second scene of pair (=> band 1 of imageAmp)
bandRef = 1
pairRef = pair
if pairRef is None:
sys.exit("runRgoffset: refScene not in any selected pairs!")
# can happen if refScene was used to coregister only but no pair was formed with it
imageAmp = self._isce.resampAmpImages[pairRef][refPol]
prf = self._isce.frames[refScene][refPol].getInstrument().getPulseRepetitionFrequency()
fs1 = self._isce.frames[refScene][refPol].getInstrument().getRangeSamplingRate()
sid = self._isce.formatname(refScene)
infos['outputPath'] = self.getoutputdir(refScene)
catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
offsetField = run(imageAmp, imageSim, bandRef, prf, fs1, infos, stdWriter, catalog=catalog, sceneid=sid)
self._isce.procDoc.addAllFromCatalog(catalog)
# assign the same offsetField to all pairs in pairsToCoreg (will be used by runOffoutliers)
for pair in self._isce.pairsToCoreg:
self._isce.offsetFields[pair] = offsetField
self._isce.refinedOffsetFields[pair] = offsetField
def run(imageAmp, imageSim, numBand, prf, fs1, infos, stdWriter, catalog=None, sceneid='NO_ID'):
#fs1: range sampling rate
firstAc = infos['firstSampleAcrossPrf']
firstDown = infos['firstSampleDownPrf']
numLocationAcross = infos['numberLocationAcrossPrf']
numLocationDown = infos['numberLocationDownPrf']
coarseAcross = 0
coarseDown = 0
#Fake amplitude image as a complex image
objAmp = isceobj.createImage()
objAmp.setAccessMode('read')
objAmp.dataType = 'CFLOAT'
objAmp.bands = 1
objAmp.setFilename(imageAmp.filename)
objAmp.setWidth(imageAmp.width)
objAmp.createImage()
widthAmp = objAmp.getWidth()
intLength = objAmp.getLength()
objSim = isceobj.createImage()
objSim.setFilename(imageSim.filename)
objSim.setWidth(imageSim.width)
objSim.dataType='FLOAT'
objSim.setAccessMode('read')
objSim.createImage()
# check if it's correct
delRg1 = CN.SPEED_OF_LIGHT / (2*fs1)
objAmpcor = Ampcor()
objAmpcor.setImageDataType1('real')
objAmpcor.setImageDataType2('complex')
####Adjust first and last values using window sizes
xMargin = 2*objAmpcor.searchWindowSizeWidth + objAmpcor.windowSizeWidth
yMargin = 2*objAmpcor.searchWindowSizeHeight + objAmpcor.windowSizeHeight
offAc = max(firstAc, -coarseAcross) + xMargin
offDn = max(firstDown, -coarseDown) + yMargin
lastAc = int(min(widthAmp, widthAmp-offAc) - xMargin)
lastDn = int(min(intLength, intLength-offDn) - yMargin)
print(xMargin, yMargin)
print(offAc, lastAc)
print(offDn, lastDn)
objAmpcor.setFirstSampleAcross(offAc)
objAmpcor.setLastSampleAcross(lastAc)
objAmpcor.setNumberLocationAcross(numLocationAcross)
objAmpcor.setFirstSampleDown(offDn)
objAmpcor.setLastSampleDown(lastDn)
objAmpcor.setNumberLocationDown(numLocationDown)
#set the tag used in the outfile. each message is preceded by this tag
#if the writer is not of "file" type the call has no effect
objAmpcor.stdWriter = stdWriter.set_file_tags("rgoffset",
"log",
"err",
"out")
objAmpcor.setFirstPRF(prf)
objAmpcor.setSecondPRF(prf)
objAmpcor.setAcrossGrossOffset(coarseAcross)
objAmpcor.setDownGrossOffset(coarseDown)
objAmpcor.setFirstRangeSpacing(delRg1)
objAmpcor.setSecondRangeSpacing(delRg1)
objAmpcor.ampcor(objSim,objAmp)
if catalog is not None:
# Record the inputs and outputs
isceobj.Catalog.recordInputsAndOutputs(catalog, objAmpcor,
"runRgoffset_ampcor.%s" % sceneid,
logger,
"runRgoffset_ampcor.%s" % sceneid)
objAmp.finalizeImage()
objSim.finalizeImage()
return objAmpcor.getOffsetField()
|
tests/multi_net/uasyncio_tcp_close_write.py | sebastien-riou/micropython | 13,648 | 11113512 | <gh_stars>1000+
# Test uasyncio TCP stream closing then writing
try:
import uasyncio as asyncio
except ImportError:
try:
import asyncio
except ImportError:
print("SKIP")
raise SystemExit
PORT = 8000
async def handle_connection(reader, writer):
# Write data to ensure connection
writer.write(b"x")
await writer.drain()
# Read, should return nothing
print("read:", await reader.read(100))
# Close connection
print("close")
writer.close()
await writer.wait_closed()
print("done")
ev.set()
async def tcp_server():
global ev
ev = asyncio.Event()
server = await asyncio.start_server(handle_connection, "0.0.0.0", PORT)
print("server running")
multitest.next()
async with server:
await asyncio.wait_for(ev.wait(), 5)
async def tcp_client():
reader, writer = await asyncio.open_connection(IP, PORT)
# Read data to ensure connection
print("read:", await reader.read(1))
# Close connection
print("close")
writer.close()
await writer.wait_closed()
# Try writing data to the closed connection
print("write")
try:
writer.write(b"x")
await writer.drain()
except OSError:
print("OSError")
def instance0():
multitest.globals(IP=multitest.get_network_ip())
asyncio.run(tcp_server())
def instance1():
multitest.next()
asyncio.run(tcp_client())
|
examples/pxScene2d/external/libnode-v10.15.3/test/addons-napi/test_env_sharing/binding.gyp | madanagopaltcomcast/pxCore | 1,144 | 11113513 | <gh_stars>1000+
{
"targets": [
{
"target_name": "store_env",
"sources": [ "store_env.c" ]
},
{
"target_name": "compare_env",
"sources": [ "compare_env.c" ]
}
]
}
|
ci/docker/docker-in-docker-image/publish-docker-image/publish_docker_image/command.py | bugtsa/avito-android | 347 | 11113535 | import os
import fire
import yaml
from .emulator import publish_emulator
from .docker import publish, test
CONFIG_FILE = 'image.yaml'
# noinspection PyClassHasNoInit
class Command:
@staticmethod
def publish(directory):
config_file_path = Command._check_configuration(directory)
with open(config_file_path, 'r') as config_file:
config = yaml.load(config_file, Loader=yaml.SafeLoader)
docker_registry = config['registry']
if docker_registry == 'DOCKER_REGISTRY':
docker_registry = os.getenv('DOCKER_REGISTRY', '___MISSED_DOCKER_REGISTRY_ENV___')
publish(directory, docker_registry, config['image'])
@staticmethod
def publish_emulator(directory, emulators):
emulators_to_publish = str(emulators).split()
print('Emulators for publishing: {emulators}'.format(emulators=emulators_to_publish))
config_file_path = Command._check_configuration(directory)
with open(config_file_path, 'r') as config_file:
config = yaml.load(config_file, Loader=yaml.SafeLoader)
docker_registry = config['registry']
if docker_registry == 'DOCKER_REGISTRY':
docker_registry = os.getenv('DOCKER_REGISTRY', '___MISSED_DOCKER_REGISTRY_ENV___')
publish_emulator(directory, docker_registry, config['image'], emulators_to_publish)
@staticmethod
def test():
test()
@staticmethod
def _check_configuration(directory):
if not directory:
raise ValueError('Directory is not specified')
config_file_path = os.path.join(directory, CONFIG_FILE)
if not os.path.isfile(config_file_path):
raise ValueError('Configuration file not found in specified directory')
return config_file_path
if __name__ == '__main__':
fire.Fire(Command)
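# Usage sketch via python-fire (the directory paths and emulator tags below are made up
# for illustration; the real image directories live elsewhere in the repo):
#
#     python -m publish_docker_image.command publish ci/docker/android-emulator
#     python -m publish_docker_image.command publish_emulator ci/docker/android-emulator "27 29"
#
# Each target directory is expected to contain an image.yaml with `registry` and `image`
# keys, as enforced by _check_configuration above.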
|
webdataset/tests/test_utils.py | techthiyanes/webdataset | 389 | 11113640 | from webdataset import utils
def test_repeatedly():
assert len(list(utils.repeatedly(range(3), nepochs=7))) == 21
def test_repeatedly2():
assert len(list(utils.repeatedly(range(3), nbatches=10))) == 10
def test_repeatedly3():
assert len(list(utils.repeatedly([[[1, 1], [2, 2]]] * 3, nsamples=10))) == 5
|
AE_Datasets/O_A/datasets/__init__.py | MasterXin2020/DL-based-Intelligent-Diagnosis-Benchmark | 200 | 11113667 | <reponame>MasterXin2020/DL-based-Intelligent-Diagnosis-Benchmark<filename>AE_Datasets/O_A/datasets/__init__.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
from AE_Datasets.O_A.datasets.CWRU import CWRU
from AE_Datasets.O_A.datasets.CWRUFFT import CWRUFFT
from AE_Datasets.O_A.datasets.CWRUCWT import CWRUCWT
from AE_Datasets.O_A.datasets.CWRUSTFT import CWRUSTFT
from AE_Datasets.O_A.datasets.CWRUSlice import CWRUSlice
from AE_Datasets.O_A.datasets.MFPT import MFPT
from AE_Datasets.O_A.datasets.MFPTFFT import MFPTFFT
from AE_Datasets.O_A.datasets.MFPTCWT import MFPTCWT
from AE_Datasets.O_A.datasets.MFPTSTFT import MFPTSTFT
from AE_Datasets.O_A.datasets.MFPTSlice import MFPTSlice
from AE_Datasets.O_A.datasets.PU import PU
from AE_Datasets.O_A.datasets.PUFFT import PUFFT
from AE_Datasets.O_A.datasets.PUCWT import PUCWT
from AE_Datasets.O_A.datasets.PUSTFT import PUSTFT
from AE_Datasets.O_A.datasets.PUSlice import PUSlice
from AE_Datasets.O_A.datasets.SEU import SEU
from AE_Datasets.O_A.datasets.SEUFFT import SEUFFT
from AE_Datasets.O_A.datasets.SEUCWT import SEUCWT
from AE_Datasets.O_A.datasets.SEUSTFT import SEUSTFT
from AE_Datasets.O_A.datasets.SEUSlice import SEUSlice
from AE_Datasets.O_A.datasets.UoC import UoC
from AE_Datasets.O_A.datasets.UoCFFT import UoCFFT
from AE_Datasets.O_A.datasets.UoCCWT import UoCCWT
from AE_Datasets.O_A.datasets.UoCSTFT import UoCSTFT
from AE_Datasets.O_A.datasets.UoCSlice import UoCSlice
from AE_Datasets.O_A.datasets.JNU import JNU
from AE_Datasets.O_A.datasets.JNUFFT import JNUFFT
from AE_Datasets.O_A.datasets.JNUCWT import JNUCWT
from AE_Datasets.O_A.datasets.JNUSTFT import JNUSTFT
from AE_Datasets.O_A.datasets.JNUSlice import JNUSlice
from AE_Datasets.O_A.datasets.XJTU import XJTU
from AE_Datasets.O_A.datasets.XJTUFFT import XJTUFFT
from AE_Datasets.O_A.datasets.XJTUCWT import XJTUCWT
from AE_Datasets.O_A.datasets.XJTUSTFT import XJTUSTFT
from AE_Datasets.O_A.datasets.XJTUSlice import XJTUSlice
|
tests/ex01_compose_v2/models.py | RodrigoDeRosa/related | 190 | 11113670 | import related
@related.immutable
class Service(object):
name = related.StringField()
image = related.StringField(required=False)
build = related.StringField(required=False)
ports = related.SequenceField(str, required=False)
volumes = related.SequenceField(str, required=False)
command = related.StringField(required=False)
@related.immutable
class Compose(object):
version = related.StringField(required=False, default=None)
services = related.MappingField(Service, "name", required=False)
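# A minimal round-trip sketch using related's public helpers (the YAML snippet is invented,
# not taken from the example fixtures):
#
#     import related
#     raw = related.from_yaml("""
#     version: '2'
#     services:
#       web:
#         image: nginx
#         ports: ["80:80"]
#     """)
#     compose = related.to_model(Compose, raw)
#     assert compose.services["web"].image == "nginx"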
|
the-basic-mq/python/app.py | mttfarmer/serverless | 1,627 | 11113674 | #!/usr/bin/env python3
from aws_cdk import core
from the_basic_mq.the_basic_mq_stack import TheBasicMQStack
app = core.App()
TheBasicMQStack(app, "TheBasicMQStack", env=core.Environment(region="us-east-1"))
app.synth()
|
mastermind/http.py | gap892003/mastermind | 389 | 11113678 | from __future__ import (absolute_import, print_function, division)
from mitmproxy.models import Headers, HTTPResponse
status_codes = {100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'}
def status_message(code):
return status_codes[code]
def response(code, body=b'', headers=None):
# http://docs.python-guide.org/en/latest/writing/gotchas/#mutable-default-arguments
if headers is None:
headers = Headers()
return HTTPResponse('HTTP/1.1',
code,
status_message(code),
headers, body)
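# Example sketch: a JSON 404 built with the helper above. The Headers construction follows
# the mitmproxy Headers type imported at the top; the body content is illustrative.
#
#     resp = response(404,
#                     body=b'{"error": "not found"}',
#                     headers=Headers([(b"Content-Type", b"application/json")]))
#     # resp.status_code == 404 (attribute name may vary across mitmproxy versions)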
|
docker/build_files/entrypoint-nvidia.py | PeihongYu/surreal | 471 | 11113697 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import sys
import shlex
import shutil
def init():
os.system('Xdummy > /dev/null 2>&1 &')
if os.path.exists('/etc/secrets/mjkey.txt'):
shutil.copy('/etc/secrets/mjkey.txt', '/root/.mujoco/mjkey.txt')
os.system('python -c "import mujoco_py"')
os.environ['DISPLAY'] = ':0'
def _run_cmd_list(args):
if len(args) == 1:
os.system(args[0])
else: # docker run
os.system(' '.join(map(shlex.quote, args)))
init()
_run_cmd_list(sys.argv[1:])
|
ch13/ch13_part2.py | ericgarza70/machine-learning-book | 655 | 11113698 | <filename>ch13/ch13_part2.py
# coding: utf-8
import sys
from python_environment_check import check_packages
import numpy as np
import torch
import torch.nn as nn
import pandas as pd
import sklearn
import sklearn.model_selection
from torch.nn.functional import one_hot
from torch.utils.data import DataLoader, TensorDataset
import torchvision
from torchvision import transforms
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
sys.path.insert(0, '..')
# Check recommended package versions:
d = {
'numpy': '1.21.2',
'pandas': '1.3.2',
'sklearn': '1.0',
'torch': '1.8',
'torchvision': '0.9.0'
}
check_packages(d)
# # Chapter 13: Going Deeper -- the Mechanics of PyTorch (Part 2/3)
# **Outline**
#
# - [Project one - predicting the fuel efficiency of a car](#Project-one----predicting-the-fuel-efficiency-of-a-car)
# - [Working with feature columns](#Working-with-feature-columns)
# - [Training a DNN regression model](#Training-a-DNN-regression-model)
# - [Project two - classifying MNIST handwritten digits](#Project-two----classifying-MNIST-handwritten-digits)
# ## Project one - predicting the fuel efficiency of a car
#
# ### Working with feature columns
#
#
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
df = pd.read_csv(url, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
df.tail()
print(df.isna().sum())
df = df.dropna()
df = df.reset_index(drop=True)
df.tail()
df_train, df_test = sklearn.model_selection.train_test_split(df, train_size=0.8, random_state=1)
train_stats = df_train.describe().transpose()
train_stats
numeric_column_names = ['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration']
df_train_norm, df_test_norm = df_train.copy(), df_test.copy()
for col_name in numeric_column_names:
mean = train_stats.loc[col_name, 'mean']
std = train_stats.loc[col_name, 'std']
df_train_norm.loc[:, col_name] = (df_train_norm.loc[:, col_name] - mean)/std
df_test_norm.loc[:, col_name] = (df_test_norm.loc[:, col_name] - mean)/std
df_train_norm.tail()
boundaries = torch.tensor([73, 76, 79])
v = torch.tensor(df_train_norm['Model Year'].values)
df_train_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
v = torch.tensor(df_test_norm['Model Year'].values)
df_test_norm['Model Year Bucketed'] = torch.bucketize(v, boundaries, right=True)
numeric_column_names.append('Model Year Bucketed')
total_origin = len(set(df_train_norm['Origin']))
origin_encoded = one_hot(torch.from_numpy(df_train_norm['Origin'].values) % total_origin)
x_train_numeric = torch.tensor(df_train_norm[numeric_column_names].values)
x_train = torch.cat([x_train_numeric, origin_encoded], 1).float()
origin_encoded = one_hot(torch.from_numpy(df_test_norm['Origin'].values) % total_origin)
x_test_numeric = torch.tensor(df_test_norm[numeric_column_names].values)
x_test = torch.cat([x_test_numeric, origin_encoded], 1).float()
y_train = torch.tensor(df_train_norm['MPG'].values).float()
y_test = torch.tensor(df_test_norm['MPG'].values).float()
train_ds = TensorDataset(x_train, y_train)
batch_size = 8
torch.manual_seed(1)
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
hidden_units = [8, 4]
input_size = x_train.shape[1]
all_layers = []
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 1))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 200
log_epochs = 20
for epoch in range(num_epochs):
loss_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)[:, 0]
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_hist_train += loss.item()
if epoch % log_epochs==0:
print(f'Epoch {epoch} Loss {loss_hist_train/len(train_dl):.4f}')
with torch.no_grad():
pred = model(x_test.float())[:, 0]
loss = loss_fn(pred, y_test)
print(f'Test MSE: {loss.item():.4f}')
print(f'Test MAE: {nn.L1Loss()(pred, y_test).item():.4f}')
# ## Project two - classifying MNIST hand-written digits
image_path = './'
transform = transforms.Compose([transforms.ToTensor()])
mnist_train_dataset = torchvision.datasets.MNIST(root=image_path,
train=True,
transform=transform,
download=True)
mnist_test_dataset = torchvision.datasets.MNIST(root=image_path,
train=False,
transform=transform,
download=False)
batch_size = 64
torch.manual_seed(1)
train_dl = DataLoader(mnist_train_dataset, batch_size, shuffle=True)
hidden_units = [32, 16]
image_size = mnist_train_dataset[0][0].shape
input_size = image_size[0] * image_size[1] * image_size[2]
all_layers = [nn.Flatten()]
for hidden_unit in hidden_units:
layer = nn.Linear(input_size, hidden_unit)
all_layers.append(layer)
all_layers.append(nn.ReLU())
input_size = hidden_unit
all_layers.append(nn.Linear(hidden_units[-1], 10))
model = nn.Sequential(*all_layers)
model
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
torch.manual_seed(1)
num_epochs = 20
for epoch in range(num_epochs):
accuracy_hist_train = 0
for x_batch, y_batch in train_dl:
pred = model(x_batch)
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
is_correct = (torch.argmax(pred, dim=1) == y_batch).float()
accuracy_hist_train += is_correct.sum()
accuracy_hist_train /= len(train_dl.dataset)
print(f'Epoch {epoch} Accuracy {accuracy_hist_train:.4f}')
pred = model(mnist_test_dataset.data / 255.)
is_correct = (torch.argmax(pred, dim=1) == mnist_test_dataset.targets).float()
print(f'Test accuracy: {is_correct.mean():.4f}')
# ---
#
# Readers may ignore the next cell.
|