max_stars_repo_path (string, lengths 4–245) | max_stars_repo_name (string, lengths 7–115) | max_stars_count (int64, 101–368k) | id (string, lengths 2–8) | content (string, lengths 6–1.03M) |
---|---|---|---|---|
tests/test_year_2013.py | l0pht511/jpholiday | 179 | 11158381 | # coding: utf-8
import datetime
import unittest
import jpholiday
class TestYear2013(unittest.TestCase):
def test_holiday(self):
"""
Public holidays in 2013
"""
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 1, 1)), '元日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 1, 14)), '成人の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 2, 11)), '建国記念の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 3, 20)), '春分の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 4, 29)), '昭和の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 5, 3)), '憲法記念日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 5, 4)), 'みどりの日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 5, 5)), 'こどもの日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 5, 6)), 'こどもの日 振替休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 7, 15)), '海の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 9, 16)), '敬老の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 9, 23)), '秋分の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 10, 14)), '体育の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 11, 3)), '文化の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 11, 4)), '文化の日 振替休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 11, 23)), '勤労感謝の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2013, 12, 23)), '天皇誕生日')
def test_count_month(self):
"""
Number of public holidays per month in 2013
"""
self.assertEqual(len(jpholiday.month_holidays(2013, 1)), 2)
self.assertEqual(len(jpholiday.month_holidays(2013, 2)), 1)
self.assertEqual(len(jpholiday.month_holidays(2013, 3)), 1)
self.assertEqual(len(jpholiday.month_holidays(2013, 4)), 1)
self.assertEqual(len(jpholiday.month_holidays(2013, 5)), 4)
self.assertEqual(len(jpholiday.month_holidays(2013, 6)), 0)
self.assertEqual(len(jpholiday.month_holidays(2013, 7)), 1)
self.assertEqual(len(jpholiday.month_holidays(2013, 8)), 0)
self.assertEqual(len(jpholiday.month_holidays(2013, 9)), 2)
self.assertEqual(len(jpholiday.month_holidays(2013, 10)), 1)
self.assertEqual(len(jpholiday.month_holidays(2013, 11)), 3)
self.assertEqual(len(jpholiday.month_holidays(2013, 12)), 1)
def test_count_year(self):
"""
Number of public holidays in 2013
"""
self.assertEqual(len(jpholiday.year_holidays(2013)), 17)
|
cacreader/swig-4.0.2/Examples/test-suite/python/nested_in_template_runme.py | kyletanyag/LL-Smartcard | 1,031 | 11158384 | <gh_stars>1000+
from nested_in_template import *
cd = ConcreteDerived(88)
if cd.m_value != 88:
raise RuntimeError("ConcreteDerived not created correctly")
|
aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/CreateBackupPlanRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 11158410 | <filename>aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/CreateBackupPlanRequest.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class CreateBackupPlanRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'CreateBackupPlan','hbr')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientId(self):
return self.get_body_params().get('ClientId')
def set_ClientId(self,ClientId):
self.add_body_params('ClientId', ClientId)
def get_VaultId(self):
return self.get_query_params().get('VaultId')
def set_VaultId(self,VaultId):
self.add_query_param('VaultId',VaultId)
def get_Prefix(self):
return self.get_query_params().get('Prefix')
def set_Prefix(self,Prefix):
self.add_query_param('Prefix',Prefix)
def get_Paths(self):
return self.get_body_params().get('Path')
def set_Paths(self, Paths):
for depth1 in range(len(Paths)):
if Paths[depth1] is not None:
self.add_body_params('Path.' + str(depth1 + 1) , Paths[depth1])
def get_PlanName(self):
return self.get_query_params().get('PlanName')
def set_PlanName(self,PlanName):
self.add_query_param('PlanName',PlanName)
def get_Options(self):
return self.get_body_params().get('Options')
def set_Options(self,Options):
self.add_body_params('Options', Options)
def get_SourceType(self):
return self.get_query_params().get('SourceType')
def set_SourceType(self,SourceType):
self.add_query_param('SourceType',SourceType)
def get_Exclude(self):
return self.get_body_params().get('Exclude')
def set_Exclude(self,Exclude):
self.add_body_params('Exclude', Exclude)
def get_BackupType(self):
return self.get_query_params().get('BackupType')
def set_BackupType(self,BackupType):
self.add_query_param('BackupType',BackupType)
def get_Retention(self):
return self.get_query_params().get('Retention')
def set_Retention(self,Retention):
self.add_query_param('Retention',Retention)
def get_FileSystemId(self):
return self.get_query_params().get('FileSystemId')
def set_FileSystemId(self,FileSystemId):
self.add_query_param('FileSystemId',FileSystemId)
def get_Include(self):
return self.get_body_params().get('Include')
def set_Include(self,Include):
self.add_body_params('Include', Include)
def get_CreateTime(self):
return self.get_query_params().get('CreateTime')
def set_CreateTime(self,CreateTime):
self.add_query_param('CreateTime',CreateTime)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_Bucket(self):
return self.get_query_params().get('Bucket')
def set_Bucket(self,Bucket):
self.add_query_param('Bucket',Bucket)
def get_Schedule(self):
return self.get_query_params().get('Schedule')
def set_Schedule(self,Schedule):
self.add_query_param('Schedule',Schedule)
def get_InstanceId(self):
return self.get_body_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_body_params('InstanceId', InstanceId)
def get_SpeedLimit(self):
return self.get_body_params().get('SpeedLimit')
def set_SpeedLimit(self,SpeedLimit):
self.add_body_params('SpeedLimit', SpeedLimit)
def get_Detail(self):
return self.get_query_params().get('Detail')
def set_Detail(self,Detail):
self.add_query_param('Detail',Detail)
def get_BackupSourceGroupId(self):
return self.get_query_params().get('BackupSourceGroupId')
def set_BackupSourceGroupId(self,BackupSourceGroupId):
self.add_query_param('BackupSourceGroupId',BackupSourceGroupId)
def get_UdmRegionId(self):
return self.get_query_params().get('UdmRegionId')
def set_UdmRegionId(self,UdmRegionId):
self.add_query_param('UdmRegionId',UdmRegionId) |
algorithms/permutation-equation.py | gajubadge11/HackerRank-1 | 340 | 11158419 | #!/usr/bin/env python3
import sys
def permutationEquation(p):
output = []
for num in range(1, max(p)+1):
output.append(p.index(p.index(num)+1)+1)
return output
if __name__ == "__main__":
n = int(input().strip())
p = list(map(int, input().strip().split(' ')))
result = permutationEquation(p)
print ("\n".join(map(str, result)))
|
tests/benchmarking/benchmarking_example.py | YevheniiSemendiak/pytorch-tools | 155 | 11158431 | <gh_stars>100-1000
"""Example of measuring memory consumption and speed in PyTorch"""
import torch
import time
from torch.autograd import Variable
# #### MEMORY ####
def consume_gpu_ram(n):
return torch.ones((n, n)).cuda(0)
def consume_gpu_ram_256mb():
return consume_gpu_ram(2 ** 13)
# should be 1024 peak, 0 used
z = [consume_gpu_ram_256mb() for i in range(4)] # 1GB
del z
print("Peak memory: {}Mb".format(torch.cuda.max_memory_allocated(0) / 2 ** 10 / 2 ** 10))
print("Current memory: {}Mb".format(torch.cuda.memory_allocated(0) / 2 ** 10 / 2 ** 10))
torch.cuda.reset_max_memory_allocated()
# should be: 512 peaked, 256 used
c1 = consume_gpu_ram_256mb()
c2 = consume_gpu_ram_256mb()
del c1
print("Peak memory: {}Mb".format(torch.cuda.max_memory_allocated(0) / 2 ** 10 / 2 ** 10))
print("Current memory: {}Mb".format(torch.cuda.memory_allocated(0) / 2 ** 10 / 2 ** 10))
torch.backends.cudnn.benchmark = False
#### SPEED ####
x = torch.ones((8, 3, 32, 32), requires_grad=True).cuda(0)
conv = torch.nn.Conv2d(3, 64, 5).cuda(0)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
start_cpu = time.time()
y = torch.mean(conv(x))
y.backward()
end_cpu = time.time()
end.record()
torch.cuda.synchronize()
gpu_time = start.elapsed_time(end)
cpu_time = end_cpu - start_cpu
print(f"Gpu msecs: {gpu_time:.3f}. Cpu msecs: {cpu_time * 1e3:.3f}")
|
aruco_detect/scripts/create_markers.py | teosnare/fiducials | 232 | 11158443 | #!/usr/bin/python3
"""
Generate a PDF file containing one or more fiducial markers for printing
"""
import os, sys, argparse
import subprocess
from marker_generation import genMarker
def checkCmd(cmd, package):
rc = os.system("which %s > /dev/null" % cmd)
if rc != 0:
print("""This utility requires %s. It can be installed by typing:
sudo apt install %s""" % (cmd, package))
sys.exit(1)
if __name__ == "__main__":
checkCmd("pdfunite", "poppler-utils")
checkCmd("cairosvg", "cairosvg python3-cairosvg")
parser = argparse.ArgumentParser(description='Generate Aruco Markers.')
parser.add_argument('startId', type=int,
help='start of marker range to generate')
parser.add_argument('endId', type=int,
help='end of marker range to generate')
parser.add_argument('pdfFile', type=str,
help='file to store markers in')
parser.add_argument('dictionary', type=int, default='7', nargs='?',
help='dictionary to generate from')
parser.add_argument('--paper-size', dest='paper_size', action='store',
default='letter', help='paper size to use (letter or a4)')
args = parser.parse_args()
outfile = args.pdfFile
dicno = args.dictionary
markers = range(args.startId, args.endId + 1)
pdfs = ["/tmp/marker%d.pdf" % i for i in markers]  # a list (not a one-shot map iterator) so it can be joined for pdfunite and iterated again for cleanup
if args.paper_size == 'letter':
paper_size = (215.9, 279.4)
elif args.paper_size == 'a4':
paper_size = (210, 297)
try:
# For a parallel version
from joblib import Parallel, delayed
Parallel(n_jobs=-1)(delayed(genMarker)(i, dicno, paper_size) for i in markers)
except ImportError:
# Fallback to serial version
for i in markers:
genMarker(i, dicno, paper_size)
print("Combining into %s" % outfile)
os.system("pdfunite %s %s" % (" ".join(pdfs), outfile))
for f in pdfs:
os.remove(f)
print('\033[91m' + """After printing, please make sure that the long lines around the marker are
EXACTLY 14.0cm long. This is required for accurate position estimation.""" + '\033[0m')
|
theonionbox/tob/system/__init__.py | ralphwetzel/theonionbox | 120 | 11158453 | from typing import Optional
import os
import platform
from collections import deque
import itertools
from psutil import virtual_memory, cpu_percent # to readout the cpu load
from threading import RLock
from ..deviation import getTimer
class BaseSystem(object):
def __init__(self):
self.__user = None
self.lock = RLock()
self.data = deque(maxlen=1000)
# @staticmethod
# def is_temperature_available() -> bool:
# return False
@property
def uptime(self) -> Optional[str]:
return None
@property
def temperature(self) -> Optional[float]:
return None
@property
def system(self) -> str:
return 'Generic'
@property
def ntp(self) -> Optional[str]:
return None
@property
def name(self) -> str:
return platform.node()
@property
def release(self) -> str:
return platform.release()
@property
def version(self) -> str:
return platform.version()
@property
def machine(self) -> str:
return platform.machine()
@property
def processor(self) -> str:
return platform.processor()
@property
def venv(self) -> Optional[str]:
return os.getenv('VIRTUAL_ENV', None)
@property
def user(self) -> Optional[str]:
if self.__user is None:
# Try to load pwd, fallback to getpass if unsuccessful
try:
import pwd
self.__user = pwd.getpwuid(os.geteuid()).pw_name
except ImportError:
try:
import getpass
self.__user = getpass.getuser()
except:
pass
return self.__user
@property
def memory(self) -> Optional[int]:
try:
from psutil import virtual_memory
return virtual_memory().total
except:
return None
@property
def memoryMB(self) -> Optional[int]:
mem = self.memory
if mem is None:
return mem
return int(mem / (1024 ** 2))
def record_performance_data(self):
timestamp = getTimer()() * 1000 # has to be converted to ms as JS expects ms!
# we always catch the current cpu load
cpu = {}
count = 0
# first: overall cpu load:
cpu['c'] = cpu_percent(None, False)
# notice: psutil.cpu_percent() will return a meaningless 0.0 when called for the first time
# this is not nice yet doesn't hurt!
for cx in cpu_percent(None, True):
cpu['c%s' % count] = cx
count += 1
cpu['s'] = timestamp
# ... and the percentage of memory usage
cpu['mp'] = virtual_memory().percent
t = self.temperature
if t is not None:
cpu['t'] = t
# append the data to the list
with self.lock:
self.data.append(cpu)
def get_performance_data(self, after: int = None):
with self.lock:
if after is None or after == 0:
ret = list(self.data)
else:
ret = list(itertools.dropwhile(lambda x: x['s'] < after, self.data))
return ret
def run(self, launch, stop):
return launch()
def get_system_manager(system: str = platform.system()) -> BaseSystem:
if system == 'Darwin':
from .darwin import Darwin
return Darwin()
elif system == 'FreeBSD':
from .freebsd import FreeBSD
return FreeBSD()
elif system == 'Linux':
from .linux import Linux
return Linux()
elif system == 'Windows':
from .windows import Windows
return Windows()
else:
return BaseSystem()
|
sort/insertion_sort/python/akshitgrover_Insertion_Sort.py | CarbonDDR/al-go-rithms | 1,253 | 11158458 | <gh_stars>1000+
n=int(input())
a=[]
for i in range(0,n):
c=0
for j in range(0,3):
c=c+int(input())
a=a+[c]
for i in range(0,len(a)):
j=i-1
h=i
while j>=0 and a[j]<a[h]:
c=a[h]
del a[h]
a.insert(j,c)
j-=1
h-=1
print(a)
|
unit_testing_course/lesson1/task2/tests.py | behzod/pycharm-courses | 213 | 11158485 | import unittest.mock as mock
from custom_test_helpers import check_tests_pass, check_tests_fail, \
reload_module, abort_tests
from test_helper import run_common_tests, test_answer_placeholders_text_deleted, \
passed, failed, import_task_file
if __name__ == '__main__':
from hello_someone import hello_someone
run_common_tests()
test_answer_placeholders_text_deleted()
task_tests_module = import_task_file()
# check that all tests pass
check_tests_pass(task_tests_module)
# check that the function hello_someone() is called at least once by the tests
counting_hello_someone = mock.Mock(wraps=hello_someone)
with mock.patch('{}.{}'.format('hello_someone', 'hello_someone'), counting_hello_someone):
module = import_task_file()
check_tests_pass(module)
if counting_hello_someone.call_count > 0:
passed("Test called the 'hello_someone' function")
else:
failed("Test never called the 'hello_someone' function")
# check that the tests fail on a broken implementation
def broken_hello_someone(someone):
# omit the comma
return "Hello {}!".format(someone)
with mock.patch('{}.{}'.format('hello_someone', 'hello_someone'), broken_hello_someone):
module = import_task_file()
check_tests_fail(module)
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/openid/urls.py | DemarcusL/django_wiki_lab | 6,342 | 11158519 | from django.urls import path
from . import views
urlpatterns = [
path("openid/login/", views.login, name="openid_login"),
path("openid/callback/", views.callback, name="openid_callback"),
]
|
Python3/522.py | rakhi2001/ecom7 | 854 | 11158565 | <filename>Python3/522.py
__________________________________________________________________________________________________
sample 28 ms submission
class Solution:
def findLUSlength(self, strs):
def subseq(src, sub):
i = 0
n = len(sub)
for ch in src:
if i < n and sub[i] == ch:
i += 1
return i == len(sub)
strs.sort(key=len, reverse=True)
for i, sub in enumerate(strs):
for j, src in enumerate(strs):
if i == j or len(src) < len(sub):
continue
if subseq(src, sub):  # the subsequence need not be contiguous
break
else:  # sub is not a subsequence of any other (longer or equal) string
return len(sub)
return -1
__________________________________________________________________________________________________
sample 32 ms submission
class Solution(object):
def findLUSlength(self, strs):
"""
:type strs: List[str]
:rtype: int
"""
def substring_of(s1, s2):
# s2 is longer.
i1 = i2 = 0
while i1 < len(s1) and i2 < len(s2):
if s2[i2] != s1[i1]:
i2 += 1
else:
i1 += 1
i2 += 1
return i1==len(s1)
unique_set = set()
removed_set = set()
for s in strs:
if s in unique_set:
unique_set.remove(s)
removed_set.add(s)
elif s not in removed_set:
unique_set.add(s)
for s in sorted(unique_set, key=len, reverse=True):
for s2 in removed_set:
if len(s2)>len(s) and substring_of(s, s2):
break
else:
return len(s)
return -1
__________________________________________________________________________________________________
|
conan/tools/env/virtualbuildenv.py | gmeeker/conan | 6,205 | 11158569 | <gh_stars>1000+
from conan.tools.env import Environment
from conan.tools.env.virtualrunenv import runenv_from_cpp_info
class VirtualBuildEnv:
""" captures the conanfile environment that is defined from its
dependencies, and also from profiles
"""
def __init__(self, conanfile):
self._conanfile = conanfile
self._conanfile.virtualbuildenv = False
self.basename = "conanbuildenv"
# TODO: Make this use the settings_build
self.configuration = conanfile.settings.get_safe("build_type")
if self.configuration:
self.configuration = self.configuration.lower()
self.arch = conanfile.settings.get_safe("arch")
if self.arch:
self.arch = self.arch.lower()
@property
def _filename(self):
f = self.basename
if self.configuration:
f += "-" + self.configuration
if self.arch:
f += "-" + self.arch
return f
def environment(self):
""" collects the buildtime information from dependencies. This is the typical use case
of build_requires defining information for consumers
"""
# FIXME: Cache value?
build_env = Environment(self._conanfile)
# Top priority: profile
profile_env = self._conanfile.buildenv
build_env.compose_env(profile_env)
build_requires = self._conanfile.dependencies.build.topological_sort
for require, build_require in reversed(build_requires.items()):
if require.direct: # Only buildenv_info from direct deps is propagated
# higher priority, explicit buildenv_info
if build_require.buildenv_info:
build_env.compose_env(build_require.buildenv_info)
# Lower priority, the runenv of all transitive "requires" of the build requires
if build_require.runenv_info:
build_env.compose_env(build_require.runenv_info)
# Then the implicit
if hasattr(self._conanfile, "settings_build"):
os_name = self._conanfile.settings_build.get_safe("os")
else:
os_name = self._conanfile.settings.get_safe("os")
build_env.compose_env(runenv_from_cpp_info(self._conanfile, build_require, os_name))
# Requires in host context can also bring some direct buildenv_info
host_requires = self._conanfile.dependencies.host.topological_sort
for require in reversed(host_requires.values()):
if require.buildenv_info:
build_env.compose_env(require.buildenv_info)
return build_env
def generate(self, group="build"):
build_env = self.environment()
if build_env: # Only if there is something defined
build_env.save_script(self._filename, group=group)
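# A minimal consumer-side sketch (not part of this module): how a recipe might
# use VirtualBuildEnv in its generate() step. The reference "cmake/3.22.6" is an
# illustrative assumption, and the ConanFile import path depends on the Conan
# version in use (`from conans import ConanFile` on 1.x).
from conans import ConanFile
from conan.tools.env import VirtualBuildEnv

class ConsumerRecipe(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    build_requires = "cmake/3.22.6"  # hypothetical tool requirement

    def generate(self):
        # Writes conanbuildenv-<build_type>-<arch> launcher scripts exposing the
        # buildenv_info/runenv_info collected from the build requirements.
        VirtualBuildEnv(self).generate()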
|
selim_sef-solution/lucid/scratch/pretty_graphs/graph.py | Hulihrach/RoadDetector | 4,537 | 11158605 | <reponame>Hulihrach/RoadDetector<filename>selim_sef-solution/lucid/scratch/pretty_graphs/graph.py<gh_stars>1000+
import numpy as np
import tensorflow as tf
import lucid.modelzoo.vision_models as models
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
from lucid.misc.io import show, load
from lucid.misc.io.showing import _image_url, _display_html
from collections import defaultdict
class Node(object):
def __init__(self, name, op, graph, pretty_name=None):
self.name = name
self.op = op
self.graph = graph
self.pretty_name = pretty_name
def __repr__(self):
return "<%s: %s>" % (self.name, self.op)
@property
def inputs(self):
return self.graph.node_to_inputs[self.name]
@property
def consumers(self):
return self.graph.node_to_consumers[self.name]
def copy(self):
return Node(self.name, self.op, self.graph)
class Graph(object):
def __init__(self):
self.nodes = []
self.name_map = {}
self.node_to_consumers = defaultdict(lambda: [])
self.node_to_inputs = defaultdict(lambda: [])
def add_node(self, node):
self.nodes.append(node)
self.name_map[node.name] = node
def add_edge(self, node1, node2):
node1, node2 = self[node1], self[node2]
self.node_to_consumers[node1.name].append(node2)
self.node_to_inputs[node2.name].append(node1)
def __getitem__(self, index):
if isinstance(index, str):
return self.name_map[index]
elif isinstance(index, Node):
return self.name_map[index.name]
else:
raise Exception("Unsupported index for Graph", type(index) )
def graphviz(self, groups=None):
print("digraph G {")
if groups is not None:
for root, group in groups.items():
print("")
print((" subgraph", "cluster_%s" % root.name.replace("/", "_"), "{"))
print((" label = \"%s\"") % (root.pretty_name or root.name))
for node in group:
print((" \"%s\"") % (node.pretty_name or node.name))
print(" }")
for node in self.nodes:
for inp in node.inputs:
print((" ", '"' + (inp.pretty_name or inp.name) + '"', " -> ", '"' + (node.pretty_name or node.name) + '"'))
print("}")
@staticmethod
def from_graphdef(graphdef):
graph = Graph()
for raw_node in graphdef.node:
graph.add_node(Node(raw_node.name, raw_node.op, graph))
for raw_node in graphdef.node:
for raw_inp in raw_node.input:
if raw_inp.startswith('^'): # skip control inputs
continue
raw_inp_name = raw_inp.split(":")[0]
graph.add_edge(raw_inp_name, raw_node.name)
return graph
def filter_graph(graph, keep_nodes, pass_through=True):
new_graph = Graph()
for node in graph.nodes:
if node.name in keep_nodes:
new_node = node.copy()
new_node.graph = new_graph
new_node.subsumed = []
new_graph.add_node(new_node)
def kept_inputs(node):
ret = []
visited = []
def walk(inp):
if inp in visited: return
visited.append(inp)
if inp.name in keep_nodes:
ret.append(inp)
else:
if pass_through:
new_graph[node].subsumed.append(inp.name)
for inp2 in inp.inputs:
walk(inp2)
for inp in node.inputs:
walk(inp)
return ret
for node in graph.nodes:
if node.name in keep_nodes:
for inp in kept_inputs(node):
new_graph.add_edge(inp, node)
return new_graph
standard_include_ops = ["Placeholder", "Relu", "Relu6", "Add", "Split", "Softmax", "Concat", "ConcatV2", "Conv2D", "MaxPool", "AvgPool", "MatMul"] # Conv2D
def filter_graph_ops(graph, include_ops=standard_include_ops):
keep_nodes = [node.name for node in graph.nodes if node.op in include_ops]
return filter_graph(graph, keep_nodes)
def filter_graph_cut_shapes(graph):
keep_nodes = [node.name for node in graph.nodes if node.op != "Shape"]
return filter_graph(graph, keep_nodes, pass_through=False)
def filter_graph_dynamic(graph):
dynamic_nodes = []
def recursive_walk_forward(node):
if node.name in dynamic_nodes: return
dynamic_nodes.append(node.name)
for next in node.consumers:
recursive_walk_forward(next)
recursive_walk_forward(graph.nodes[0])
return filter_graph(graph, dynamic_nodes)
def filter_graph_collapse_sequence(graph, sequence):
exclude_nodes = []
for node in graph.nodes:
remainder = sequence[:]
matches = []
while remainder:
if len(node.consumers) > 1 and len(remainder) > 1:
break
if node.op == remainder[0]:
matches.append(node.name)
node = node.consumers[0]
remainder = remainder[1:]
else:
break
if len(remainder) == 0:
exclude_nodes += matches[:-1]
include_nodes = [node.name for node in graph.nodes
if node.name not in exclude_nodes]
return filter_graph(graph, include_nodes)
def clip_node_names(graph, prefix):
new_graph = Graph()
for node in graph.nodes:
new_node = node.copy()
new_node.graph = new_graph
new_node.subsumed = []
new_graph.add_node(new_node)
for inp in node.inputs:
new_graph.add_edge(inp, new_node)
for node in new_graph.nodes:
if node.name.startswith(prefix):
node.pretty_name = node.name[len(prefix):]
return new_graph
def find_groups(graph):
node_successors = {}
for node in graph.nodes:
node_successors[node.name] = set(node.inputs)
for inp in node.inputs:
node_successors[node.name] |= node_successors[inp.name]
concat_nodes = [node for node in graph.nodes
if node.op in ["Concat", "ConcatV2", "Add"] and len(node.inputs) > 1]
groups = {}
group_children = set()
for root_node in concat_nodes:
branch_heads = root_node.inputs
branch_nodes = [set([node]) | node_successors[node.name] for node in branch_heads]
branch_shared = set.intersection(*branch_nodes)
branch_uniq = set.union(*branch_nodes) - branch_shared
groups[root_node] = set([root_node]) | branch_uniq
group_children |= branch_uniq
for root in list(groups.keys()):
if root in group_children:
del groups[root]
return groups
|
minetorch/plugins/noise_detector.py | louis-she/torchpack | 127 | 11158631 | <reponame>louis-she/torchpack
import torch
from minetorch.plugin import Plugin
from minetorch.statable import Statable
class NoiseSampleDetector(Plugin, Statable):
"""This plugin helps to find out the suspicious noise samples.
provid a metric which compute a scalar for every sample, in most cases
the metric should be the loss function without reduce.
"""
def __init__(self, metric, topn=50):
super().__init__()
self.metric = metric
self.topn = topn
self.train_metrics = []
self.val_metrics = []
def before_init(self):
self.miner.statable[self.__class__.__name__] = self
self.train_dataloader = torch.utils.data.DataLoader(
self.miner.train_dataloader.dataset,
batch_size=self.miner.train_dataloader.batch_size,
num_workers=self.miner.train_dataloader.num_workers,
shuffle=False,
)
self.val_dataloader = torch.utils.data.DataLoader(
self.miner.val_dataloader.dataset,
batch_size=self.miner.train_dataloader.batch_size,
num_workers=self.miner.train_dataloader.num_workers,
shuffle=False,
)
def load_state_dict(self, data):
self.train_metrics = data[0]
self.val_metrics = data[1]
def state_dict(self):
return (self.train_metrics, self.val_metrics)
def after_epoch_end(self, **kwargs):
with torch.no_grad():
self.train_metrics.append(self._predict_dataset(self.train_dataloader))
self.val_metrics.append(self._predict_dataset(self.val_dataloader))
_, train_indices = torch.sort(
torch.std(torch.stack(self.train_metrics), dim=0), descending=True
)
_, val_indices = torch.sort(
torch.std(torch.stack(self.val_metrics), dim=0), descending=True
)
self.print_txt(
f"Train dataset most {self.topn} suspicious indices: {train_indices.tolist()[:self.topn]} \n"
f"Validation dataset most {self.topn} suspicious indices: {val_indices.tolist()[:self.topn]}",
"suspicious_noise_samples",
)
def _predict_dataset(self, dataloader):
results = torch.zeros([len(dataloader.dataset)])
for index, data in enumerate(dataloader):
predict = self.model(data[0].to(self.devices))
offset = index * dataloader.batch_size
results[offset : offset + dataloader.batch_size] = (
self.metric(predict, data[1].to(self.devices)).detach().cpu()
)
return results
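# A minimal usage sketch (names below are illustrative, not from this module).
# The important detail from the class docstring: the metric must return one
# scalar per sample, e.g. a loss constructed with reduction='none'.
import torch.nn as nn

per_sample_loss = nn.CrossEntropyLoss(reduction='none')   # shape (batch_size,)
noise_plugin = NoiseSampleDetector(metric=per_sample_loss, topn=50)
# miner = minetorch.Miner(..., plugins=[noise_plugin])     # hypothetical wiring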
|
test/win/large-pdb/large-pdb.gyp | chlorm-forks/gyp | 2,151 | 11158695 | <reponame>chlorm-forks/gyp
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'large_pdb_exe',
'type': 'executable',
'msvs_large_pdb': 1,
'sources': [
'main.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'ProgramDatabaseFile': '<(PRODUCT_DIR)/large_pdb_exe.exe.pdb',
},
},
},
{
'target_name': 'small_pdb_exe',
'type': 'executable',
'msvs_large_pdb': 0,
'sources': [
'main.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'ProgramDatabaseFile': '<(PRODUCT_DIR)/small_pdb_exe.exe.pdb',
},
},
},
{
'target_name': 'large_pdb_dll',
'type': 'shared_library',
'msvs_large_pdb': 1,
'sources': [
'dllmain.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'ProgramDatabaseFile': '<(PRODUCT_DIR)/large_pdb_dll.dll.pdb',
},
},
},
{
'target_name': 'small_pdb_dll',
'type': 'shared_library',
'msvs_large_pdb': 0,
'sources': [
'dllmain.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'ProgramDatabaseFile': '<(PRODUCT_DIR)/small_pdb_dll.dll.pdb',
},
},
},
{
'target_name': 'large_pdb_implicit_exe',
'type': 'executable',
'msvs_large_pdb': 1,
'sources': [
'main.cc',
],
# No PDB file is specified. However, the msvs_large_pdb mechanism should
# default to the appropriate <(PRODUCT_DIR)/<(TARGET_NAME).exe.pdb.
},
{
'target_name': 'large_pdb_variable_exe',
'type': 'executable',
'msvs_large_pdb': 1,
'sources': [
'main.cc',
],
# No PDB file is specified. However, the msvs_large_pdb_path variable
# explicitly sets one.
'variables': {
'msvs_large_pdb_path': '<(PRODUCT_DIR)/foo.pdb',
},
},
{
'target_name': 'large_pdb_product_exe',
'product_name': 'bar',
'type': 'executable',
'msvs_large_pdb': 1,
'sources': [
'main.cc',
],
# No PDB file is specified. However, we've specified a product name so
# it should use <(PRODUCT_DIR)/bar.exe.pdb.
},
]
}
|
python-toolbox/marvin_python_toolbox/common/data_source_provider.py | mechamoedson/incubator-marvin | 149 | 11158708 | <filename>python-toolbox/marvin_python_toolbox/common/data_source_provider.py
#!/usr/bin/env python
# coding=utf-8
# Copyright [2017] [B2W Digital]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Marvin Data Source module.
This module is responsible for creating and providing different types of data source objects.
"""
def get_spark_session(enable_hive=False, app_name='marvin-engine', configs=[]):
"""Return a Spark Session object"""
# Prepare spark context to be used
import findspark
findspark.init()
from pyspark.sql import SparkSession
# prepare the spark session to be returned
spark = SparkSession.builder
spark = spark.appName(app_name)
spark = spark.enableHiveSupport() if enable_hive else spark
# apply any extra configs
for config in configs:
spark = spark.config(config)
return spark.getOrCreate()
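# A minimal usage sketch (the app name is illustrative). Note that entries of
# `configs` are passed positionally to SparkSession.builder.config(), so their
# exact shape must match whatever that call expects in the caller's setup.
if __name__ == '__main__':
    spark = get_spark_session(app_name='marvin-engine-test')
    df = spark.createDataFrame([(1, 'a'), (2, 'b')], ['id', 'label'])
    df.show()
    spark.stop()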
|
image_segmentation/utils/deeplab_model.py | wisespa/fritz-models | 277 | 11158714 | <filename>image_segmentation/utils/deeplab_model.py
import os
import tarfile
import numpy as np
from PIL import Image
from six.moves import urllib
import tempfile
import tensorflow as tf
MODEL_NAME = 'mobilenetv2_coco_voctrainaug'
_DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'
_MODEL_URLS = {
'mobilenetv2_coco_voctrainaug':
'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz',
'mobilenetv2_coco_voctrainval':
'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz',
'xception_coco_voctrainaug':
'deeplabv3_pascal_train_aug_2018_01_04.tar.gz',
'xception_coco_voctrainval':
'deeplabv3_pascal_trainval_2018_01_04.tar.gz',
}
_TARBALL_NAME = 'deeplab_model.tar.gz'
class DeepLabModel(object):
"""Class to load deeplab model and run inference."""
INPUT_TENSOR_NAME = 'ImageTensor:0'
OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
INPUT_SIZE = 513
FROZEN_GRAPH_NAME = 'frozen_inference_graph'
def __init__(self, tarball_path):
"""Creates and loads pretrained deeplab model."""
self.graph = tf.Graph()
graph_def = None
# Extract frozen graph from tar archive.
tar_file = tarfile.open(tarball_path)
for tar_info in tar_file.getmembers():
if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
file_handle = tar_file.extractfile(tar_info)
graph_def = tf.GraphDef.FromString(file_handle.read())
break
tar_file.close()
if graph_def is None:
raise RuntimeError('Cannot find inference graph in tar archive.')
with self.graph.as_default():
tf.import_graph_def(graph_def, name='')
self.sess = tf.Session(graph=self.graph)
def run(self, image):
"""Runs inference on a single image.
Args:
image: A PIL.Image object, raw input image.
Returns:
resized_image: RGB image resized from original input image.
seg_map: Segmentation map of `resized_image`.
"""
width, height = image.size
resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
target_size = (int(resize_ratio * width), int(resize_ratio * height))
resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)
batch_seg_map = self.sess.run(
self.OUTPUT_TENSOR_NAME,
feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
seg_map = batch_seg_map[0]
return resized_image, seg_map
def download_deeplab_model(model_name):
model_dir = tempfile.mkdtemp()
tf.gfile.MakeDirs(model_dir)
download_path = os.path.join(model_dir, _TARBALL_NAME)
print(download_path)
print('downloading model, this might take a while...')
urllib.request.urlretrieve(
_DOWNLOAD_URL_PREFIX + _MODEL_URLS[model_name],
download_path
)
print('download completed! loading DeepLab model...')
model = DeepLabModel(download_path)
print('model loaded successfully!')
return model
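# A minimal usage sketch; 'example.jpg' is a placeholder path, not part of this
# module. It downloads the MobileNetV2 checkpoint and segments a single image.
if __name__ == '__main__':
    model = download_deeplab_model(MODEL_NAME)
    image = Image.open('example.jpg')
    resized_image, seg_map = model.run(image)
    print('segmentation map shape:', seg_map.shape)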
|
tests/torchfunc_test.py | itsjoshthedeveloper/torchfunc | 210 | 11158730 | import sys
import time
import torch
import torchfunc
def test_timer_context_manager():
with torchfunc.Timer() as timer:
time.sleep(1)
last_in_block = timer.checkpoint() # register checkpoint
last_time = timer.checkpoint()
time.sleep(1)
assert last_time == timer.checkpoint() == timer.time()
assert last_in_block != last_time
def test_timer_decorator():
@torchfunc.Timer()
def wrapped():
time.sleep(1)
result = 0
for value in range(11):
result += value
return int(result / 55)
value, passed_time = wrapped()
assert value == 1
assert passed_time > 1
def test_seed():
torchfunc.seed(0)
assert 0 == torch.initial_seed()
def test_seed_str():
assert str(torchfunc.seed(0)) == "torchfunc.seed"
def test_seed_representation():
assert repr(torchfunc.seed(0)) == "torchfunc.seed(value=0, cuda=False)"
def test_seed_context_manager():
first_seed = torch.initial_seed()
with torchfunc.seed(0):
assert 0 == torch.initial_seed()
assert torch.initial_seed() == first_seed
def test_seed_decorator():
first_seed = torch.initial_seed()
@torchfunc.seed(0)
def wrapped():
assert 0 == torch.initial_seed()
wrapped()
assert torch.initial_seed() == first_seed
def test_info():
assert isinstance(torchfunc.info(), str)
def test_sizeof_tensor():
assert torchfunc.sizeof(torch.FloatTensor(12, 12)) == 12 * 12 * 4
def test_sizeof_model():
model = torch.nn.Linear(20, 20)
bias = 20 * 4
weights = 20 * 20 * 4
assert torchfunc.sizeof(model) == bias + weights
|
thumb_daemon.py | twnming/arxiv-sanity-lite | 501 | 11158767 | """
Iterates over the current database and makes a best effort to download the papers,
convert them to thumbnail images, and save them to disk for display in the UI.
At the moment it only processes the most recent 5K papers. Intended to be run as
a daily cron job or similar.
"""
import os
import time
import random
import requests
from subprocess import Popen
from aslite.db import get_papers_db, get_metas_db
# create the tmp directory if it does not exist, where we will do temporary work
TMP_DIR = 'tmp'
if not os.path.exists(TMP_DIR):
os.makedirs(TMP_DIR)
# create the thumb directory, where we will store the paper thumbnails
THUMB_DIR = os.path.join('static', 'thumb')
if not os.path.exists(THUMB_DIR):
os.makedirs(THUMB_DIR)
# open the database, determine which papers we'll try to get thumbs for
pdb = get_papers_db()
n = len(pdb)
mdb = get_metas_db()
metas = list(mdb.items())
metas.sort(key=lambda kv: kv[1]['_time'], reverse=True) # most recent papers first
keys = [k for k,v in metas[:5000]] # only the most recent papers
for i, key in enumerate(keys):
time.sleep(0.01) # for safety
# the path where we would store the thumbnail for this key
thumb_path = os.path.join(THUMB_DIR, key + '.jpg')
if os.path.exists(thumb_path):
continue
# fetch the paper
p = pdb[key]
print("%d/%d: paper to process: %s" % (i, n, key))
# get the link to the pdf
url = p['link'].replace('abs', 'pdf')
# attempt to download the pdf
print("attempting to download pdf from: ", url)
try:
x = requests.get(url, timeout=10, allow_redirects=True)
with open(os.path.join(TMP_DIR, 'paper.pdf'), 'wb') as f:
f.write(x.content)
print("OK")
except Exception as e:
print("error downloading the pdf at url", url)
print(e)
continue
time.sleep(5 + random.uniform(0, 5)) # take a breather
# mv away the previous temporary files if they exist
if os.path.isfile(os.path.join(TMP_DIR, 'thumb-0.png')):
for i in range(8):
f1 = os.path.join(TMP_DIR, 'thumb-%d.png' % (i,))
f2 = os.path.join(TMP_DIR, 'thumbbuf-%d.png' % (i,))
if os.path.isfile(f1):
cmd = 'mv %s %s' % (f1, f2)
os.system(cmd)
# convert pdf to png images per page. spawn async because convert can unfortunately enter an infinite loop, have to handle this.
# this command will generate 8 independent images thumb-0.png ... thumb-7.png of the thumbnails
print("converting the pdf to png images")
pp = Popen(['convert', '%s[0-7]' % ('tmp/paper.pdf', ), '-thumbnail', 'x156', os.path.join(TMP_DIR, 'thumb.png')])
t0 = time.time()
while time.time() - t0 < 20: # give it 20 seconds deadline
ret = pp.poll()
if not (ret is None):
# process terminated
break
time.sleep(0.1)
ret = pp.poll()
if ret is None:
print("convert command did not terminate in 20 seconds, terminating.")
pp.terminate() # give up
continue
if not os.path.isfile(os.path.join(TMP_DIR, 'thumb-0.png')):
# failed to render pdf, replace with missing image
#missing_thumb_path = os.path.join('static', 'missing.jpg')
#os.system('cp %s %s' % (missing_thumb_path, thumb_path))
#print("could not render pdf, creating a missing image placeholder")
print("could not render pdf, skipping")
continue
else:
# otherwise concatenate the 8 images into one
cmd = "montage -mode concatenate -quality 80 -tile x1 %s %s" \
% (os.path.join(TMP_DIR, 'thumb-*.png'), thumb_path)
print(cmd)
os.system(cmd)
# remove the temporary paper.pdf file
tmp_pdf = os.path.join(TMP_DIR, 'paper.pdf')
if os.path.isfile(tmp_pdf):
os.remove(tmp_pdf)
|
boto3_type_annotations/boto3_type_annotations/lex_models/paginator.py | cowboygneox/boto3_type_annotations | 119 | 11158788 | from typing import Dict
from botocore.paginate import Paginator
class GetBotAliases(Paginator):
def paginate(self, botName: str, nameContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetBotChannelAssociations(Paginator):
def paginate(self, botName: str, botAlias: str, nameContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetBotVersions(Paginator):
def paginate(self, name: str, PaginationConfig: Dict = None) -> Dict:
pass
class GetBots(Paginator):
def paginate(self, nameContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetBuiltinIntents(Paginator):
def paginate(self, locale: str = None, signatureContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetBuiltinSlotTypes(Paginator):
def paginate(self, locale: str = None, signatureContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetIntentVersions(Paginator):
def paginate(self, name: str, PaginationConfig: Dict = None) -> Dict:
pass
class GetIntents(Paginator):
def paginate(self, nameContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
class GetSlotTypeVersions(Paginator):
def paginate(self, name: str, PaginationConfig: Dict = None) -> Dict:
pass
class GetSlotTypes(Paginator):
def paginate(self, nameContains: str = None, PaginationConfig: Dict = None) -> Dict:
pass
|
iamport/__init__.py | hrxorxm/iamport-rest-client-python | 126 | 11158796 | from .client import Iamport
__all__ = ['Iamport']
|
qf_lib_tests/unit_tests/portfolio_construction/test_max_diversification_portfolio.py | webclinic017/qf-lib | 198 | 11158823 | # Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import TestCase
import numpy as np
from qf_lib_tests.unit_tests.portfolio_construction.utils import assets_df
from qf_lib.portfolio_construction.portfolio_models.max_diversification_portfolio import \
MaxDiversificationPortfolio
class TestMaxDiversificationPortfolio(TestCase):
@classmethod
def setUpClass(cls):
cls.assets_df = assets_df
def test_get_weights(self):
portfolio = MaxDiversificationPortfolio(self.assets_df.cov(), self.assets_df.std())
actual_weights = portfolio.get_weights()
expected_weights_vals = np.zeros(20)
expected_weights_vals[1] = 0.0393
expected_weights_vals[2] = 0.0569
expected_weights_vals[3] = 0.0249
expected_weights_vals[5] = 0.1076
expected_weights_vals[6] = 0.0864
expected_weights_vals[7] = 0.0830
expected_weights_vals[9] = 0.0528
expected_weights_vals[10] = 0.1137
expected_weights_vals[11] = 0.0664
expected_weights_vals[12] = 0.0730
expected_weights_vals[14] = 0.0672
expected_weights_vals[16] = 0.0584
expected_weights_vals[17] = 0.0575
expected_weights_vals[18] = 0.0567
expected_weights_vals[19] = 0.0562
self.assertTrue(np.allclose(expected_weights_vals, actual_weights.values, rtol=0, atol=1e-04))
def test_get_weights_with_upper_limits(self):
portfolio = MaxDiversificationPortfolio(self.assets_df.cov(), self.assets_df.std(), upper_constraint=0.1)
actual_weights = portfolio.get_weights()
expected_weights_vals = np.zeros(20)
expected_weights_vals[1] = 0.0404
expected_weights_vals[2] = 0.0583
expected_weights_vals[3] = 0.0264
expected_weights_vals[5] = 0.0999
expected_weights_vals[6] = 0.0876
expected_weights_vals[7] = 0.0845
expected_weights_vals[9] = 0.0533
expected_weights_vals[10] = 0.0999
expected_weights_vals[11] = 0.0682
expected_weights_vals[12] = 0.0755
expected_weights_vals[14] = 0.0682
expected_weights_vals[16] = 0.0581
expected_weights_vals[17] = 0.0600
expected_weights_vals[18] = 0.0604
expected_weights_vals[19] = 0.0592
self.assertTrue(np.allclose(expected_weights_vals, actual_weights.values, rtol=0, atol=1e-04))
if __name__ == '__main__':
unittest.main()
|
modules/pairwise.py | roysubhankar/L2C | 311 | 11158842 | <gh_stars>100-1000
import torch
def PairEnum(x,mask=None):
# Enumerate all pairs of feature in x
assert x.ndimension() == 2, 'Input dimension must be 2'
x1 = x.repeat(x.size(0),1)
x2 = x.repeat(1,x.size(0)).view(-1,x.size(1))
if mask is not None:
xmask = mask.view(-1,1).repeat(1,x.size(1))
#dim 0: #sample, dim 1:#feature
x1 = x1[xmask].view(-1,x.size(1))
x2 = x2[xmask].view(-1,x.size(1))
return x1,x2
def Class2Simi(x,mode='cls',mask=None):
# Convert class label to pairwise similarity
n=x.nelement()
assert (n-x.ndimension()+1)==n,'Dimension of Label is not right'
expand1 = x.view(-1,1).expand(n,n)
expand2 = x.view(1,-1).expand(n,n)
out = expand1 - expand2
out[out!=0] = -1 #dissimilar pair: label=-1
out[out==0] = 1 #Similar pair: label=1
if mode=='cls':
out[out==-1] = 0 #dissimilar pair: label=0
if mode=='hinge':
out = out.float() #hingeloss require float type
if mask is None:
out = out.view(-1)
else:
mask = mask.detach()
out = out[mask]
return out
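# A tiny worked example (not part of the original module) showing what the two
# helpers return for a batch of three labels / three feature vectors.
if __name__ == '__main__':
    labels = torch.tensor([0, 0, 1])
    simi = Class2Simi(labels, mode='hinge')
    # 3x3 pair matrix flattened row-major: same-label pairs -> 1, different -> -1
    print(simi)          # tensor([ 1.,  1., -1.,  1.,  1., -1., -1., -1.,  1.])
    feats = torch.arange(6.).view(3, 2)
    x1, x2 = PairEnum(feats)
    print(x1.shape, x2.shape)   # torch.Size([9, 2]) twice: all 3x3 feature pairs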
|
testsuite/utils/http.py | okutane/yandex-taxi-testsuite | 128 | 11158862 | <filename>testsuite/utils/http.py
import json
import typing
import urllib.parse
import aiohttp.web
class BaseError(Exception):
pass
class MockedError(BaseError):
"""Base class for mockserver mocked errors."""
error_code = 'unknown'
class TimeoutError(MockedError): # pylint: disable=redefined-builtin
"""Exception used to mock HTTP client timeout errors.
Requires service side support.
Available as ``mockserver.TimeoutError`` alias
or by full name ``testsuite.utils.http.TimeoutError``.
"""
error_code = 'timeout'
class NetworkError(MockedError):
"""Exception used to mock HTTP client netowork errors.
Requires service side support.
Available as ``mockserver.NetworkError`` alias
or by full name ``testsuite.utils.http.NetworkError``.
"""
error_code = 'network'
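# A hedged sketch of how a test elsewhere might use these mocked errors with the
# testsuite `mockserver` fixture; the path '/external-api/ping' and handler name
# are illustrative. Raising one of them from a handler makes the mocked service
# simulate that failure, provided the service side supports it (see docstrings).
def _example_timeout_test(mockserver):
    @mockserver.handler('/external-api/ping')
    def _ping(request):
        raise mockserver.TimeoutError()
    # ... call the code under test and assert it handles the timeout ...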
class HttpResponseError(BaseError):
def __init__(self, *, url: str, status: int):
self.url = url
self.status = status
super().__init__(f'status={self.status}, url=\'{self.url}\'')
class Request:
""" Adapts aiohttp.web.Request to mimic a frequently used subset of
werkzeug.Request interface. ``data`` property is not supported,
use get_data() instead.
"""
def __init__(self, request: aiohttp.web.Request, data: bytes):
self._request = request
self._data: bytes = data
self._json: object = None
self._form: typing.Optional[typing.Dict[str, str]] = None
@property
def method(self) -> str:
return self._request.method
@property
def url(self) -> str:
return str(self._request.url)
@property
def path(self) -> str:
return self._request.path
# For backward compatibility with code using aiohttp.web.Request
@property
def path_qs(self) -> str:
return self._request.raw_path
@property
def query_string(self) -> bytes:
path_and_query = self._request.raw_path.split('?')
if len(path_and_query) < 2:
return b''
return path_and_query[1].encode()
@property
def headers(self):
return self._request.headers
@property
def content_type(self):
return self._request.content_type
def get_data(self) -> bytes:
return self._data
@property
def form(self):
if self._form is None:
if self._request.content_type in (
'',
'application/x-www-form-urlencoded',
):
charset = self._request.charset or 'utf-8'
items = urllib.parse.parse_qsl(
self._data.rstrip().decode(charset),
keep_blank_values=True,
encoding=charset,
)
self._form = {key: value for key, value in items}
else:
self._form = {}
return self._form
@property
def json(self) -> typing.Any:
if self._json is None:
bytes_body = self.get_data()
encoding = self._request.charset or 'utf-8'
self._json = json.loads(bytes_body, encoding=encoding)
return self._json
@property
def cookies(self) -> typing.Mapping[str, str]:
return self._request.cookies
@property
def args(self):
return self._request.query
# For backward compatibility with code using aiohttp.web.Request
@property
def query(self):
return self._request.query
class _NoValue:
pass
async def wrap_request(request: aiohttp.web.Request):
if request.headers.get('expect') == '100-continue':
await request.writer.write(b'HTTP/1.1 100 Continue\r\n\r\n')
await request.writer.drain()
data = await request.content.read()
return Request(request, data)
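# A minimal sketch (assuming an aiohttp server handler) of the werkzeug-style
# surface described in the Request docstring: raw bytes via get_data(), parsed
# bodies via .json / .form; the handler name is illustrative.
async def _echo_handler(aiohttp_request: aiohttp.web.Request):
    request = await wrap_request(aiohttp_request)
    body_bytes = request.get_data()                     # instead of `.data`
    if request.content_type == 'application/json':
        payload = request.json
    else:
        payload = request.form
    return make_response(json={'echo': payload, 'size': len(body_bytes)})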
class ClientResponse:
def __init__(self, response: aiohttp.ClientResponse, content: bytes):
self._response = response
self._content: bytes = content
self._text: typing.Optional[str] = None
@property
def status_code(self) -> int:
return self._response.status
# For backward compatibility with code using async ClientResponse
@property
def status(self) -> int:
return self._response.status
@property
def reason(self) -> typing.Optional[str]:
return self._response.reason
@property
def content(self) -> bytes:
return self._content
@property
def text(self) -> str:
if self._text is None:
encoding = self._response.get_encoding()
self._text = str(self._content, encoding)
return self._text
def json(self) -> typing.Any:
encoding = self._response.get_encoding()
return json.loads(self._content, encoding=encoding)
@property
def headers(self):
return self._response.headers
@property
def content_type(self):
return self._response.content_type
@property
def encoding(self):
return self._response.get_encoding()
@property
def cookies(self):
return self._response.cookies
def raise_for_status(self) -> None:
if self._response.status < 400:
return
self._response.release()
raise HttpResponseError(
url=self._response.request_info.url, status=self._response.status,
)
async def wrap_client_response(response: aiohttp.ClientResponse):
content = await response.read()
wrapped = ClientResponse(response, content)
return wrapped
def make_response(
response: typing.Union[str, bytes, bytearray] = None,
status: int = 200,
headers: typing.Mapping[str, str] = None,
content_type: typing.Optional[str] = None,
charset: typing.Optional[str] = None,
*,
json=_NoValue,
) -> aiohttp.web.Response:
"""
Create HTTP response object. Returns ``aiohttp.web.Response`` instance.
:param response: response content
:param status: HTTP status code
:param headers: HTTP headers dictionary
:param content_type: HTTP Content-Type header
:param charset: Response character set
:param json: JSON response shortcut
"""
if json is not _NoValue:
response = _json_response(json)
if content_type is None:
content_type = 'application/json'
if isinstance(response, (bytes, bytearray)):
return aiohttp.web.Response(
body=response,
status=status,
headers=headers,
content_type=content_type,
charset=charset,
)
if isinstance(response, str):
return aiohttp.web.Response(
text=response,
status=status,
headers=headers,
content_type=content_type,
charset=charset,
)
if response is None:
return aiohttp.web.Response(
headers=headers,
status=status,
content_type=content_type,
charset=charset,
)
raise RuntimeError(f'Unsupported response {response!r} given')
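# Two small usage sketches for make_response (handler names are illustrative):
# the `json=` shortcut serializes the payload and defaults the Content-Type to
# application/json, while a plain string body is returned as-is.
def _ok_handler(request):
    return make_response(json={'status': 'ok'}, status=200)

def _plain_handler(request):
    return make_response('pong', status=200, content_type='text/plain')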
def _json_response(data: typing.Any) -> bytes:
text = json.dumps(data, ensure_ascii=False)
return text.encode('utf-8')
|
2021/quals/pwn-memsafety/attachments/chal.py | BearerPipelineTest/google-ctf | 2,757 | 11158866 | <filename>2021/quals/pwn-memsafety/attachments/chal.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import sys
import json
def socket_print(string):
print("=====", string, flush=True)
def get_user_input():
socket_print("Enter partial source for edge compute app (EOF to finish):")
user_input = []
while True:
try:
line = input()
except EOFError:
break
if line == "EOF":
break
user_input.append(line)
socket_print("Input accepted!")
return user_input
def write_to_rs(contents):
socket_print("Writing source to disk...")
rs_prelude = """#![no_std]
use proc_sandbox::sandbox;
#[sandbox]
pub mod user {
// BEGIN PLAYER REPLACEABLE SECTION
""".splitlines()
with open('/home/user/sources/user-0/src/lib.rs', 'w') as fd:
fd.write('\n'.join(rs_prelude))
fd.write('\n'.join(contents))
fd.write("\n}\n")
def check_user_input():
socket_print("Validating user input before compiling...")
result = subprocess.run("/home/user/rustup/toolchains/nightly-2020-10-08-x86_64-unknown-linux-gnu/bin/rustc user-0/src/lib.rs -Zast-json=yes", cwd="/home/user/sources", shell=True, timeout=150, capture_output=True)
try:
ast = json.loads(result.stdout)
if len(ast["module"]["items"]) != 5:
socket_print("Module escaping detected, aborting.")
sys.exit(1)
except json.JSONDecodeError:
socket_print("Something went wrong during validation -- is your input malformed?")
sys.exit(1)
def build_challenge():
socket_print("Building edge compute app...")
shutil.copytree("/home/user/build-cache", "/tmp/chal-build")
# `rustc --version` == "rustc 1.47.0"
result = subprocess.run("PATH=/usr/bin:$PATH LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/ CARGO_TARGET_DIR=/tmp/chal-build /usr/bin/cargo build --frozen --offline", cwd="/home/user/sources", shell=True, timeout=150)
if result.returncode:
socket_print("non-zero return code on compilation: " + str(result.returncode))
sys.exit(1)
socket_print("Build complete!")
def run_challenge():
socket_print("Testing edge compute app...")
result = subprocess.run("/tmp/chal-build/debug/server", shell=True, timeout=10)
socket_print("Test complete!")
def main():
user_input = get_user_input()
write_to_rs(user_input)
build_challenge()
# Check user input after building since the compilation in check_user_input() will
# generate errors after generating the ast since the compilation command is
# incomplete. Let the proper build run first so users can be presented with any
# compilation issues, then validate it before we actually run.
check_user_input()
run_challenge()
if __name__ == "__main__":
main()
|
Dashboards/migrate_screenboard.py | nadaj/Miscellany | 155 | 11158917 | <filename>Dashboards/migrate_screenboard.py
from datadog import initialize, api
old_api = "*****"
old_app = "*****"
screenboard_id = ****
options = {
'api_key': old_api,
'app_key': old_app
}
initialize(**options)
screenboard = api.Screenboard.get(screenboard_id)
print(screenboard)
new_api = '*****'
new_app = '*****'
options = {
'api_key': new_api,
'app_key': new_app
}
initialize(**options)
new = api.Screenboard.create(
board_title=screenboard['board_title'],
widgets=screenboard['widgets'],
template_variables=screenboard['template_variables'],
height=screenboard['height'],
width=screenboard['width']
)
print(new)
|
archai/datasets/limit_dataset.py | shatadru99/archai | 344 | 11158921 | <reponame>shatadru99/archai
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Tuple, Union, Optional
import torch
from torch.utils.data import \
SubsetRandomSampler, Sampler, Subset, ConcatDataset, Dataset, random_split
class LimitDataset(Dataset):
def __init__(self, dataset, n):
self.dataset = dataset
self.n = n
if hasattr(dataset, 'targets'):
self.targets = dataset.targets[:n]
def __len__(self):
return self.n
def __getitem__(self, i):
return self.dataset[i]
DatasetLike = Optional[Union[Dataset, Subset, ConcatDataset, LimitDataset]]
|
app/models/menu.py | Allen7D/mini-shop-server | 533 | 11158923 | # _*_ coding: utf-8 _*_
"""
Created by Mohan on 2020/4.
"""
from sqlalchemy import Column, Integer, ForeignKey
from app.core.db import BaseModel as Model
__author__ = 'Mohan'
class Menu(Model):
__tablename__ = 'menu'
group_id = Column(Integer, ForeignKey('group.id'), primary_key=True, comment='外键 权限组ID')
route_id = Column(Integer, ForeignKey('route.id'), primary_key=True, comment='外键 路由节点ID')
|
setup.py | ai-med/squeeze_and_excitation | 227 | 11158938 | <filename>setup.py<gh_stars>100-1000
import setuptools
setuptools.setup(name="squeeze-and-excitation",
version="1.0",
url="https://github.com/abhi4ssj/squeeze_and_excitation",
author="<NAME> and <NAME>",
author_email="<EMAIL>",
description="Squeeze and Excitation pytorch implementation",
packages=setuptools.find_packages(),
install_requires=['numpy>=1.14.0', 'torch>=1.0.0'],
python_requires='>=3.5')
|
tests/test-scripts/fil-interpreter.py | pythonspeed/filprofiler | 521 | 11158955 | """Tests that need to be run under `fil-profile python`.
To run:
$ fil-profile python -m pytest tests/test-scripts/fil-interpreter.py
"""
import sys
import os
from ctypes import c_void_p
import re
from pathlib import Path
from subprocess import check_output, check_call
import multiprocessing
import pytest
import numpy as np
import numpy.core.numeric
from pampy import _ as ANY, match
from IPython.core.displaypub import CapturingDisplayPublisher
from IPython.core.interactiveshell import InteractiveShell
import threadpoolctl
from filprofiler._tracer import (
preload,
start_tracing,
stop_tracing,
disable_thread_pools,
)
from filprofiler._testing import get_allocations, big, as_mb
from filprofiler._ipython import run_with_profile
from filprofiler.api import profile
from pymalloc import pymalloc
import fil_api
def test_no_profiling():
"""Neither memory tracking nor Python profiling happen by default."""
address = pymalloc(365)
# No information about size available, since it's not tracked:
assert preload.pymemprofile_get_allocation_size(c_void_p(address)) == 0
assert sys.getprofile() is None
def test_temporary_profiling(tmpdir):
"""Profiling can be run temporarily."""
# get_allocations() expects actual output in a subdirectory.
def f():
arr = np.ones((1024, 1024, 4), dtype=np.uint64) # 32MB
del arr
return 1234
result = profile(f, tmpdir / "output")
assert result == 1234
# Allocations were tracked:
path = ((__file__, "f", 49), (numpy.core.numeric.__file__, "ones", ANY))
allocations = get_allocations(tmpdir)
assert match(allocations, {path: big}, as_mb) == pytest.approx(32, 0.1)
# Profiling stopped:
test_no_profiling()
def run_in_ipython_shell(code_cells):
"""Run a list of strings in IPython.
Returns parsed allocations.
"""
InteractiveShell.clear_instance()
shell = InteractiveShell.instance(display_pub_class=CapturingDisplayPublisher)
for code in code_cells:
shell.run_cell(code)
InteractiveShell.clear_instance()
html = shell.display_pub.outputs[-1]["data"]["text/html"]
assert "<iframe" in html
[svg_path] = re.findall('src="([^"]*)"', html)
assert svg_path.endswith("peak-memory.svg")
resultdir = Path(svg_path).parent.parent
return get_allocations(resultdir)
def test_ipython_profiling(tmpdir):
"""Profiling can be run via IPython magic."""
cwd = os.getcwd()
os.chdir(tmpdir)
allocations = run_in_ipython_shell(
[
"%load_ext filprofiler",
"""\
%%filprofile
import numpy as np
arr = np.ones((1024, 1024, 4), dtype=np.uint64) # 32MB
""",
]
)
# Allocations were tracked:
path = (
(re.compile("<ipython-input-1-.*"), "__magic_run_with_fil", 3),
(numpy.core.numeric.__file__, "ones", ANY),
)
assert match(allocations, {path: big}, as_mb) == pytest.approx(32, 0.1)
# Profiling stopped:
test_no_profiling()
def test_ipython_exception_while_profiling(tmpdir):
"""
    Profiling can be run via IPython magic, and it still profiles and shuts
    down correctly when the cell raises an exception.
This will log a RuntimeError. That is expected.
"""
cwd = os.getcwd()
os.chdir(tmpdir)
allocations = run_in_ipython_shell(
[
"%load_ext filprofiler",
"""\
%%filprofile
import numpy as np
arr = np.ones((1024, 1024, 2), dtype=np.uint64) # 16MB
raise RuntimeError("The test will log this, it's OK.")
arr = np.ones((1024, 1024, 8), dtype=np.uint64) # 64MB
""",
]
)
# Allocations were tracked:
path = (
(re.compile("<ipython-input-1-.*"), "__magic_run_with_fil", 3),
(numpy.core.numeric.__file__, "ones", ANY),
)
assert match(allocations, {path: big}, as_mb) == pytest.approx(16, 0.1)
# Profiling stopped:
test_no_profiling()
def test_ipython_non_standard_indent(tmpdir):
"""
    Profiling can be run via IPython magic even when the profiled cell uses
    non-standard indentation, and it still profiles and shuts down correctly.
"""
cwd = os.getcwd()
os.chdir(tmpdir)
allocations = run_in_ipython_shell(
[
"%load_ext filprofiler",
"""\
%%filprofile
import numpy as np
def f(): # indented with 5 spaces what
arr = np.ones((1024, 1024, 2), dtype=np.uint64) # 16MB
f()
""",
]
)
# Allocations were tracked:
path = (
(re.compile("<ipython-input-1-.*"), "__magic_run_with_fil", 5),
(re.compile("<ipython-input-1-.*"), "f", 4),
(numpy.core.numeric.__file__, "ones", ANY),
)
assert match(allocations, {path: big}, as_mb) == pytest.approx(16, 0.1)
# Profiling stopped:
test_no_profiling()
@pytest.mark.parametrize(
"profile_func",
[
lambda f, tempdir: run_with_profile(f),
profile,
],
)
def test_profiling_disables_threadpools(tmpdir, profile_func):
"""
Memory profiling disables thread pools, then restores them when done.
"""
cwd = os.getcwd()
os.chdir(tmpdir)
import numexpr
import blosc
numexpr.set_num_threads(3)
blosc.set_nthreads(3)
with threadpoolctl.threadpool_limits(3, "blas"):
def check():
assert numexpr.set_num_threads(2) == 1
assert blosc.set_nthreads(2) == 1
for d in threadpoolctl.threadpool_info():
assert d["num_threads"] == 1, d
profile_func(check, tmpdir)
# Resets when done:
assert numexpr.set_num_threads(2) == 3
assert blosc.set_nthreads(2) == 3
for d in threadpoolctl.threadpool_info():
if d["user_api"] == "blas":
assert d["num_threads"] == 3, d
def test_profiling_without_blosc_and_numexpr(tmpdir):
"""
The support for numexpr and blosc is optional; disabling them should work
even when they're not present.
"""
import sys
sys.modules["blosc"] = None
sys.modules["numexpr"] = None
try:
with disable_thread_pools():
pass
finally:
del sys.modules["blosc"]
del sys.modules["numexpr"]
def test_subprocess(tmpdir):
"""
Running a subprocess doesn't blow up.
"""
start_tracing(tmpdir)
try:
output = check_output(["printf", "hello"])
finally:
stop_tracing(tmpdir)
assert output == b"hello"
def test_subprocess_2(tmpdir):
"""
Test a process that, on macOS, would fail (see
https://github.com/pythonspeed/filprofiler/issues/230). Brew processes are
compiled or linked differently somehow.
"""
start_tracing(tmpdir)
try:
check_call(["gfortran", "--version"])
finally:
stop_tracing(tmpdir)
@pytest.mark.parametrize("mode", ["spawn", "forkserver", "fork"])
def test_multiprocessing(tmpdir, mode):
"""
Running a subprocess via multiprocessing in the various different modes
doesn't blow up.
"""
# Non-tracing:
with multiprocessing.get_context(mode).Pool(processes=1) as pool:
assert pool.apply((3).__add__, (4,)) == 7
# Tracing:
start_tracing(tmpdir)
try:
with multiprocessing.get_context(mode).Pool(processes=1) as pool:
assert pool.apply((3).__add__, (4,)) == 7
finally:
stop_tracing(tmpdir)
@pytest.mark.parametrize("mode", ["spawn", "forkserver", "fork"])
def test_multiprocessing_good_error_message_fil_api(tmpdir, mode):
"""
Using Fil API from a subprocess gives a reasonable error message.
"""
start_tracing(tmpdir)
try:
with multiprocessing.get_context(mode).Pool(processes=1) as pool:
with pytest.raises(RuntimeError) as e:
pool.apply(fil_api.run_with_fil)
finally:
stop_tracing(tmpdir)
|
saas/pagination.py | kaiserho/djaodjin-saas | 383 | 11158980 | <filename>saas/pagination.py
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import OrderedDict
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from . import settings
from .models import (sum_dest_amount, sum_orig_amount, sum_balance_amount,
Transaction)
class BalancePagination(PageNumberPagination):
"""
Decorate the results of an API call with balance on an account
containing *selector*.
"""
def paginate_queryset(self, queryset, request, view=None):
self.start_at = view.start_at
self.ends_at = view.ends_at
if view.selector is not None:
dest_totals = sum_dest_amount(queryset.filter(
dest_account__icontains=view.selector))
orig_totals = sum_orig_amount(queryset.filter(
orig_account__icontains=view.selector))
else:
dest_totals = sum_dest_amount(queryset)
orig_totals = sum_orig_amount(queryset)
balance = sum_balance_amount(dest_totals, orig_totals)
self.balance_amount = balance['amount']
self.balance_unit = balance['unit']
return super(BalancePagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return Response(OrderedDict([
('start_at', self.start_at),
('ends_at', self.ends_at),
('balance_amount', self.balance_amount),
('balance_unit', self.balance_unit),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'start_at': {
'type': 'string',
'format': 'date',
'description': "Start of the date range for which"\
" the balance was computed"
},
'ends_at': {
'type': 'string',
'format': 'date',
'description': "End of the date range for which"\
" the balance was computed"
},
'balance_amount': {
'type': 'integer',
'description': "balance of all transactions in cents"\
" (i.e. 100ths) of unit"
},
'balance_unit': {
'type': 'integer',
'description': "three-letter ISO 4217 code"\
" for currency unit (ex: usd)"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class RoleListPagination(PageNumberPagination):
def get_paginated_response(self, data):
return Response(OrderedDict([
('invited_count', self.request.invited_count),
('requested_count', self.request.requested_count),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'invited_count': {
'type': 'integer',
'description': "Number of user invited to have a role"
},
'requested_count': {
'type': 'integer',
'description': "Number of user requesting a role"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class StatementBalancePagination(PageNumberPagination):
"""
Decorate the results of an API call with the balance as shown
in an organization statement.
"""
def paginate_queryset(self, queryset, request, view=None):
self.start_at = view.start_at
self.ends_at = view.ends_at
self.balance_amount, self.balance_unit \
= Transaction.objects.get_statement_balance(view.organization)
return super(StatementBalancePagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return Response(OrderedDict([
('start_at', self.start_at),
('ends_at', self.ends_at),
('balance_amount', self.balance_amount),
('balance_unit', self.balance_unit),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'start_at': {
'type': 'string',
'format': 'date',
'description': "Start of the date range for which"\
" the balance was computed"
},
'ends_at': {
'type': 'string',
'format': 'date',
'description': "End of the date range for which"\
" the balance was computed"
},
'balance_amount': {
'type': 'integer',
'description': "balance of all transactions in cents"\
" (i.e. 100ths) of unit"
},
'balance_unit': {
'type': 'integer',
'description': "three-letter ISO 4217 code"\
" for currency unit (ex: usd)"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class TotalPagination(PageNumberPagination):
def paginate_queryset(self, queryset, request, view=None):
self.start_at = view.start_at
self.ends_at = view.ends_at
self.totals = view.totals
return super(TotalPagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return Response(OrderedDict([
('start_at', self.start_at),
('ends_at', self.ends_at),
('balance_amount', self.totals['amount']),
('balance_unit', self.totals['unit']),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'balance_amount': {
'type': 'integer',
'description': "The sum of all record amounts (in unit)"
},
'balance_unit': {
'type': 'integer',
'description': "three-letter ISO 4217 code"\
" for currency unit (ex: usd)"
},
'count': {
'type': 'integer',
'description': "The number of records"
},
'next': {
'type': 'string',
'description': "API end point to get the next page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'previous': {
'type': 'string',
'description': "API end point to get the previous page"\
"of records matching the query",
'nullable': True,
'format': 'uri',
},
'results': schema,
},
}
class TypeaheadPagination(PageNumberPagination):
page_size = settings.MAX_TYPEAHEAD_CANDIDATES
def paginate_queryset(self, queryset, request, view=None):
self.count = queryset.count()
if self.count > self.page_size:
# returning an empty set if the number of results is greater than
# MAX_TYPEAHEAD_CANDIDATES
queryset = queryset.none()
self.count = 0
return list(queryset)
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.count),
('results', data)
]))
def get_schema_operation_parameters(self, view):
return []
def get_paginated_response_schema(self, schema):
return {
'type': 'object',
'properties': {
'count': {
'type': 'integer',
'description': "The number of records"
},
'results': schema,
},
}
|
logging_course/lesson7/task3/db_handler.py | behzod/pycharm-courses | 213 | 11159012 | from __future__ import print_function
import logging
import logging.config
import datetime
import sqlite3 as sqlite
class DatabaseHandler(logging.Handler):
""" Store log records in a sqlite database.
"""
def __init__(self, filename):
super(DatabaseHandler, self).__init__()
self.db = sqlite.connect(filename)
try:
self.db.execute(
"CREATE TABLE logger(record_id INTEGER PRIMARY KEY, name TEXT," \
"asctime TEXT, level TEXT, funcName TEXT, lineno INTEGER," \
"module TEXT, message TEXT);")
self.db.commit()
except sqlite.OperationalError as e:
logging.info('database filename=%s already exists', filename)
def emit(self, record):
if self.db:
timestring = datetime.datetime.utcfromtimestamp(record.created).isoformat() + 'Z'
message = record.msg % record.args
self.acquire()
try:
self.db.execute("INSERT INTO logger(name, asctime, level, funcName, lineno, module, message) " \
"VALUES(?, ?, ?, ?, ?, ?, ?);",
(record.name, timestring, record.levelname, record.funcName, record.lineno, record.module, message))
self.db.commit()
finally:
self.release()
def close(self):
self.db.close()
self.db = None
super(DatabaseHandler, self).close()
if __name__ == '__main__':
db_filename = '../../Sandbox/log.db'
log_config = {
'version': 1,
'handlers': {
'db': {
'class': 'db_handler.DatabaseHandler',
'filename': db_filename
}
},
'root': {
'handlers': ['db'],
'level': 'DEBUG'
}
}
logging.config.dictConfig(log_config)
logging.debug('Configured logging to database filename=%s', db_filename)
logging.warn('root logger warning message')
logging.debug('Connecting to database to read warning counts')
db = sqlite.connect(db_filename)
result = db.execute('select count(*) from logger where level="WARNING"')
print('Number of WARNING log messages in database is %s' % result.fetchone()[0])
result = db.execute('select count(*) from logger where level="DEBUG"')
print('Number of DEBUG log messages in database is %s' % result.fetchone()[0])
|
theseus/geometry/tests/test_se2.py | jeffin07/theseus | 236 | 11159049 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import torch
import theseus as th
from theseus.constants import EPS
from theseus.core.tests.common import check_copy_var
from theseus.utils import numeric_jacobian
from .common import (
check_adjoint,
check_compose,
check_exp_map,
check_inverse,
check_log_map,
check_projection_for_compose,
check_projection_for_exp_map,
check_projection_for_inverse,
check_projection_for_rotate_and_transform,
)
def create_random_se2(batch_size, rng):
theta = torch.rand(batch_size, 1, generator=rng) * 2 * np.pi - np.pi
u = torch.randn(batch_size, 2)
tangent_vector = torch.cat([u, theta], dim=1)
return th.SE2.exp_map(tangent_vector.double())
def test_exp_map():
for batch_size in [1, 20, 100]:
theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size))
u = torch.randn(batch_size, 2)
tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1)
check_exp_map(tangent_vector.double(), th.SE2)
def test_log_map():
for batch_size in [1, 20, 100]:
theta = torch.from_numpy(np.linspace(-np.pi, np.pi, batch_size))
u = torch.randn(batch_size, 2)
tangent_vector = torch.cat([u, theta.unsqueeze(1)], dim=1)
check_log_map(tangent_vector, th.SE2)
check_projection_for_exp_map(tangent_vector, th.SE2)
def test_compose():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se2_1 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
se2_2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
check_compose(se2_1, se2_2)
def test_inverse():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
check_inverse(se2)
def test_adjoint():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
tangent = torch.randn(batch_size, 3).double()
check_adjoint(se2, tangent)
def test_copy():
rng = torch.Generator()
se2 = th.SE2.rand(1, generator=rng, dtype=torch.float64)
check_copy_var(se2)
def test_transform_from_and_to():
rng = torch.Generator()
rng.manual_seed(0)
for _ in range(10): # repeat a few times
for batch_size_se2 in [1, 20, 100]:
for batch_size_pnt in [1, 20, 100]:
if (
batch_size_se2 != 1
and batch_size_pnt != 1
and batch_size_pnt != batch_size_se2
):
continue
se2 = th.SE2.rand(batch_size_se2, generator=rng, dtype=torch.float64)
point_tensor = torch.randn(batch_size_pnt, 2).double()
point_tensor_ext = torch.cat(
(point_tensor, torch.ones(batch_size_pnt, 1).double()), dim=1
)
jacobians_to = []
point_to = se2.transform_to(point_tensor, jacobians=jacobians_to)
expected_to = (
se2.inverse().to_matrix() @ point_tensor_ext.unsqueeze(2)
)[:, :2]
jacobians_from = []
point_from = se2.transform_from(point_to, jacobians_from)
# Check the operation result
assert torch.allclose(expected_to.squeeze(2), point_to.data, atol=EPS)
assert torch.allclose(point_tensor, point_from.data, atol=EPS)
# Check the jacobians
expected_jac = numeric_jacobian(
lambda groups: groups[0].transform_to(groups[1]),
[se2, th.Point2(point_tensor)],
function_dim=2,
)
assert jacobians_to[0].shape == expected_jac[0].shape
assert jacobians_to[1].shape == expected_jac[1].shape
assert torch.allclose(jacobians_to[0], expected_jac[0])
assert torch.allclose(jacobians_to[1], expected_jac[1])
expected_jac = numeric_jacobian(
lambda groups: groups[0].transform_from(groups[1]),
[se2, point_to],
function_dim=2,
)
assert jacobians_from[0].shape == expected_jac[0].shape
assert jacobians_from[1].shape == expected_jac[1].shape
assert torch.allclose(jacobians_from[0], expected_jac[0])
assert torch.allclose(jacobians_from[1], expected_jac[1])
def test_xy_jacobian():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
jacobian = []
se2.xy(jacobians=jacobian)
expected_jac = numeric_jacobian(
lambda groups: th.Point2(groups[0].xy()), [se2], function_dim=2
)
torch.allclose(jacobian[0], expected_jac[0])
def test_theta_jacobian():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se2 = th.SE2.rand(batch_size, generator=rng, dtype=torch.float64)
jacobian = []
se2.theta(jacobians=jacobian)
expected_jac = numeric_jacobian(
lambda groups: th.Vector(data=groups[0].theta()), [se2], function_dim=1
)
torch.allclose(jacobian[0], expected_jac[0])
def test_projection():
rng = torch.Generator()
rng.manual_seed(0)
for _ in range(10): # repeat a few times
for batch_size in [1, 20, 100]:
# Test SE2.transform_to
check_projection_for_rotate_and_transform(
th.SE2, th.Point2, th.SE2.transform_to, batch_size, rng
)
# Test SE2.transform_from
check_projection_for_rotate_and_transform(
th.SE2, th.Point2, th.SE2.transform_from, batch_size, rng
)
# Test SE2.compose
check_projection_for_compose(th.SE2, batch_size, rng)
# Test SE2.inverse
check_projection_for_inverse(th.SE2, batch_size, rng)
|
recipyCmd/recipycmd.py | robintw/recipy | 451 | 11159112 | #!/usr/bin/env python
"""recipy - a frictionless provenance tool for Python
Usage:
recipy search [options] <outputfile>
recipy latest [options]
recipy gui [options]
recipy annotate [<idvalue>]
recipy pm [--format=<rst|plain>]
recipy (-h | --help)
recipy --version
Options:
-h --help Show this screen
--version Show version
-p --filepath Search based on filepath rather than hash
-f --fuzzy Use fuzzy searching on filename
-r --regex Use regex searching on filename
-i --id Search based on (a fragment of) the run ID
-a --all Show all results (otherwise just latest result given)
-v --verbose Be verbose
-d --diff Show diff
-j --json Show output as JSON
--no-browser Do not open browser window
--debug Turn on debugging mode
"""
import os
import re
import sys
import tempfile
from docopt import docopt
from jinja2 import Template
from tinydb import where, Query
from json import dumps
import six
from . import __version__
from recipyCommon import config, utils
from recipyCommon.config import get_editor
from recipyCommon.version_control import hash_file
from colorama import init
init()
db = utils.open_or_create_db()
template_str = """\aRun ID:\b {{ unique_id }}
\aCreated by\b {{ author }} on {{ date }} UTC
\aRan\b {{ script }} using {{ command }}
{% if command_args|length > 0 %}
Using command-line arguments: {{ command_args }}
{% endif %}
{% if gitcommit is defined %}
\aGit:\b commit {{ gitcommit }}, in repo {{ gitrepo }}, with origin {{ gitorigin }}
{% endif %}
{% if svnrepo is defined %}
\aSvn:\b commit {{ svncommit }}, in repo {{ svnrepo }}.
{% endif %}
\aEnvironment:\b {{ environment|join(", ") }}
{% if libraries is defined %}
\aLibraries:\b {{ libraries|join(", ") }}
{% endif %}
{% if exception is defined %}
\aException:\b ({{ exception.type }}) {{ exception.message }}
{% endif %}
{% if inputs|length == 0 %}
\aInputs:\b none
{% else %}
\aInputs:\b
{% for input in inputs %}
{% if input is string %}
{{ input }}
{% else %}
{{ input[0] }} ({{ input[1] }})
{% endif %}
{% endfor %}
{% endif %}
{% if outputs | length == 0 %}
\aOutputs:\b none
{% else %}
\aOutputs:\b
{% for output in outputs %}
{% if output is string %}
{{ output }}
{% else %}
{{ output[0] }} ({{ output[1] }})
{% endif %}
{% endfor %}
{% endif %}
{% if notes is defined %}
\aNotes:\b
{{ notes }}
{% endif %}
"""
BOLD = '\033[1m'
RESET = '\033[0m'
template_str_withcolor = template_str.replace('\a', BOLD).replace('\b', RESET)
template_str_nocolor = template_str.replace('\a', '').replace('\b', '')
def template_result(r, nocolor=False):
# Print a single result from the search
if nocolor:
template_str = template_str_nocolor
else:
template_str = template_str_withcolor
template = Template(template_str, trim_blocks=True)
return template.render(**r)
def main():
"""
Main function for recipy command-line script
"""
args = docopt(__doc__, version='recipy v%s' % __version__)
if args['--debug']:
print('Command-line arguments: ')
print(args)
print('DB path: ', config.get_db_path())
print('')
print('Full config file (as interpreted):')
print('----------------------------------')
conf = config.read_config_file()
s = six.StringIO()
conf.write(s)
print(s.getvalue())
print('----------------------------------')
if args['search']:
search(args)
elif args['latest']:
latest(args)
elif args['gui']:
gui(args)
elif args['annotate']:
annotate(args)
elif args['pm']:
patched_modules(args)
def annotate(args):
# get the text editor
editor = get_editor()
if args['<idvalue>']:
try:
run = db.search(where('unique_id') == args['<idvalue>'])[0]
except IndexError:
print('Could not find id %s' % args['<idvalue>'])
return
else:
run = get_latest_run()
# Get temp filename
f = tempfile.NamedTemporaryFile(delete=False, mode='w')
if run.get('notes'):
f.write(run['notes'])
# Write something to the bottom of it
f.write('\n' + '-' * 80 + '\n')
f.write('\n')
f.write('Enter your notes on this run above this line')
f.write('\n' * 3)
f.write(template_result(run, nocolor=True))
f.close()
# Open your editor
os.system('%s %s' % (editor, f.name))
# Grab the text
annotation = ""
with open(f.name, 'r') as f:
for line in f:
if line == '-' * 80 + '\n':
break
annotation += line
notes = annotation.strip()
if notes == "":
print('No annotation entered, exiting.')
return
# Store in the DB
db.update({'notes': notes}, where('unique_id') == run['unique_id'])
db.close()
def gui(args):
"""
Loads recipy GUI from the command-line
"""
from recipyGui import recipyGui
import threading
import webbrowser
import socket
def get_free_port():
port = None
base_port = config.get_gui_port()
for trial_port in range(base_port, base_port + 5):
try:
s = socket.socket()
s.bind(('', trial_port))
s.close()
port = trial_port
break
except Exception:
# port already bound
# Please note that this also happens when the gui is run in
# debug mode!
pass
if not port:
# no free ports above, fall back to random
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
port = get_free_port()
url = "http://127.0.0.1:{0}".format(port)
if not args['--no-browser']:
# Give the application some time before it starts
threading.Timer(1.25, lambda: webbrowser.open(url)).start()
# Turn off reloading by setting debug = False (this also fixes starting the
# application twice)
recipyGui.run(debug=args['--debug'], port=port)
def get_latest_run():
results = db.all()
# If no runs in the database
if len(results) == 0:
return None
results = [_change_date(result) for result in results]
# Sort the results
results = sorted(results, key=lambda x: x['date'])
return results[-1]
def latest(args):
run = get_latest_run()
if not run:
if args['--json']:
print('[]')
return
else:
print("Database is empty")
return
if args['--json']:
output = dumps(run, indent=2, sort_keys=True, default=utils.json_serializer)
print(output)
else:
print(template_result(run))
if args['--diff']:
if 'diff' in run:
print("\n\n")
print(run['diff'])
def find_by_hash(x, val):
for output in x:
if isinstance(output, six.string_types):
# If it's just a string it doesn't have a hash
# so skip it
            continue
else:
test_val = output[1]
if test_val == val:
return True
def find_by_filepath(x, val):
for output in x:
if isinstance(output, six.string_types):
test_val = output
else:
test_val = output[0]
if test_val == val:
return True
def find_by_regex(x, val):
for output in x:
if isinstance(output, six.string_types):
test_val = output
else:
test_val = output[0]
if re.match(val, test_val):
return True
def search_hash(args):
try:
hash_value = hash_file(args['<outputfile>'])
except Exception:
# Probably an invalid filename/path so assume it is a raw hash value instead
hash_value = args['<outputfile>']
Run = Query()
# Search both outputs AND inputs
# TODO: Add a command-line argument to force searching of just one
# of inputs or outputs
results = db.search(Run.outputs.test(find_by_hash, hash_value))
results += db.search(Run.inputs.test(find_by_hash, hash_value))
results = sorted(results, key=lambda x: x['date'])
if args['--json']:
if len(results) == 0:
print('[]')
return
if args['--all']:
res_to_output = results
else:
res_to_output = results[-1]
output = dumps(res_to_output, indent=2, sort_keys=True, default=utils.json_serializer)
print(output)
else:
if len(results) == 0:
print('No results found')
else:
if args['--all']:
for r in results[:-1]:
print(template_result(r))
print("-" * 40)
print(template_result(results[-1]))
else:
print(template_result(results[-1]))
if len(results) > 1:
print("** Previous runs have been "
"found. Run with --all to show. **")
if args['--diff']:
if 'diff' in results[-1]:
print("\n\n")
print(results[-1]['diff'])
db.close()
def search(args):
if args['--fuzzy'] or args['--id'] or args['--regex'] or args['--filepath']:
search_text(args)
else:
search_hash(args)
def search_text(args):
filename = args['<outputfile>']
Run = Query()
if args['--fuzzy']:
results = db.search(Run.outputs.test(find_by_regex, ".+%s.+" % filename))
results += db.search(Run.inputs.test(find_by_regex, ".+%s.+" % filename))
elif args['--regex']:
results = db.search(Run.outputs.test(find_by_regex, filename))
results += db.search(Run.inputs.test(find_by_regex, filename))
elif args['--id']:
results = db.search(where('unique_id').matches('%s.*' % filename))
# Automatically turn on display of all results so we don't misleadingly
# suggest that their shortened ID is unique when it isn't
args['--all'] = True
elif args['--filepath']:
results = db.search(Run.outputs.test(find_by_filepath, os.path.abspath(filename)))
results += db.search(Run.inputs.test(find_by_filepath, os.path.abspath(filename)))
else:
print('Unknown arguments')
print(__doc__)
return
# Sort the results
results = sorted(results, key=lambda x: x['date'])
if args['--json']:
if len(results) == 0:
print('[]')
return
if args['--all']:
res_to_output = results
else:
res_to_output = results[-1]
output = dumps(res_to_output, indent=2, sort_keys=True, default=utils.json_serializer)
print(output)
else:
if len(results) == 0:
print("No results found")
else:
if args['--all']:
for r in results[:-1]:
print(template_result(r))
print("-" * 40)
print(template_result(results[-1]))
else:
print(template_result(results[-1]))
if len(results) > 1:
print("** Previous runs have been "
"found. Run with --all to show. **")
if args['--diff']:
if 'diff' in results[-1]:
print("\n\n")
print(results[-1]['diff'])
db.close()
def _change_date(result):
result['date'] = str(result['date']).replace('{TinyDate}:', '')
return result
def patched_modules(args):
modules = db.table('patches').all()
db.close()
fmt = args.get('--format', 'plain')
print(fmt)
try:
from tabulate import tabulate
except ImportError:
print('Please install tabulate for printing the patched modules.')
return
table = []
for m in modules:
t = {}
for k, v in m.items():
if isinstance(v, list):
if len(v) > 0:
if fmt == 'rst':
t[k] = '``{}``'.format('``,\n``'.join(v))
else:
t[k] = '{}'.format('\n'.join(v))
else:
t[k] = ''
else:
if fmt == 'rst':
t[k] = '``{}``'.format(v)
else:
t[k] = v
table.append(t)
print(tabulate(table, headers='keys', tablefmt='rst'))
if __name__ == '__main__':
main()
|
PhysicsTools/PatUtils/python/bJetOperatingPointsParameters_cfi.py | ckamtsikis/cmssw | 852 | 11159129 | <filename>PhysicsTools/PatUtils/python/bJetOperatingPointsParameters_cfi.py<gh_stars>100-1000
# preliminary b-tagging Operating Points
# obtained with cmssw_2_1_0_pre6
# qcd validation /store/relval/2008/6/22/RelVal-RelValQCD_Pt_80_120-1213987236-IDEAL_V2-2nd/0003/
# corrected pt 30 |eta| <2.4 taggability >2
#
import FWCore.ParameterSet.Config as cms
BJetOperatingPointsParameters = cms.PSet(
BJetOperatingPoints = cms.PSet(
DefaultBdisc = cms.string('trackCountingHighEffBJetTags'),
DefaultOp = cms.string('Loose'),
discCutTight = cms.vdouble(
13.76, 3.943, #TCHE, TCHP,
0.7322, 3.335, #JTP, JBTP,
3.524, 0.9467, #SSV, CSV,
0.9635, 0.9462, #MSV, IPM,
0.5581, 0.2757, 0.349 #SET, SMT, SMNoIPT
),
discCutMedium = cms.vdouble(
4.433, 2.53,
0.5114, 2.295,
2.13, 0.8339,
0.8131, 0.8141,
0.1974, 0.1208, 0.1846
),
discCutLoose = cms.vdouble(
1.993, 1.678,
0.2395, 1.149,
1.2, 0.415,
0.4291, 0.3401,
0.0, 0.0, 0.0
),
bdiscriminators = cms.vstring(
'trackCountingHighEffBJetTags','trackCountingHighPurBJetTags',
'jetProbabilityBJetTags','jetBProbabilityBJetTags',
'simpleSecondaryVertexBJetTags','combinedSecondaryVertexBJetTags',
'combinedSecondaryVertexMVABJetTags','impactParameterMVABJetTags',
'softElectronBJetTags','softMuonBJetTags','softMuonNoIPBJetTags'
)
)
)
|
google_play_scraper/utils/__init__.py | shikher-chhawchharia/google-play-scraper | 325 | 11159132 | <reponame>shikher-chhawchharia/google-play-scraper
def nested_lookup(source, indexes):
if len(indexes) == 1:
return source[indexes[0]]
return nested_lookup(source[indexes[0]], indexes[1::])
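# Editor's usage sketch (not part of the original module): nested_lookup walks a
# nested list/dict structure one index at a time, so nested_lookup(source, [2, 0, 'name'])
# is equivalent to source[2][0]['name']. The payload below is a made-up example.
if __name__ == '__main__':
    payload = [['skip'], ['skip'], [{'name': 'Example App', 'score': 4.5}]]
    assert nested_lookup(payload, [2, 0, 'name']) == 'Example App'
    assert nested_lookup(payload, [2, 0, 'score']) == 4.5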
|
apc/model.py | voidism/Mockingjay-Speech-Representation | 105 | 11159133 | <gh_stars>100-1000
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ apc/model.py ]
# Synopsis [ implementation of the apc model ]
# Author [ <NAME> ]
# Copyright [ https://github.com/iamyuanchung/Autoregressive-Predictive-Coding ]
# Reference [ https://arxiv.org/abs/1904.03240 ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import torch
from torch import nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
class Prenet(nn.Module):
"""Prenet is a multi-layer fully-connected network with ReLU activations.
During training and testing (feature extraction), each input frame is passed
into the Prenet, and the Prenet output is fed to the RNN.
If no Prenet configuration is given, the input frames will be directly fed to
the RNN without any transformation.
"""
def __init__(self, input_size, num_layers, hidden_size, dropout):
super(Prenet, self).__init__()
input_sizes = [input_size] + [hidden_size] * (num_layers - 1)
output_sizes = [hidden_size] * num_layers
# Don't get confused by the conv operation here -- since kernel_size and stride
# are both 1, the operation here is equivalent to a fully-connected network.
self.layers = nn.ModuleList(
[nn.Conv1d(in_channels=in_size, out_channels=out_size, kernel_size=1, stride=1)
for (in_size, out_size) in zip(input_sizes, output_sizes)])
self.relu = nn.ReLU()
self.dropout = nn.Dropout(dropout)
def forward(self, inputs):
# inputs: (batch_size, seq_len, mel_dim)
inputs = torch.transpose(inputs, 1, 2)
# inputs: (batch_size, mel_dim, seq_len) -- for conv1d operation
for layer in self.layers:
inputs = self.dropout(self.relu(layer(inputs)))
# inputs: (batch_size, last_dim, seq_len)
return torch.transpose(inputs, 1, 2)
# inputs: (batch_size, seq_len, last_dim) -- back to the original shape
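def _prenet_conv_equals_linear_sanity_check():
    # Editor's illustrative sketch (not part of the original file): checks the claim in
    # the Prenet docstring that a Conv1d with kernel_size=1 and stride=1 behaves like a
    # per-frame fully-connected layer. The 80/512 sizes are arbitrary example shapes.
    conv = nn.Conv1d(in_channels=80, out_channels=512, kernel_size=1, stride=1)
    linear = nn.Linear(80, 512)
    with torch.no_grad():
        linear.weight.copy_(conv.weight.squeeze(-1))  # (512, 80, 1) -> (512, 80)
        linear.bias.copy_(conv.bias)
        x = torch.randn(4, 80, 100)                   # (batch, mel_dim, seq_len)
        out_conv = conv(x).transpose(1, 2)            # (batch, seq_len, 512)
        out_linear = linear(x.transpose(1, 2))        # (batch, seq_len, 512)
    assert torch.allclose(out_conv, out_linear, atol=1e-5)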
class Postnet(nn.Module):
"""Postnet is a simple linear layer for predicting the target frames given the
RNN context during training. We don't need the Postnet for feature extraction.
"""
def __init__(self, input_size, output_size=80):
super(Postnet, self).__init__()
self.layer = nn.Conv1d(
in_channels=input_size, out_channels=output_size, kernel_size=1, stride=1)
def forward(self, inputs):
# inputs: (batch_size, seq_len, hidden_size)
inputs = torch.transpose(inputs, 1, 2)
# inputs: (batch_size, hidden_size, seq_len) -- for conv1d operation
return torch.transpose(self.layer(inputs), 1, 2)
# (batch_size, seq_len, output_size) -- back to the original shape
class APCModel(nn.Module):
"""This class defines Autoregressive Predictive Coding (APC), a model that
learns to extract general speech features from unlabeled speech data. These
features are shown to contain rich speaker and phone information, and are
useful for a wide range of downstream tasks such as speaker verification
and phone classification.
An APC model consists of a Prenet (optional), a multi-layer GRU network,
and a Postnet. For each time step during training, the Prenet transforms
the input frame into a latent representation, which is then consumed by
the GRU network for generating internal representations across the layers.
Finally, the Postnet takes the output of the last GRU layer and attempts to
predict the target frame.
After training, to extract features from the data of your interest, which
    do not have to be i.i.d. with the training data, simply feed-forward
    the data through the APC model, and take the internal representations
(i.e., the GRU hidden states) as the extracted features and use them in
your tasks.
"""
def __init__(self, mel_dim, prenet_config, rnn_config):
super(APCModel, self).__init__()
self.mel_dim = mel_dim
if prenet_config is not None:
# Make sure the dimensionalities are correct
assert prenet_config.input_size == mel_dim
assert prenet_config.hidden_size == rnn_config.input_size
assert rnn_config.input_size == rnn_config.hidden_size
self.prenet = Prenet(
input_size=prenet_config.input_size,
num_layers=prenet_config.num_layers,
hidden_size=prenet_config.hidden_size,
dropout=prenet_config.dropout)
else:
assert rnn_config.input_size == mel_dim
self.prenet = None
in_sizes = [rnn_config.input_size] + [rnn_config.hidden_size] * (rnn_config.num_layers - 1)
out_sizes = [rnn_config.hidden_size] * rnn_config.num_layers
self.rnns = nn.ModuleList(
[nn.GRU(input_size=in_size, hidden_size=out_size, batch_first=True)
for (in_size, out_size) in zip(in_sizes, out_sizes)])
self.rnn_dropout = nn.Dropout(rnn_config.dropout)
self.rnn_residual = rnn_config.residual
self.postnet = Postnet(
input_size=rnn_config.hidden_size,
output_size=self.mel_dim)
def forward(self, inputs, lengths):
"""Forward function for both training and testing (feature extraction).
input:
inputs: (batch_size, seq_len, mel_dim)
lengths: (batch_size,)
return:
predicted_mel: (batch_size, seq_len, mel_dim)
internal_reps: (num_layers + x, batch_size, seq_len, rnn_hidden_size),
where x is 1 if there's a prenet, otherwise 0
"""
seq_len = inputs.size(1)
if self.prenet is not None:
rnn_inputs = self.prenet(inputs)
# rnn_inputs: (batch_size, seq_len, rnn_input_size)
internal_reps = [rnn_inputs]
# also include prenet_outputs in internal_reps
else:
rnn_inputs = inputs
internal_reps = []
packed_rnn_inputs = pack_padded_sequence(rnn_inputs, lengths, True)
for i, layer in enumerate(self.rnns):
packed_rnn_outputs, _ = layer(packed_rnn_inputs)
rnn_outputs, _ = pad_packed_sequence(
packed_rnn_outputs, True, total_length=seq_len)
# outputs: (batch_size, seq_len, rnn_hidden_size)
if i + 1 < len(self.rnns):
# apply dropout except the last rnn layer
rnn_outputs = self.rnn_dropout(rnn_outputs)
rnn_inputs, _ = pad_packed_sequence(
packed_rnn_inputs, True, total_length=seq_len)
# rnn_inputs: (batch_size, seq_len, rnn_hidden_size)
if self.rnn_residual and rnn_inputs.size(-1) == rnn_outputs.size(-1):
# Residual connections
rnn_outputs = rnn_outputs + rnn_inputs
internal_reps.append(rnn_outputs)
packed_rnn_inputs = pack_padded_sequence(rnn_outputs, lengths, True)
predicted_mel = self.postnet(rnn_outputs)
# predicted_mel: (batch_size, seq_len, mel_dim)
internal_reps = torch.stack(internal_reps)
return predicted_mel, internal_reps
# predicted_mel is only for training; internal_reps is the extracted features
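# Editor's usage sketch (not part of the original file): builds a small APCModel and
# runs feature extraction on padded log Mel features. The config objects here are
# hypothetical stand-ins built with SimpleNamespace; the real training script is
# assumed to supply its own config classes exposing the same fields.
def _example_feature_extraction():
    from types import SimpleNamespace
    rnn_config = SimpleNamespace(
        input_size=80, hidden_size=512, num_layers=3, dropout=0.1, residual=True)
    model = APCModel(mel_dim=80, prenet_config=None, rnn_config=rnn_config)
    mels = torch.randn(2, 120, 80)     # (batch_size, seq_len, mel_dim), zero-padded
    lengths = torch.tensor([120, 95])  # true lengths, sorted in decreasing order
    with torch.no_grad():
        predicted_mel, internal_reps = model(mels, lengths)
    # internal_reps: (num_layers, batch_size, seq_len, hidden); the last GRU layer is
    # a common choice of extracted features
    features = internal_reps[-1]
    return predicted_mel.shape, features.shape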
|
src/blockchain/azext_blockchain/vendored_sdks/blockchain/models/_models_py3.py | Mannan2812/azure-cli-extensions | 207 | 11159166 | <gh_stars>100-1000
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
class ApiKey(msrest.serialization.Model):
"""API key payload which is exposed in the request/response of the resource provider.
:param key_name: Gets or sets the API key name.
:type key_name: str
:param value: Gets or sets the API key value.
:type value: str
"""
_attribute_map = {
'key_name': {'key': 'keyName', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
key_name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
super(ApiKey, self).__init__(**kwargs)
self.key_name = key_name
self.value = value
class ApiKeyCollection(msrest.serialization.Model):
"""Collection of the API key payload which is exposed in the response of the resource provider.
:param keys: Gets or sets the collection of API key.
:type keys: list[~azure.mgmt.blockchain.models.ApiKey]
"""
_attribute_map = {
'keys': {'key': 'keys', 'type': '[ApiKey]'},
}
def __init__(
self,
*,
keys: Optional[List["ApiKey"]] = None,
**kwargs
):
super(ApiKeyCollection, self).__init__(**kwargs)
self.keys = keys
class Resource(msrest.serialization.Model):
"""The core properties of the resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the service - e.g. "Microsoft.Blockchain".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class TrackedResource(Resource):
"""The resource model definition for a top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the service - e.g. "Microsoft.Blockchain".
:vartype type: str
:param location: The GEO location of the blockchain service.
:type location: str
:param tags: A set of tags. Tags of the service which is a list of key value pairs that
describes the resource.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
class BlockchainMember(TrackedResource):
"""Payload of the blockchain member which is exposed in the request/response of the resource provider.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the service - e.g. "Microsoft.Blockchain".
:vartype type: str
:param location: The GEO location of the blockchain service.
:type location: str
:param tags: A set of tags. Tags of the service which is a list of key value pairs that
describes the resource.
:type tags: dict[str, str]
:param sku: Gets or sets the blockchain member Sku.
:type sku: ~azure.mgmt.blockchain.models.Sku
:param protocol: Gets or sets the blockchain protocol. Possible values include: "NotSpecified",
"Parity", "Quorum", "Corda".
:type protocol: str or ~azure.mgmt.blockchain.models.BlockchainProtocol
:param validator_nodes_sku: Gets or sets the blockchain validator nodes Sku.
:type validator_nodes_sku: ~azure.mgmt.blockchain.models.BlockchainMemberNodesSku
:ivar provisioning_state: Gets or sets the blockchain member provision state. Possible values
include: "NotSpecified", "Updating", "Deleting", "Succeeded", "Failed", "Stale".
:vartype provisioning_state: str or
~azure.mgmt.blockchain.models.BlockchainMemberProvisioningState
:ivar dns: Gets the dns endpoint of the blockchain member.
:vartype dns: str
:ivar user_name: Gets the auth user name of the blockchain member.
:vartype user_name: str
:param password: Sets the basic auth password of the blockchain member.
:type password: str
:param consortium: Gets or sets the consortium for the blockchain member.
:type consortium: str
:ivar consortium_management_account_address: Gets the managed consortium management account
address.
:vartype consortium_management_account_address: str
:param consortium_management_account_password: Sets the managed consortium management account
password.
:type consortium_management_account_password: str
:param consortium_role: Gets the role of the member in the consortium.
:type consortium_role: str
:param consortium_member_display_name: Gets the display name of the member in the consortium.
:type consortium_member_display_name: str
:ivar root_contract_address: Gets the Ethereum root contract address of the blockchain.
:vartype root_contract_address: str
:ivar public_key: Gets the public key of the blockchain member (default transaction node).
:vartype public_key: str
:param firewall_rules: Gets or sets firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'dns': {'readonly': True},
'user_name': {'readonly': True},
'consortium_management_account_address': {'readonly': True},
'root_contract_address': {'readonly': True},
'public_key': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'validator_nodes_sku': {'key': 'properties.validatorNodesSku', 'type': 'BlockchainMemberNodesSku'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'dns': {'key': 'properties.dns', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
'consortium': {'key': 'properties.consortium', 'type': 'str'},
'consortium_management_account_address': {'key': 'properties.consortiumManagementAccountAddress', 'type': 'str'},
'consortium_management_account_password': {'key': 'properties.consortiumManagementAccountPassword', 'type': 'str'},
'consortium_role': {'key': 'properties.consortiumRole', 'type': 'str'},
'consortium_member_display_name': {'key': 'properties.consortiumMemberDisplayName', 'type': 'str'},
'root_contract_address': {'key': 'properties.rootContractAddress', 'type': 'str'},
'public_key': {'key': 'properties.publicKey', 'type': 'str'},
'firewall_rules': {'key': 'properties.firewallRules', 'type': '[FirewallRule]'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
protocol: Optional[Union[str, "BlockchainProtocol"]] = None,
validator_nodes_sku: Optional["BlockchainMemberNodesSku"] = None,
password: Optional[str] = None,
consortium: Optional[str] = None,
consortium_management_account_password: Optional[str] = None,
consortium_role: Optional[str] = None,
consortium_member_display_name: Optional[str] = None,
firewall_rules: Optional[List["FirewallRule"]] = None,
**kwargs
):
super(BlockchainMember, self).__init__(location=location, tags=tags, **kwargs)
self.sku = sku
self.protocol = protocol
self.validator_nodes_sku = validator_nodes_sku
self.provisioning_state = None
self.dns = None
self.user_name = None
self.password = password
self.consortium = consortium
self.consortium_management_account_address = None
        self.consortium_management_account_password = consortium_management_account_password
self.consortium_role = consortium_role
self.consortium_member_display_name = consortium_member_display_name
self.root_contract_address = None
self.public_key = None
self.firewall_rules = firewall_rules
class BlockchainMemberCollection(msrest.serialization.Model):
"""Collection of the blockchain member payload which is exposed in the request/response of the resource provider.
:param value: Gets or sets the collection of blockchain members.
:type value: list[~azure.mgmt.blockchain.models.BlockchainMember]
:param next_link: Gets or sets the URL, that the client should use to fetch the next page (per
server side paging).
It's null for now, added for future use.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[BlockchainMember]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["BlockchainMember"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(BlockchainMemberCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class BlockchainMemberNodesSku(msrest.serialization.Model):
"""Payload of the blockchain member nodes Sku for a blockchain member.
:param capacity: Gets or sets the nodes capacity.
:type capacity: int
"""
_attribute_map = {
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
*,
capacity: Optional[int] = None,
**kwargs
):
super(BlockchainMemberNodesSku, self).__init__(**kwargs)
self.capacity = capacity
class TransactionNodePropertiesUpdate(msrest.serialization.Model):
"""Update the payload of the transaction node properties in the transaction node payload.
:param password: Sets the transaction node dns endpoint basic auth password.
:type password: str
:param firewall_rules: Gets or sets the firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
"""
_attribute_map = {
'password': {'key': 'password', 'type': 'str'},
'firewall_rules': {'key': 'firewallRules', 'type': '[FirewallRule]'},
}
def __init__(
self,
*,
password: Optional[str] = None,
firewall_rules: Optional[List["FirewallRule"]] = None,
**kwargs
):
super(TransactionNodePropertiesUpdate, self).__init__(**kwargs)
self.password = password
self.firewall_rules = firewall_rules
class BlockchainMemberPropertiesUpdate(TransactionNodePropertiesUpdate):
"""Update the payload of the blockchain member properties for a blockchain member.
:param password: Sets the transaction node dns endpoint basic auth password.
:type password: str
:param firewall_rules: Gets or sets the firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
:param consortium_management_account_password: Sets the managed consortium management account
password.
:type consortium_management_account_password: str
"""
_attribute_map = {
'password': {'key': 'password', 'type': 'str'},
'firewall_rules': {'key': 'firewallRules', 'type': '[FirewallRule]'},
'consortium_management_account_password': {'key': 'consortiumManagementAccountPassword', 'type': 'str'},
}
def __init__(
self,
*,
password: Optional[str] = None,
firewall_rules: Optional[List["FirewallRule"]] = None,
consortium_management_account_password: Optional[str] = None,
**kwargs
):
super(BlockchainMemberPropertiesUpdate, self).__init__(password=password, firewall_rules=firewall_rules, **kwargs)
self.consortium_management_account_password = consortium_management_account_password
class BlockchainMemberUpdate(msrest.serialization.Model):
"""Update the payload of the blockchain member which is exposed in the request/response of the resource provider.
:param tags: A set of tags. Tags of the service which is a list of key value pairs that
describes the resource.
:type tags: dict[str, str]
:param password: Sets the transaction node dns endpoint basic auth password.
:type password: str
:param firewall_rules: Gets or sets the firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
:param consortium_management_account_password: Sets the managed consortium management account
password.
:type consortium_management_account_password: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'password': {'key': 'properties.password', 'type': 'str'},
'firewall_rules': {'key': 'properties.firewallRules', 'type': '[FirewallRule]'},
'consortium_management_account_password': {'key': 'properties.consortiumManagementAccountPassword', 'type': 'str'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
password: Optional[str] = None,
firewall_rules: Optional[List["FirewallRule"]] = None,
consortium_management_account_password: Optional[str] = None,
**kwargs
):
super(BlockchainMemberUpdate, self).__init__(**kwargs)
self.tags = tags
self.password = password
self.firewall_rules = firewall_rules
        self.consortium_management_account_password = consortium_management_account_password
class Consortium(msrest.serialization.Model):
"""Consortium payload.
:param name: Gets or sets the blockchain member name.
:type name: str
:param protocol: Gets or sets the protocol for the consortium. Possible values include:
"NotSpecified", "Parity", "Quorum", "Corda".
:type protocol: str or ~azure.mgmt.blockchain.models.BlockchainProtocol
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
protocol: Optional[Union[str, "BlockchainProtocol"]] = None,
**kwargs
):
super(Consortium, self).__init__(**kwargs)
self.name = name
self.protocol = protocol
class ConsortiumCollection(msrest.serialization.Model):
"""Collection of the consortium payload.
:param value: Gets or sets the collection of consortiums.
:type value: list[~azure.mgmt.blockchain.models.Consortium]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Consortium]'},
}
def __init__(
self,
*,
value: Optional[List["Consortium"]] = None,
**kwargs
):
super(ConsortiumCollection, self).__init__(**kwargs)
self.value = value
class ConsortiumMember(msrest.serialization.Model):
"""Consortium approval.
:param name: Gets the consortium member name.
:type name: str
:param display_name: Gets the consortium member display name.
:type display_name: str
:param subscription_id: Gets the consortium member subscription id.
:type subscription_id: str
:param role: Gets the consortium member role.
:type role: str
:param status: Gets the consortium member status.
:type status: str
:param join_date: Gets the consortium member join date.
:type join_date: ~datetime.datetime
:param date_modified: Gets the consortium member modified date.
:type date_modified: ~datetime.datetime
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
'role': {'key': 'role', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'join_date': {'key': 'joinDate', 'type': 'iso-8601'},
'date_modified': {'key': 'dateModified', 'type': 'iso-8601'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
subscription_id: Optional[str] = None,
role: Optional[str] = None,
status: Optional[str] = None,
join_date: Optional[datetime.datetime] = None,
date_modified: Optional[datetime.datetime] = None,
**kwargs
):
super(ConsortiumMember, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.subscription_id = subscription_id
self.role = role
self.status = status
self.join_date = join_date
self.date_modified = date_modified
class ConsortiumMemberCollection(msrest.serialization.Model):
"""Collection of consortium payload.
:param value: Gets or sets the collection of consortiums.
:type value: list[~azure.mgmt.blockchain.models.ConsortiumMember]
:param next_link: Gets or sets the URL, that the client should use to fetch the next page (per
server side paging).
It's null for now, added for future use.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ConsortiumMember]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ConsortiumMember"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ConsortiumMemberCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class FirewallRule(msrest.serialization.Model):
"""Ip range for firewall rules.
:param rule_name: Gets or sets the name of the firewall rules.
:type rule_name: str
:param start_ip_address: Gets or sets the start IP address of the firewall rule range.
:type start_ip_address: str
:param end_ip_address: Gets or sets the end IP address of the firewall rule range.
:type end_ip_address: str
"""
_attribute_map = {
'rule_name': {'key': 'ruleName', 'type': 'str'},
'start_ip_address': {'key': 'startIpAddress', 'type': 'str'},
'end_ip_address': {'key': 'endIpAddress', 'type': 'str'},
}
def __init__(
self,
*,
rule_name: Optional[str] = None,
start_ip_address: Optional[str] = None,
end_ip_address: Optional[str] = None,
**kwargs
):
super(FirewallRule, self).__init__(**kwargs)
self.rule_name = rule_name
self.start_ip_address = start_ip_address
self.end_ip_address = end_ip_address
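# Illustrative construction of a firewall rule (all values below are placeholders, not taken
# from this file). msrest's Model.serialize() maps the attributes through _attribute_map,
# e.g. rule_name -> "ruleName".
# rule = FirewallRule(rule_name="allow-office",
#                     start_ip_address="10.0.0.1",
#                     end_ip_address="10.0.0.255")
# rule.serialize()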
class NameAvailability(msrest.serialization.Model):
"""Name availability payload which is exposed in the response of the resource provider.
:param name_available: Gets or sets the value indicating whether the name is available.
:type name_available: bool
:param message: Gets or sets the message.
:type message: str
:param reason: Gets or sets the name availability reason. Possible values include:
"NotSpecified", "AlreadyExists", "Invalid".
:type reason: str or ~azure.mgmt.blockchain.models.NameAvailabilityReason
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'message': {'key': 'message', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
message: Optional[str] = None,
reason: Optional[Union[str, "NameAvailabilityReason"]] = None,
**kwargs
):
super(NameAvailability, self).__init__(**kwargs)
self.name_available = name_available
self.message = message
self.reason = reason
class NameAvailabilityRequest(msrest.serialization.Model):
"""Name availability request payload which is exposed in the request of the resource provider.
:param name: Gets or sets the name to check.
:type name: str
:param type: Gets or sets the type of the resource to check.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(NameAvailabilityRequest, self).__init__(**kwargs)
self.name = name
self.type = type
class OperationResult(msrest.serialization.Model):
"""Operation result payload which is exposed in the response of the resource provider.
:param name: Gets or sets the operation name.
:type name: str
:param start_time: Gets or sets the operation start time.
:type start_time: ~datetime.datetime
:param end_time: Gets or sets the operation end time.
:type end_time: ~datetime.datetime
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
name: Optional[str] = None,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
**kwargs
):
super(OperationResult, self).__init__(**kwargs)
self.name = name
self.start_time = start_time
self.end_time = end_time
class ResourceProviderOperation(msrest.serialization.Model):
"""Operation payload which is exposed in the response of the resource provider.
:param origin: Gets or sets the origin.
:type origin: str
:param name: Gets or sets the operation name.
:type name: str
:param is_data_action: Gets or sets a value indicating whether the operation is a data action
or not.
:type is_data_action: bool
:param display: Gets or sets operation display.
:type display: ~azure.mgmt.blockchain.models.ResourceProviderOperationDisplay
"""
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'ResourceProviderOperationDisplay'},
}
def __init__(
self,
*,
origin: Optional[str] = None,
name: Optional[str] = None,
is_data_action: Optional[bool] = None,
display: Optional["ResourceProviderOperationDisplay"] = None,
**kwargs
):
super(ResourceProviderOperation, self).__init__(**kwargs)
self.origin = origin
self.name = name
self.is_data_action = is_data_action
self.display = display
class ResourceProviderOperationCollection(msrest.serialization.Model):
"""Collection of operation payload which is exposed in the response of the resource provider.
:param value: Gets or sets the collection of operations.
:type value: list[~azure.mgmt.blockchain.models.ResourceProviderOperation]
    :param next_link: Gets or sets the URL that the client should use to fetch the next page (per
server side paging).
It's null for now, added for future use.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceProviderOperation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ResourceProviderOperation"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ResourceProviderOperationCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceProviderOperationDisplay(msrest.serialization.Model):
"""Operation display payload which is exposed in the response of the resource provider.
:param provider: Gets or sets the name of the provider for display purposes.
:type provider: str
:param resource: Gets or sets the name of the resource type for display purposes.
:type resource: str
:param operation: Gets or sets the name of the operation for display purposes.
:type operation: str
:param description: Gets or sets the description of the provider for display purposes.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(ResourceProviderOperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class ResourceTypeSku(msrest.serialization.Model):
"""Resource type Sku.
:param resource_type: Gets or sets the resource type.
:type resource_type: str
:param skus: Gets or sets the Skus.
:type skus: list[~azure.mgmt.blockchain.models.SkuSetting]
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'skus': {'key': 'skus', 'type': '[SkuSetting]'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
skus: Optional[List["SkuSetting"]] = None,
**kwargs
):
super(ResourceTypeSku, self).__init__(**kwargs)
self.resource_type = resource_type
self.skus = skus
class ResourceTypeSkuCollection(msrest.serialization.Model):
"""Collection of the resource type Sku.
:param value: Gets or sets the collection of resource type Sku.
:type value: list[~azure.mgmt.blockchain.models.ResourceTypeSku]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceTypeSku]'},
}
def __init__(
self,
*,
value: Optional[List["ResourceTypeSku"]] = None,
**kwargs
):
super(ResourceTypeSkuCollection, self).__init__(**kwargs)
self.value = value
class Sku(msrest.serialization.Model):
"""Blockchain member Sku in payload.
:param name: Gets or sets Sku name.
:type name: str
:param tier: Gets or sets Sku tier.
:type tier: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
class SkuSetting(msrest.serialization.Model):
"""Sku Setting.
:param name: Gets or sets the Sku name.
:type name: str
:param tier: Gets or sets the Sku tier.
:type tier: str
:param locations: Gets or sets the locations.
:type locations: list[str]
:param required_features: Gets or sets the required features.
:type required_features: list[str]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'required_features': {'key': 'requiredFeatures', 'type': '[str]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
tier: Optional[str] = None,
locations: Optional[List[str]] = None,
required_features: Optional[List[str]] = None,
**kwargs
):
super(SkuSetting, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.locations = locations
self.required_features = required_features
class TransactionNode(Resource):
"""Payload of the transaction node which is the request/response of the resource provider.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the service - e.g. "Microsoft.Blockchain".
:vartype type: str
:param location: Gets or sets the transaction node location.
:type location: str
:ivar provisioning_state: Gets or sets the blockchain member provision state. Possible values
include: "NotSpecified", "Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or ~azure.mgmt.blockchain.models.NodeProvisioningState
:ivar dns: Gets or sets the transaction node dns endpoint.
:vartype dns: str
:ivar public_key: Gets or sets the transaction node public key.
:vartype public_key: str
:ivar user_name: Gets or sets the transaction node dns endpoint basic auth user name.
:vartype user_name: str
:param password: Sets the transaction node dns endpoint basic auth password.
:type password: str
:param firewall_rules: Gets or sets the firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'dns': {'readonly': True},
'public_key': {'readonly': True},
'user_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'dns': {'key': 'properties.dns', 'type': 'str'},
'public_key': {'key': 'properties.publicKey', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'password': {'key': 'properties.password', 'type': 'str'},
'firewall_rules': {'key': 'properties.firewallRules', 'type': '[FirewallRule]'},
}
def __init__(
self,
*,
location: Optional[str] = None,
password: Optional[str] = None,
firewall_rules: Optional[List["FirewallRule"]] = None,
**kwargs
):
super(TransactionNode, self).__init__(**kwargs)
self.location = location
self.provisioning_state = None
self.dns = None
self.public_key = None
self.user_name = None
self.password = password
self.firewall_rules = firewall_rules
class TransactionNodeCollection(msrest.serialization.Model):
"""Collection of transaction node payload which is exposed in the request/response of the resource provider.
:param value: Gets or sets the collection of transaction nodes.
:type value: list[~azure.mgmt.blockchain.models.TransactionNode]
    :param next_link: Gets or sets the URL that the client should use to fetch the next page (per
server side paging).
It's null for now, added for future use.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[TransactionNode]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["TransactionNode"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(TransactionNodeCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class TransactionNodeUpdate(msrest.serialization.Model):
"""Update the transaction node payload which is exposed in the request/response of the resource provider.
:param password: Sets the transaction node dns endpoint basic auth password.
:type password: str
:param firewall_rules: Gets or sets the firewall rules.
:type firewall_rules: list[~azure.mgmt.blockchain.models.FirewallRule]
"""
_attribute_map = {
'password': {'key': 'properties.password', 'type': 'str'},
'firewall_rules': {'key': 'properties.firewallRules', 'type': '[FirewallRule]'},
}
def __init__(
self,
*,
password: Optional[str] = None,
firewall_rules: Optional[List["FirewallRule"]] = None,
**kwargs
):
super(TransactionNodeUpdate, self).__init__(**kwargs)
self.password = password
self.firewall_rules = firewall_rules
|
pclpy/__init__.py | sweptlaser/pclpy | 293 | 11159204 | <filename>pclpy/__init__.py
import platform
import pclpy.pcl as pcl
from pclpy.io.functions import read
from pclpy.io.las import read as read_las
from pclpy.io.las import write as write_las
from pclpy.api import (
extract_clusters,
compute_normals,
region_growing,
moving_least_squares,
mls,
radius_outlier_removal,
ror,
octree_voxel_downsample,
fit,
)
|
td4a/controllers/schema.py | mihai-satmarean/td4a | 171 | 11159231 | """ /retrieve
"""
import json
from flask import current_app as app
from flask import request, jsonify, Blueprint
from td4a.models.exception_handler import ExceptionHandler, HandledException
from td4a.models.sort_commented_map import sort_commented_map
from td4a.models.td4ayaml import Td4aYaml
import genson
api_schema = Blueprint('api_schema', __name__) # pylint: disable=invalid-name
@ExceptionHandler
def schema(data, typ):
""" Build schema from data
"""
_ = typ
yaml = Td4aYaml()
obj_data = yaml.load(data['p1'])
json_schema = genson.Schema()
json_schema.add_object(obj_data)
schema_dict = json_schema.to_dict()
schema_yaml = yaml.load(yaml.dump(schema_dict))
sorted_schema_yaml = sort_commented_map(commented_map=schema_yaml)
sorted_schema_string = yaml.dump(sorted_schema_yaml)
return sorted_schema_string
@api_schema.route('/schema', methods=['POST'])
def rest_schema():
""" Build a schema for data
"""
try:
payload = request.json
response = schema(data=payload, typ="data")
return jsonify({"p2": response})
except HandledException as error:
return jsonify(error.json())
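# Illustrative request/response (values are hypothetical): POSTing {"p1": "name: demo\ncount: 3"}
# to /schema returns {"p2": "<YAML dump of the genson-generated JSON schema, keys sorted>"}.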
|
hybrik/version.py | Jeff-sjtu/HybrIK | 287 | 11159269 | <filename>hybrik/version.py<gh_stars>100-1000
# GENERATED VERSION FILE
# TIME: Mon Apr 5 16:07:46 2021
__version__ = '0.1.0+c9f82ae'
short_version = '0.1.0'
|
solutions/problem_046.py | ksvr444/daily-coding-problem | 1,921 | 11159272 | def is_palindrome(s1):
return s1 == s1[::-1]
def get_longest_palindrome_substring(s):
if not s or is_palindrome(s):
return s
s1 = get_longest_palindrome_substring(s[1:])
s2 = get_longest_palindrome_substring(s[:-1])
return s1 if len(s1) >= len(s2) else s2
assert get_longest_palindrome_substring("aabcdcb") == "bcdcb"
assert get_longest_palindrome_substring("bananas") == "anana"
|
flowtorch/nn/made.py | sankethvedula/flowtorch | 207 | 11159276 | <reponame>sankethvedula/flowtorch
# Copyright (c) Meta Platforms, Inc
from typing import Sequence, Tuple
import torch
import torch.nn as nn
from torch.nn import functional as F
def sample_mask_indices(
input_dim: int, hidden_dim: int, simple: bool = True
) -> torch.Tensor:
"""
Samples the indices assigned to hidden units during the construction of MADE masks
:param input_dim: the dimensionality of the input variable
:param hidden_dim: the dimensionality of the hidden layer
    :param simple: True to space fractional indices evenly by rounding to the
        nearest int, False to round them randomly
"""
indices = torch.linspace(1, input_dim, steps=hidden_dim, device="cpu").to(
torch.Tensor().device
)
if simple:
# Simple procedure tries to space fractional indices evenly by rounding
# to nearest int
return torch.round(indices)
else:
# "Non-simple" procedure creates fractional indices evenly then rounds
# at random
ints = indices.floor()
ints += torch.bernoulli(indices - ints)
return ints
def create_mask(
input_dim: int,
context_dim: int,
hidden_dims: Sequence[int],
permutation: torch.LongTensor,
output_multiplier: int,
) -> Tuple[Sequence[torch.Tensor], torch.Tensor]:
"""
Creates MADE masks for a conditional distribution
:param input_dim: the dimensionality of the input variable
:param context_dim: the dimensionality of the variable that is
conditioned on (for conditional densities)
:param hidden_dims: the dimensionality of the hidden layers(s)
:param permutation: the order of the input variables
    :param output_multiplier: tiles the output (e.g. for when a separate
mean and scale parameter are desired)
"""
# Create mask indices for input, hidden layers, and final layer
# We use 0 to refer to the elements of the variable being conditioned on,
# and range(1:(D_latent+1)) for the input variable
var_index = torch.empty(permutation.shape, dtype=torch.get_default_dtype())
var_index[permutation] = torch.arange(input_dim, dtype=torch.get_default_dtype())
# Create the indices that are assigned to the neurons
input_indices = torch.cat((torch.zeros(context_dim), 1 + var_index))
# For conditional MADE, introduce a 0 index that all the conditioned
# variables are connected to as per Paige and Wood (2016) (see below)
if context_dim > 0:
hidden_indices = [sample_mask_indices(input_dim, h) - 1 for h in hidden_dims]
else:
hidden_indices = [sample_mask_indices(input_dim - 1, h) for h in hidden_dims]
# *** TODO: Fix this line ***
output_indices = (
(var_index + 1).unsqueeze(-1).repeat(1, output_multiplier).reshape(-1)
)
# Create mask from input to output for the skips connections
mask_skip = (output_indices.unsqueeze(-1) > input_indices.unsqueeze(0)).type_as(
var_index
)
# Create mask from input to first hidden layer, and between subsequent
# hidden layers
masks = [
(hidden_indices[0].unsqueeze(-1) >= input_indices.unsqueeze(0)).type_as(
var_index
)
]
for i in range(1, len(hidden_dims)):
masks.append(
(
hidden_indices[i].unsqueeze(-1) >= hidden_indices[i - 1].unsqueeze(0)
).type_as(var_index)
)
# Create mask from last hidden layer to output layer
masks.append(
(output_indices.unsqueeze(-1) > hidden_indices[-1].unsqueeze(0)).type_as(
var_index
)
)
return masks, mask_skip
class MaskedLinear(nn.Linear):
"""
A linear mapping with a given mask on the weights (arbitrary bias)
:param in_features: the number of input features
:param out_features: the number of output features
:param mask: the mask to apply to the in_features x out_features weight matrix
:param bias: whether or not `MaskedLinear` should include a bias term.
defaults to `True`
"""
def __init__(
self, in_features: int, out_features: int, mask: torch.Tensor, bias: bool = True
) -> None:
super().__init__(in_features, out_features, bias)
self.register_buffer("mask", mask.data)
def forward(self, _input: torch.Tensor) -> torch.Tensor:
masked_weight = self.weight * self.mask
return F.linear(_input, masked_weight, self.bias)
|
viewer_states/qLib_camera_zoom_vertigo_ql_dop.py | JosephSilvermanArt/qLib | 572 | 11159316 | import hou
import qLibCameraZoomVertigo
def createViewerStateTemplate():
state_name = "qLib::camera_zoom_vertigo_ql_dop"
state_label = "Camera Zoom/Vertigo (dop) [qL]"
template = hou.ViewerStateTemplate(
state_name,
state_label,
hou.dopNodeTypeCategory(),
#contexts = [ hou.sopNodeTypeCategory(), hou.dopNodeTypeCategory(), hou.lopNodeTypeCategory(), ],
)
template.bindFactory(qLibCameraZoomVertigo.State)
return template
|
venv/lib/python3.8/site-packages/kivy/tests/test_uix_bubble.py | felipesch92/projeto_kivy | 13,889 | 11159317 | <filename>venv/lib/python3.8/site-packages/kivy/tests/test_uix_bubble.py
import pytest
@pytest.mark.parametrize('prop_name', (
'_fills_row_first',
'_fills_from_left_to_right',
'_fills_from_top_to_bottom',
))
def test_a_certain_properties_from_the_super_class_are_overwritten(prop_name):
from kivy.uix.bubble import Bubble
from kivy.uix.gridlayout import GridLayout
assert issubclass(Bubble, GridLayout)
assert getattr(Bubble, prop_name) is not getattr(GridLayout, prop_name)
@pytest.mark.parametrize('orientation', ('vertical', 'horizontal'))
def test_always_lr_tb(orientation):
from kivy.uix.bubble import Bubble
b = Bubble(orientation=orientation)
assert b._fills_row_first
assert b._fills_from_left_to_right
assert b._fills_from_top_to_bottom
|
keyring/credentials.py | davegaeddert/keyring | 834 | 11159362 | import os
import abc
class Credential(metaclass=abc.ABCMeta):
"""Abstract class to manage credentials"""
@abc.abstractproperty
def username(self):
return None
@abc.abstractproperty
def password(self):
return None
class SimpleCredential(Credential):
"""Simple credentials implementation"""
def __init__(self, username, password):
self._username = username
self._password = password
@property
def username(self):
return self._username
@property
def password(self):
return self._password
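# Illustrative use (values are placeholders): SimpleCredential simply echoes back what it was
# given, which makes it handy as a stub in tests.
# cred = SimpleCredential("alice", "s3cret")
# assert (cred.username, cred.password) == ("alice", "s3cret")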
class EnvironCredential(Credential):
"""
Source credentials from environment variables.
Actual sourcing is deferred until requested.
Supports comparison by equality.
>>> e1 = EnvironCredential('a', 'b')
>>> e2 = EnvironCredential('a', 'b')
>>> e3 = EnvironCredential('a', 'c')
>>> e1 == e2
True
>>> e2 == e3
False
"""
def __init__(self, user_env_var, pwd_env_var):
self.user_env_var = user_env_var
self.pwd_env_var = pwd_env_var
def __eq__(self, other: object) -> bool:
return vars(self) == vars(other)
def _get_env(self, env_var):
"""Helper to read an environment variable"""
value = os.environ.get(env_var)
if not value:
raise ValueError('Missing environment variable:%s' % env_var)
return value
@property
def username(self):
return self._get_env(self.user_env_var)
@property
def password(self):
return self._get_env(self.pwd_env_var)
|
evosax/experimental/subpops/meta.py | RobertTLange/evosax | 102 | 11159363 | <reponame>RobertTLange/evosax
import jax
import jax.numpy as jnp
from typing import Optional, Tuple, List
import chex
from functools import partial
from .batch import BatchStrategy
from ... import Strategies
class MetaStrategy(BatchStrategy):
def __init__(
self,
meta_strategy_name: str,
inner_strategy_name: str,
meta_params: List[str],
num_dims: int,
popsize: int,
num_subpops: int,
meta_strategy_kwargs: dict = {},
inner_strategy_kwargs: dict = {},
communication: str = "independent",
n_devices: Optional[int] = None,
):
# Initialize the batch strategy - subpops of inner strategies
super().__init__(
inner_strategy_name,
num_dims,
popsize,
num_subpops,
inner_strategy_kwargs,
communication,
n_devices,
)
self.meta_strategy_name = meta_strategy_name
self.meta_params = meta_params
self.num_meta_dims = len(self.meta_params)
self.meta_strategy = Strategies[self.meta_strategy_name](
popsize=self.num_subpops,
num_dims=self.num_meta_dims,
**meta_strategy_kwargs
)
@property
def default_params_meta(self) -> chex.ArrayTree:
"""Return default parameters of meta-evolution strategy."""
base_params = self.meta_strategy.default_params
# Copy over default parameters for init min/init max
init_val = []
for k in self.meta_params:
init_val.append(self.strategy.default_params[k])
base_params["init_min"] = jnp.array(init_val)
base_params["init_max"] = jnp.array(init_val)
return base_params
@partial(jax.jit, static_argnums=(0,))
def ask_meta(
self,
rng: chex.PRNGKey,
meta_state: chex.ArrayTree,
meta_params: chex.ArrayTree,
inner_params: chex.ArrayTree,
) -> Tuple[chex.Array, chex.ArrayTree]:
"""`ask` for meta-parameters of different subpopulations."""
meta_x, meta_state = self.meta_strategy.ask(
rng, meta_state, meta_params
)
meta_x = meta_x.reshape(-1, self.num_meta_dims)
for i, k in enumerate(self.meta_params):
inner_params[k] = meta_x[:, i]
return inner_params, meta_state
@partial(jax.jit, static_argnums=(0,))
def initialize_meta(
self, rng: chex.PRNGKey, meta_params: chex.ArrayTree
) -> chex.ArrayTree:
"""`initialize` the meta-evolution strategy."""
return self.meta_strategy.initialize(rng, meta_params)
@partial(jax.jit, static_argnums=(0,))
def tell_meta(
self,
inner_params: chex.ArrayTree,
fitness: chex.Array,
meta_state: chex.ArrayTree,
meta_params: chex.ArrayTree,
) -> chex.ArrayTree:
"""`tell` performance data for meta-strategy state update."""
# TODO: Default - mean subpop fitness -> more flexible (min/max/median)
batch_fitness = fitness.reshape(self.num_subpops, self.sub_popsize)
meta_fitness = batch_fitness.mean(axis=1)
# Reconstruct meta_x for dict of inner params
meta_x = []
for i, k in enumerate(self.meta_params):
meta_x.append(inner_params[k].reshape(-1, 1))
meta_x = jnp.concatenate(meta_x, axis=1)
# Update the meta strategy
meta_state = self.meta_strategy.tell(
meta_x, meta_fitness, meta_state, meta_params
)
return meta_state
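# Rough usage sketch, kept in comments (the strategy names, the tuned parameter "diff_w", the
# source of `inner_params` and the fitness evaluation are assumptions for illustration; only
# the methods shown are defined in this file):
# meta = MetaStrategy("CMA_ES", "DE", meta_params=["diff_w"],
#                     num_dims=10, popsize=40, num_subpops=4)
# meta_state = meta.initialize_meta(rng, meta.default_params_meta)
# inner_params, meta_state = meta.ask_meta(rng, meta_state, meta.default_params_meta,
#                                          inner_params)  # inner_params: inner-strategy params dict
# ... evaluate the candidates produced by the inherited batch ask/tell loop -> fitness ...
# meta_state = meta.tell_meta(inner_params, fitness, meta_state, meta.default_params_meta)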
|
opendatasets/utils/archive.py | LippDas/opendatasets | 167 | 11159381 | import os
import tarfile
import zipfile
import gzip
def _is_tarxz(filename):
return filename.endswith(".tar.xz")
def _is_tar(filename):
return filename.endswith(".tar")
def _is_targz(filename):
return filename.endswith(".tar.gz")
def _is_tgz(filename):
return filename.endswith(".tgz")
def _is_gzip(filename):
return filename.endswith(".gz") and not filename.endswith(".tar.gz")
def _is_zip(filename):
return filename.endswith(".zip")
def extract_archive(from_path, to_path=None, remove_finished=False, dry_run=False):
print("Extracting archive " + from_path + " to " + to_path)
if dry_run:
print("This is a dry run, skipping...")
return
if to_path is None:
to_path = os.path.dirname(from_path)
if _is_tar(from_path):
with tarfile.open(from_path, 'r') as tar:
tar.extractall(path=to_path)
elif _is_targz(from_path) or _is_tgz(from_path):
with tarfile.open(from_path, 'r:gz') as tar:
tar.extractall(path=to_path)
elif _is_tarxz(from_path):
with tarfile.open(from_path, 'r:xz') as tar:
tar.extractall(path=to_path)
elif _is_gzip(from_path):
to_path = os.path.join(to_path, os.path.splitext(
os.path.basename(from_path))[0])
with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
out_f.write(zip_f.read())
elif _is_zip(from_path):
with zipfile.ZipFile(from_path, 'r') as z:
z.extractall(to_path)
else:
raise ValueError("Extraction of {} not supported".format(from_path))
if remove_finished:
os.remove(from_path)
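# Illustrative call (paths are hypothetical): unpack a downloaded archive next to itself and
# delete the archive afterwards. Supported formats: .tar, .tar.gz/.tgz, .tar.xz, .gz and .zip.
# extract_archive("downloads/dogs-vs-cats.zip", to_path="downloads/dogs-vs-cats",
#                 remove_finished=True)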
|
examples/vm_scheduling/rule_based_algorithm/rule_based_algorithm.py | yangboz/maro | 598 | 11159407 | <filename>examples/vm_scheduling/rule_based_algorithm/rule_based_algorithm.py<gh_stars>100-1000
import abc
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload
class RuleBasedAlgorithm(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
"""This method will determine allocate which PM to the current VM.
"""
pass
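# Illustrative subclass sketch (the AllocateAction signature and `decision_event.valid_pms`
# are assumptions about the MARO API, not verified by this file):
# class FirstFit(RuleBasedAlgorithm):
#     def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
#         # naively pick the first PM the simulator reports as valid
#         return AllocateAction(vm_id=decision_event.vm_id, pm_id=decision_event.valid_pms[0])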
|
tests/test_utilities/test_csvjoin.py | Bonifacio2/csvkit | 3,239 | 11159424 | #!/usr/bin/env python
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase, EmptyFileTests
class TestCSVJoin(CSVKitTestCase, EmptyFileTests):
Utility = CSVJoin
default_args = ['examples/dummy.csv', '-']
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
launch_new_instance()
def test_sequential(self):
output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_inner(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_left(self):
output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 5)
def test_right(self):
output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_outer(self):
output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 6)
def test_left_short_columns(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
with open('examples/join_short.csv') as f:
self.assertEqual(output.readlines(), f.readlines())
def test_single(self):
self.assertRows(['examples/dummy.csv', '--no-inference'], [
['a', 'b', 'c'],
['1', '2', '3'],
])
def test_no_blanks(self):
self.assertRows(['examples/blanks.csv', 'examples/blanks.csv'], [
['a', 'b', 'c', 'd', 'e', 'f', 'a2', 'b2', 'c2', 'd2', 'e2', 'f2'],
['', '', '', '', '', '', '', '', '', '', '', ''],
])
def test_blanks(self):
self.assertRows(['--blanks', 'examples/blanks.csv', 'examples/blanks.csv'], [
['a', 'b', 'c', 'd', 'e', 'f', 'a2', 'b2', 'c2', 'd2', 'e2', 'f2'],
['', 'NA', 'N/A', 'NONE', 'NULL', '.', '', 'NA', 'N/A', 'NONE', 'NULL', '.'],
])
def test_no_header_row(self):
output = self.get_output_as_io(
['-c', '1', '--no-header-row', 'examples/join_a.csv', 'examples/join_no_header_row.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_no_inference(self):
self.assertRows(['--no-inference', 'examples/join_a.csv', 'examples/join_short.csv'], [
['a', 'b', 'c', 'a2', 'b2', 'c2', 'b2_2', 'c2_2'],
['1', 'b', 'c', '1', 'b', '', 'b', 'c'],
['2', 'b', 'c', '1', 'b', '', 'b', 'c'],
['3', 'b', 'c', '', '', '', '', ''],
])
def test_sniff_limit_no_limit(self):
self.assertRows(['examples/join_a.csv', 'examples/sniff_limit.csv'], [
['a', 'b', 'c', 'a2', 'b2', 'c2'],
['1', 'b', 'c', 'True', '2', '3'],
['2', 'b', 'c', '', '', ''],
['3', 'b', 'c', '', '', ''],
])
def test_sniff_limit_zero_limit(self):
self.assertRows(['--snifflimit', '0', 'examples/join_a.csv', 'examples/sniff_limit.csv'], [
['a', 'b', 'c', 'a;b;c'],
['1', 'b', 'c', '1;2;3'],
['2', 'b', 'c', ''],
['3', 'b', 'c', ''],
])
|
Python/Syntax/Concatenation.py | piovezan/SOpt | 148 | 11159439 | <gh_stars>100-1000
variavel = input('informe um valor')
print('{' + variavel + '}')
#https://pt.stackoverflow.com/q/444281/101
|
examples/pytorch/compGCN/data_loader.py | ketyi/dgl | 9,516 | 11159464 | import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import dgl
from collections import defaultdict as ddict
from ordered_set import OrderedSet
class TrainDataset(Dataset):
"""
Training Dataset class.
Parameters
----------
triples: The triples used for training the model
num_ent: Number of entities in the knowledge graph
lbl_smooth: Label smoothing
Returns
-------
A training Dataset class instance used by DataLoader
"""
def __init__(self, triples, num_ent, lbl_smooth):
self.triples = triples
self.num_ent = num_ent
self.lbl_smooth = lbl_smooth
self.entities = np.arange(self.num_ent, dtype=np.int32)
def __len__(self):
return len(self.triples)
def __getitem__(self, idx):
ele = self.triples[idx]
triple, label = torch.LongTensor(ele['triple']), np.int32(ele['label'])
trp_label = self.get_label(label)
#label smoothing
if self.lbl_smooth != 0.0:
trp_label = (1.0 - self.lbl_smooth) * trp_label + (1.0 / self.num_ent)
return triple, trp_label
@staticmethod
def collate_fn(data):
triples = []
labels = []
for triple, label in data:
triples.append(triple)
labels.append(label)
triple = torch.stack(triples, dim=0)
trp_label = torch.stack(labels, dim=0)
return triple, trp_label
#for edges that exist in the graph, the entry is 1.0, otherwise the entry is 0.0
def get_label(self, label):
y = np.zeros([self.num_ent], dtype=np.float32)
for e2 in label:
y[e2] = 1.0
return torch.FloatTensor(y)
class TestDataset(Dataset):
"""
Evaluation Dataset class.
Parameters
----------
triples: The triples used for evaluating the model
num_ent: Number of entities in the knowledge graph
Returns
-------
An evaluation Dataset class instance used by DataLoader for model evaluation
"""
def __init__(self, triples, num_ent):
self.triples = triples
self.num_ent = num_ent
def __len__(self):
return len(self.triples)
def __getitem__(self, idx):
ele = self.triples[idx]
triple, label = torch.LongTensor(ele['triple']), np.int32(ele['label'])
label = self.get_label(label)
return triple, label
@staticmethod
def collate_fn(data):
triples = []
labels = []
for triple, label in data:
triples.append(triple)
labels.append(label)
triple = torch.stack(triples, dim=0)
label = torch.stack(labels, dim=0)
return triple, label
#for edges that exist in the graph, the entry is 1.0, otherwise the entry is 0.0
def get_label(self, label):
y = np.zeros([self.num_ent], dtype=np.float32)
for e2 in label:
y[e2] = 1.0
return torch.FloatTensor(y)
class Data(object):
def __init__(self, dataset, lbl_smooth, num_workers, batch_size):
"""
Reading in raw triples and converts it into a standard format.
Parameters
----------
dataset: The name of the dataset
lbl_smooth: Label smoothing
num_workers: Number of workers of dataloaders
batch_size: Batch size of dataloaders
Returns
-------
self.ent2id: Entity to unique identifier mapping
self.rel2id: Relation to unique identifier mapping
self.id2ent: Inverse mapping of self.ent2id
self.id2rel: Inverse mapping of self.rel2id
self.num_ent: Number of entities in the knowledge graph
self.num_rel: Number of relations in the knowledge graph
        self.g: The dgl graph constructed from the edges in the training set and all the entities in the knowledge graph
self.data['train']: Stores the triples corresponding to training dataset
self.data['valid']: Stores the triples corresponding to validation dataset
self.data['test']: Stores the triples corresponding to test dataset
self.data_iter: The dataloader for different data splits
"""
self.dataset = dataset
self.lbl_smooth = lbl_smooth
self.num_workers = num_workers
self.batch_size = batch_size
#read in raw data and get mappings
ent_set, rel_set = OrderedSet(), OrderedSet()
for split in ['train', 'test', 'valid']:
for line in open('./{}/{}.txt'.format(self.dataset, split)):
sub, rel, obj = map(str.lower, line.strip().split('\t'))
ent_set.add(sub)
rel_set.add(rel)
ent_set.add(obj)
self.ent2id = {ent: idx for idx, ent in enumerate(ent_set)}
self.rel2id = {rel: idx for idx, rel in enumerate(rel_set)}
self.rel2id.update({rel+'_reverse': idx+len(self.rel2id) for idx, rel in enumerate(rel_set)})
self.id2ent = {idx: ent for ent, idx in self.ent2id.items()}
self.id2rel = {idx: rel for rel, idx in self.rel2id.items()}
self.num_ent = len(self.ent2id)
self.num_rel = len(self.rel2id) // 2
#read in ids of subjects, relations, and objects for train/test/valid
self.data = ddict(list) #stores the triples
        sr2o = ddict(set)  # The key of sr2o is (subject, relation), and the items are all the successors following (subject, relation)
src=[]
dst=[]
rels = []
inver_src = []
inver_dst = []
inver_rels = []
for split in ['train', 'test', 'valid']:
for line in open('./{}/{}.txt'.format(self.dataset, split)):
sub, rel, obj = map(str.lower, line.strip().split('\t'))
sub_id, rel_id, obj_id = self.ent2id[sub], self.rel2id[rel], self.ent2id[obj]
self.data[split].append((sub_id, rel_id, obj_id))
if split == 'train':
sr2o[(sub_id, rel_id)].add(obj_id)
sr2o[(obj_id, rel_id+self.num_rel)].add(sub_id) #append the reversed edges
src.append(sub_id)
dst.append(obj_id)
rels.append(rel_id)
inver_src.append(obj_id)
inver_dst.append(sub_id)
inver_rels.append(rel_id+self.num_rel)
#construct dgl graph
src = src + inver_src
dst = dst + inver_dst
rels = rels + inver_rels
self.g = dgl.graph((src, dst), num_nodes=self.num_ent)
self.g.edata['etype'] = torch.Tensor(rels).long()
#identify in and out edges
in_edges_mask = [True] * (self.g.num_edges()//2) + [False] * (self.g.num_edges()//2)
out_edges_mask = [False] * (self.g.num_edges()//2) + [True] * (self.g.num_edges()//2)
self.g.edata['in_edges_mask'] = torch.Tensor(in_edges_mask)
self.g.edata['out_edges_mask'] = torch.Tensor(out_edges_mask)
#Prepare train/valid/test data
self.data = dict(self.data)
self.sr2o = {k: list(v) for k, v in sr2o.items()} #store only the train data
for split in ['test', 'valid']:
for sub, rel, obj in self.data[split]:
sr2o[(sub, rel)].add(obj)
sr2o[(obj, rel+self.num_rel)].add(sub)
self.sr2o_all = {k: list(v) for k, v in sr2o.items()} #store all the data
self.triples = ddict(list)
for (sub, rel), obj in self.sr2o.items():
self.triples['train'].append({'triple':(sub, rel, -1), 'label': self.sr2o[(sub, rel)]})
for split in ['test', 'valid']:
for sub, rel, obj in self.data[split]:
rel_inv = rel + self.num_rel
self.triples['{}_{}'.format(split, 'tail')].append({'triple': (sub, rel, obj), 'label': self.sr2o_all[(sub, rel)]})
self.triples['{}_{}'.format(split, 'head')].append({'triple': (obj, rel_inv, sub), 'label': self.sr2o_all[(obj, rel_inv)]})
self.triples = dict(self.triples)
def get_train_data_loader(split, batch_size, shuffle=True):
return DataLoader(
TrainDataset(self.triples[split], self.num_ent, self.lbl_smooth),
batch_size = batch_size,
shuffle = shuffle,
num_workers = max(0, self.num_workers),
collate_fn = TrainDataset.collate_fn
)
def get_test_data_loader(split, batch_size, shuffle=True):
return DataLoader(
TestDataset(self.triples[split], self.num_ent),
batch_size = batch_size,
shuffle = shuffle,
num_workers = max(0, self.num_workers),
collate_fn = TestDataset.collate_fn
)
#train/valid/test dataloaders
self.data_iter = {
'train': get_train_data_loader('train', self.batch_size),
'valid_head': get_test_data_loader('valid_head', self.batch_size),
'valid_tail': get_test_data_loader('valid_tail', self.batch_size),
'test_head': get_test_data_loader('test_head', self.batch_size),
'test_tail': get_test_data_loader('test_tail', self.batch_size),
}
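# Minimal usage sketch (the dataset name "FB15k-237" is illustrative; the directory must contain
# train.txt / valid.txt / test.txt as parsed above):
# data = Data(dataset="FB15k-237", lbl_smooth=0.1, num_workers=2, batch_size=256)
# for triple, label in data.data_iter["train"]:
#     ...  # triple: (batch, 3) LongTensor, label: (batch, num_ent) smoothed targets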
|
onmt/translate/__init__.py | qurrata111/OpenNMT-py | 5,864 | 11159469 | """ Modules for translation """
from onmt.translate.translator import Translator, GeneratorLM
from onmt.translate.translation import Translation, TranslationBuilder
from onmt.translate.beam_search import BeamSearch, GNMTGlobalScorer
from onmt.translate.beam_search import BeamSearchLM
from onmt.translate.decode_strategy import DecodeStrategy
from onmt.translate.greedy_search import GreedySearch, GreedySearchLM
from onmt.translate.penalties import PenaltyBuilder
from onmt.translate.translation_server import TranslationServer, \
ServerModelError
__all__ = ['Translator', 'Translation', 'BeamSearch',
'GNMTGlobalScorer', 'TranslationBuilder',
'PenaltyBuilder', 'TranslationServer', 'ServerModelError',
"DecodeStrategy", "GreedySearch", "GreedySearchLM",
"BeamSearchLM", "GeneratorLM"]
|
Filters/General/Testing/Python/TestFEDiscreteClipper2D.py | satya-arjunan/vtk8 | 1,755 | 11159471 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
ren1.SetBackground(1,1,1)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Create synthetic image data
VTK_SHORT = 4
img = vtk.vtkImageData()
img.SetDimensions(6,5,1)
img.AllocateScalars(VTK_SHORT,1)
scalars = img.GetPointData().GetScalars()
scalars.SetTuple1(0,0)
scalars.SetTuple1(1,0)
scalars.SetTuple1(2,0)
scalars.SetTuple1(3,0)
scalars.SetTuple1(4,0)
scalars.SetTuple1(5,0)
scalars.SetTuple1(6,0)
scalars.SetTuple1(7,0)
scalars.SetTuple1(8,0)
scalars.SetTuple1(9,0)
scalars.SetTuple1(10,0)
scalars.SetTuple1(11,0)
scalars.SetTuple1(12,0)
scalars.SetTuple1(13,0)
scalars.SetTuple1(14,0)
scalars.SetTuple1(15,2)
scalars.SetTuple1(16,4)
scalars.SetTuple1(17,0)
scalars.SetTuple1(18,0)
scalars.SetTuple1(19,0)
scalars.SetTuple1(20,1)
scalars.SetTuple1(21,1)
scalars.SetTuple1(22,3)
scalars.SetTuple1(23,3)
scalars.SetTuple1(24,0)
scalars.SetTuple1(25,0)
scalars.SetTuple1(26,3)
scalars.SetTuple1(27,0)
scalars.SetTuple1(28,0)
scalars.SetTuple1(29,3)
# Create the pipeline, extract some regions
discrete = vtk.vtkDiscreteFlyingEdgesClipper2D()
discrete.SetInputData(img)
discrete.SetValue(0,1)
discrete.SetValue(1,2)
discrete.SetValue(2,3)
discrete.SetValue(3,4)
discrete.Update()
# Clipped polygons are generated
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(discrete.GetOutputPort())
mapper.SetScalarModeToUseCellData()
mapper.SetScalarRange(1,4);
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# The image gridlines
gridMapper = vtk.vtkDataSetMapper()
gridMapper.SetInputData(img)
gridMapper.ScalarVisibilityOff()
gridActor = vtk.vtkActor()
gridActor.SetMapper(gridMapper)
gridActor.GetProperty().SetRepresentationToWireframe()
gridActor.GetProperty().SetColor(0,0,1)
ren1.AddActor(actor)
ren1.AddActor(gridActor)
renWin.Render()
iren.Start()
|
examples/service/handlers/secondService.py | BSlience/fastweb | 123 | 11159511 | # coding:utf8
"""generate by fasthrift"""
from fastweb.service import ABLogic
class secondServiceHandler(ABLogic):
def sayHello(self):
pass
def getData(self):
pass
|
tests/h/views/groups_test.py | pombredanne/h | 2,103 | 11159515 | <reponame>pombredanne/h
from unittest import mock
import pytest
from h_matchers import Any
from pyramid.httpexceptions import HTTPMovedPermanently
from h.traversal.group import GroupContext
from h.views import groups as views
@pytest.mark.usefixtures("group_create_service", "handle_form_submission", "routes")
class TestGroupCreateController:
def test_get_renders_form(self, controller):
controller.form = form_validating_to({})
result = controller.get()
assert result == {"form": "valid form"}
def test_post_calls_handle_form_submission(
self, controller, handle_form_submission
):
controller.post()
handle_form_submission.assert_called_once_with(
controller.request, controller.form, Any.function(), Any.function()
)
def test_post_returns_handle_form_submission(
self, controller, handle_form_submission
):
assert controller.post() == handle_form_submission.return_value
def test_post_creates_new_group_if_form_valid(
self, controller, group_create_service, handle_form_submission, pyramid_config
):
pyramid_config.testing_securitypolicy("ariadna")
# If the form submission is valid then handle_form_submission() should
# call on_success() with the appstruct.
def call_on_success( # pylint: disable=unused-argument
request, form, on_success, on_failure
):
on_success({"name": "my_new_group", "description": "foobar"})
handle_form_submission.side_effect = call_on_success
controller.post()
assert group_create_service.create_private_group.call_args_list == [
mock.call(name="my_new_group", userid="ariadna", description="foobar")
]
def test_post_redirects_if_form_valid(
self,
controller,
handle_form_submission,
matchers,
group_create_service,
factories,
):
group = factories.Group()
group_create_service.create_private_group.return_value = group
# If the form submission is valid then handle_form_submission() should
# return the redirect that on_success() returns.
def return_on_success( # pylint: disable=unused-argument
request, form, on_success, on_failure
):
return on_success({"name": "my_new_group"})
handle_form_submission.side_effect = return_on_success
response = controller.post()
assert response == matchers.Redirect303To(f"/g/{group.pubid}/{group.slug}")
def test_post_does_not_create_group_if_form_invalid(
self, controller, group_create_service, handle_form_submission
):
# If the form submission is invalid then handle_form_submission() should
# call on_failure().
def call_on_failure( # pylint: disable=unused-argument
request, form, on_success, on_failure
):
on_failure()
handle_form_submission.side_effect = call_on_failure
controller.post()
assert not group_create_service.create_private_group.called
def test_post_returns_template_data_if_form_invalid(
self, controller, handle_form_submission
):
# If the form submission is invalid then handle_form_submission() should
# return the template data that on_failure() returns.
def return_on_failure( # pylint: disable=unused-argument
request, form, on_success, on_failure
):
return on_failure()
handle_form_submission.side_effect = return_on_failure
assert controller.post() == {"form": controller.form.render.return_value}
@pytest.fixture
def controller(self, pyramid_request):
return views.GroupCreateController(pyramid_request)
@pytest.fixture
def handle_form_submission(self, patch):
return patch("h.views.groups.form.handle_form_submission")
@pytest.mark.usefixtures("routes")
class TestGroupEditController:
def test_get_reads_group_properties(self, pyramid_request, group):
pyramid_request.create_form.return_value = FakeForm()
result = views.GroupEditController(GroupContext(group), pyramid_request).get()
assert result == {
"form": {
"name": group.name,
"description": group.description,
},
"group_path": f"/g/{group.pubid}/{group.slug}",
}
def test_post_sets_group_properties(
self, form_validating_to, pyramid_request, group
):
controller = views.GroupEditController(GroupContext(group), pyramid_request)
controller.form = form_validating_to(
{"name": "New name", "description": "New description"}
)
controller.post()
assert group.name == "New name"
assert group.description == "New description"
@pytest.fixture
def group(self, factories):
return factories.Group(description="DESCRIPTION")
@pytest.mark.usefixtures("routes")
def test_read_noslug_redirects(pyramid_request, factories):
group = factories.Group()
with pytest.raises(HTTPMovedPermanently) as exc:
views.read_noslug(GroupContext(group), pyramid_request)
assert exc.value.location == f"/g/{group.pubid}/{group.slug}"
class FakeForm:
def set_appstruct(self, appstruct):
self.appstruct = appstruct # pylint:disable=attribute-defined-outside-init
def render(self):
return self.appstruct
def form_validating_to(appstruct):
form = mock.Mock()
form.validate.return_value = appstruct
form.render.return_value = "valid form"
return form
@pytest.fixture
def routes(pyramid_config):
pyramid_config.add_route("group_read", "/g/{pubid}/{slug}")
|
1100-1200q/1184.py | rampup01/Leetcode | 990 | 11159563 | '''
A bus has n stops numbered from 0 to n - 1 that form a circle. We know the distance between all pairs of neighboring stops where distance[i] is the distance between the stops number i and (i + 1) % n.
The bus goes along both directions i.e. clockwise and counterclockwise.
Return the shortest distance between the given start and destination stops.
Example 1:
Input: distance = [1,2,3,4], start = 0, destination = 1
Output: 1
Explanation: Distance between 0 and 1 is 1 or 9, minimum is 1.
Example 2:
Input: distance = [1,2,3,4], start = 0, destination = 2
Output: 3
Explanation: Distance between 0 and 2 is 3 or 7, minimum is 3.
Example 3:
Input: distance = [1,2,3,4], start = 0, destination = 3
Output: 4
Explanation: Distance between 0 and 3 is 6 or 4, minimum is 4.
Constraints:
1 <= n <= 10^4
distance.length == n
0 <= start, destination < n
0 <= distance[i] <= 10^4
'''
class Solution(object):
def distanceBetweenBusStops(self, distance, start, destination):
"""
:type distance: List[int]
:type start: int
:type destination: int
:rtype: int
"""
start, destination = min(start, destination), max(start, destination)
clock_dist = sum(distance[start:destination])
anti_clock_dist = sum(distance[:start]) + sum(distance[destination:])
return min(clock_dist, anti_clock_dist)
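# Quick self-checks mirroring the three examples in the problem statement above:
assert Solution().distanceBetweenBusStops([1, 2, 3, 4], 0, 1) == 1
assert Solution().distanceBetweenBusStops([1, 2, 3, 4], 0, 2) == 3
assert Solution().distanceBetweenBusStops([1, 2, 3, 4], 0, 3) == 4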
|
utils/util.py | shlomi-amitai/monorec | 388 | 11159589 | import json
from functools import partial
from pathlib import Path
from datetime import datetime
from itertools import repeat
from collections import OrderedDict
import torch
from PIL import Image
import numpy as np
import torch.nn.functional as F
def map_fn(batch, fn):
if isinstance(batch, dict):
for k in batch.keys():
batch[k] = map_fn(batch[k], fn)
return batch
elif isinstance(batch, list):
return [map_fn(e, fn) for e in batch]
else:
return fn(batch)
def to(data, device):
if isinstance(data, dict):
return {k: to(data[k], device) for k in data.keys()}
elif isinstance(data, list):
return [to(v, device) for v in data]
else:
return data.to(device)
eps = 1e-6
def preprocess_roi(depth_prediction, depth_gt: torch.Tensor, roi):
if roi is not None:
if isinstance(depth_prediction, list):
depth_prediction = [dpr[:, :, roi[0]:roi[1], roi[2]:roi[3]] for dpr in depth_prediction]
else:
depth_prediction = depth_prediction[:, :, roi[0]:roi[1], roi[2]:roi[3]]
depth_gt = depth_gt[:, :, roi[0]:roi[1], roi[2]:roi[3]]
return depth_prediction, depth_gt
def get_absolute_depth(depth_prediction, depth_gt: torch.Tensor, max_distance=None):
if max_distance is not None:
if isinstance(depth_prediction, list):
depth_prediction = [torch.clamp_min(dpr, 1 / max_distance) for dpr in depth_prediction]
else:
depth_prediction = torch.clamp_min(depth_prediction, 1 / max_distance)
depth_gt = torch.clamp_min(depth_gt, 1 / max_distance)
if isinstance(depth_prediction, list):
return [1 / dpr for dpr in depth_prediction], 1 / depth_gt
else:
return 1 / depth_prediction, 1 / depth_gt
def get_positive_depth(depth_prediction: torch.Tensor, depth_gt: torch.Tensor):
if isinstance(depth_prediction, list):
depth_prediction = [torch.nn.functional.relu(dpr) for dpr in depth_prediction]
else:
depth_prediction = torch.nn.functional.relu(depth_prediction)
depth_gt = torch.nn.functional.relu(depth_gt)
return depth_prediction, depth_gt
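# Note: DepthWarper and pixel2cam used below are not imported in this excerpt; they are
# assumed to come from the kornia geometry package.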
def depthmap_to_points(depth: torch.Tensor, intrinsics: torch.Tensor, flatten=False):
n, c, h, w = depth.shape
grid = DepthWarper._create_meshgrid(h, w).expand(n, -1, -1, -1).to(depth.device)
points = pixel2cam(depth, torch.inverse(intrinsics), grid)
if not flatten:
return points
else:
return points.view(n, h * w, 3)
def save_frame_for_tsdf(dir: Path, index, keyframe, depth, pose, crop=None, min_distance=None, max_distance=None):
if crop is not None:
keyframe = keyframe[:, crop[0]:crop[1], crop[2]:crop[3]]
depth = depth[crop[0]:crop[1], crop[2]:crop[3]]
keyframe = ((keyframe + .5) * 255).to(torch.uint8).permute(1, 2, 0)
depth = (1 / depth * 100).to(torch.int16)
depth[depth < 0] = 0
if min_distance is not None:
depth[depth < min_distance * 100] = 0
if max_distance is not None:
depth[depth > max_distance * 100] = 0
Image.fromarray(keyframe.numpy()).save(dir / f"frame-{index:06d}.color.jpg")
Image.fromarray(depth.numpy()).save(dir / f"frame-{index:06d}.depth.png")
np.savetxt(dir / f"frame-{index:06d}.pose.txt", torch.inverse(pose).numpy())
def save_intrinsics_for_tsdf(dir: Path, intrinsics, crop=None):
if crop is not None:
intrinsics[0, 2] -= crop[2]
intrinsics[1, 2] -= crop[0]
np.savetxt(dir / f"camera-intrinsics.txt", intrinsics[:3, :3].numpy())
def get_mask(pred: torch.Tensor, gt: torch.Tensor, max_distance=None, pred_all_valid=True):
mask = gt == 0
if max_distance:
mask |= (gt < 1 / max_distance)
if not pred_all_valid:
mask |= pred == 0
return mask
def mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None):
t = t.clone()
t[m] = 0
els = 1
if dim is None:
dim = list(range(len(t.shape)))
for d in dim:
els *= t.shape[d]
return torch.sum(t, dim=dim) / (els - torch.sum(m.to(torch.float), dim=dim))
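# Worked example (illustrative): for t = tensor([1., 2., 3.]) and m = tensor([False, True, False]),
# the masked entry is zeroed, the sum is 4.0 and it is divided by the 2 unmasked elements,
# so mask_mean(t, m) == 2.0.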
def conditional_flip(x, condition, inplace=True):
if inplace:
x[condition, :, :, :] = x[condition, :, :, :].flip(3)
else:
flipped_x = x.clone()
flipped_x[condition, :, :, :] = x[condition, :, :, :].flip(3)
return flipped_x
def create_mask(c: int, height: int, width: int, border_radius: int, device):
mask = torch.ones(c, 1, height - 2 * border_radius, width - 2 * border_radius, device=device)
return torch.nn.functional.pad(mask, [border_radius, border_radius, border_radius, border_radius])
def median_scaling(data_dict):
target = data_dict["target"]
prediction = data_dict["result"]
mask = target > 0
ratios = mask.new_tensor([torch.median(target[i, mask[i]]) / torch.median(prediction[i, mask[i]]) for i in range(target.shape[0])], dtype=torch.float32)
data_dict = dict(data_dict)
data_dict["result"] = prediction * ratios.view(-1, 1, 1, 1)
return data_dict
unsqueezer = partial(torch.unsqueeze, dim=0)
class DS_Wrapper(torch.utils.data.Dataset):
def __init__(self, dataset, start=0, end=-1, every_nth=1):
super().__init__()
self.dataset = dataset
self.start = start
if end == -1:
self.end = len(self.dataset)
else:
self.end = end
self.every_nth = every_nth
def __getitem__(self, index: int):
return self.dataset.__getitem__(index * self.every_nth + self.start)
def __len__(self):
return (self.end - self.start) // self.every_nth + (1 if (self.end - self.start) % self.every_nth != 0 else 0)
class DS_Merger(torch.utils.data.Dataset):
def __init__(self, datasets):
super().__init__()
self.datasets = datasets
def __getitem__(self, index: int):
        return (ds.__getitem__(index) for ds in self.datasets)
def __len__(self):
return len(self.datasets[0])
class LossWrapper(torch.nn.Module):
def __init__(self, loss_function, **kwargs):
super().__init__()
self.kwargs = kwargs
self.loss_function = loss_function
self.num_devices = 1.0
def forward(self, data):
loss_dict = self.loss_function(data, **self.kwargs)
loss_dict = map_fn(loss_dict, lambda x: (x / self.num_devices))
if loss_dict["loss"].requires_grad:
loss_dict["loss"].backward()
loss_dict["loss"].detach_()
return data, loss_dict
class ValueFader:
def __init__(self, steps, values):
self.steps = steps
self.values = values
self.epoch = 0
def set_epoch(self, epoch):
self.epoch = epoch
def get_value(self, epoch=None):
if epoch is None:
epoch = self.epoch
if epoch >= self.steps[-1]:
return self.values[-1]
step_index = 0
while step_index < len(self.steps)-1 and epoch >= self.steps[step_index+1]:
step_index += 1
p = float(epoch - self.steps[step_index]) / float(self.steps[step_index+1] - self.steps[step_index])
return (1-p) * self.values[step_index] + p * self.values[step_index+1]
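# Worked example (illustrative): ValueFader(steps=[0, 10], values=[1.0, 0.0]).get_value(5)
# interpolates linearly between the surrounding steps (p = 0.5) and returns 0.5; any epoch
# at or beyond the last step simply returns the final value, here 0.0.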
def pose_distance_thresh(data_dict, spatial_thresh=.6, rotational_thresh=.05):
poses = torch.stack([data_dict["keyframe_pose"]] + data_dict["poses"], dim=1)
forward = poses.new_tensor([0, 0, 1], dtype=torch.float32)
spatial_expanse = torch.norm(torch.max(poses[..., :3, 3], dim=1)[0] - torch.min(poses[..., :3, 3], dim=1)[0], dim=1)
rotational_expanse = torch.norm(torch.max(poses[..., :3, :3] @ forward, dim=1)[0] - torch.min(poses[..., :3, :3] @ forward, dim=1)[0], dim=1)
return (spatial_expanse > spatial_thresh) | (rotational_expanse > rotational_thresh)
def dilate_mask(m: torch.Tensor, size: int = 15):
k = m.new_ones((1, 1, size, size), dtype=torch.float32)
dilated_mask = F.conv2d((m >= 0.5).to(dtype=torch.float32), k, padding=(size//2, size//2))
return dilated_mask > 0
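# In effect this is a binary morphological dilation: an output pixel is True whenever any
# pixel of the thresholded mask (m >= 0.5) lies within its (size x size) neighbourhood.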
def operator_on_dict(dict_0: dict, dict_1: dict, operator, default=0):
keys = set(dict_0.keys()).union(set(dict_1.keys()))
results = {}
for k in keys:
v_0 = dict_0[k] if k in dict_0 else default
v_1 = dict_1[k] if k in dict_1 else default
results[k] = operator(v_0, v_1)
return results
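# Worked example (illustrative):
# operator_on_dict({"a": 1}, {"a": 2, "b": 3}, operator=lambda x, y: x + y) -> {"a": 3, "b": 3}
# (keys missing from one dict fall back to `default`, here 0).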
numbers = [f"{i:d}" for i in range(1, 10, 1)]
def filter_state_dict(state_dict, data_parallel=False):
if data_parallel:
state_dict = {k[7:]: state_dict[k] for k in state_dict}
state_dict = {(k[2:] if k.startswith("0") else k): state_dict[k] for k in state_dict if not k[0] in numbers}
return state_dict
def seed_rng(seed):
torch.manual_seed(seed)
import random
random.seed(seed)
np.random.seed(0)
def ensure_dir(dirname):
dirname = Path(dirname)
if not dirname.is_dir():
dirname.mkdir(parents=True, exist_ok=False)
def read_json(fname):
with fname.open('rt') as handle:
return json.load(handle, object_hook=OrderedDict)
def write_json(content, fname):
with fname.open('wt') as handle:
json.dump(content, handle, indent=4, sort_keys=False)
def inf_loop(data_loader):
''' wrapper function for endless data loader. '''
for loader in repeat(data_loader):
yield from loader
class Timer:
def __init__(self):
self.cache = datetime.now()
def check(self):
now = datetime.now()
duration = now - self.cache
self.cache = now
return duration.total_seconds()
def reset(self):
self.cache = datetime.now()
|
Config.py | securitylist/GitLeak | 132 | 11159629 | # Relative path of pattern file to GitPrey
FILE_DB = "pattern/file.db"
INFO_DB = "pattern/info.db"
# GitHub account config for searching
USER_NAME = ""
PASSWORD = ""
# Blacklist
EXT_BLACKLIST = [".ico", ".flv", ".css", ".jpg", ".png", ".jpeg", ".gif", ".pdf", ".ss3", ".rar", ".zip", ".avi", ".mp4", ".swf", ".wmi", ".exe", ".mpeg", ".dll", ".pcap", ".log", ".class", ".html"]
LANG_BLACKLIST = ["html", "jsp", "smali"]
LINE_MUSTHAVE = ['=', ':', 'define']
REPO_NAME_BLACKLIST = ['spider', 'crawl']
# other
MAX_INFONUM = 3
MAX_LINELEN = 512
MAX_COUNT_SINGLE_FILE = 20
MAX_SEARCH_REPO = 20
MAX_REPO_SINGLE_SEARCH = 5 |
furniture/env/xml_adjusting/xml_edit.py | KejiaChen/assembly | 364 | 11159633 |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
import sys
import argparse
import colorsys
import re
# def _get_colors(num_colors):
# colors=[]
# for i in np.arange(0., 360., 360. / num_colors):
# hue = i/360.
# lightness = (50 + np.random.rand() * 10)/100.
# saturation = (90 + np.random.rand() * 10)/100.
# rgb = colorsys.hls_to_rgb(hue, lightness, saturation)
# rgba = str(tuple([round(x,4) for x in rgb])).strip('()') + ', 0.3'
# colors.append(rgba)
# return colors, rgb
def _get_colors(num_colors):
colors = (
"0.9019 0.0980 0.2941 0.3",
"0.2352 0.7058 0.2941 0.3",
"1.0 0.8823 0.0980 0.3",
"0.2627 0.3882 0.8470 0.3",
"0.9607 0.5098 0.1921 0.3",
"0.5686 0.1176 0.7058 0.3",
"0.2745 0.9411 0.9411 0.3",
"0.9411 0.1960 0.9019 0.3",
"0.7372 0.9647 0.0470 0.3",
"0.9803 0.7450 0.7450 0.3",
"0.0 0.5019 0.5019 0.3",
"0.9019 0.7450 1.0 0.3",
"0.6039 0.3882 0.1411 0.3",
"1.0 0.9803 0.7843 0.3",
"0.5019 0.0 0.0 0.3",
"0.6666 1.0 0.7647 0.3",
"0.5019 0.5019 0.0 0.3",
"1.0 0.8470 0.6941 0.3",
"0.0 0.0 0.4588 0.3",
"0.5019 0.5019 0.5019 0.3",
"1.0 1.0 1.0 0.3",
"0.0 0.0 0.0 0.3",
)
return colors[0:num_colors]
def str2bool(v):
return v.lower() == "true"
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--path",
type=str,
default="../models/assets/objects/in_progress/complete/bench_bjoderna_0208/bench_bjoderna_0208.xml",
)
parser.add_argument("--add_welds", type=str2bool, default=True)
parser.add_argument("--add_colors", type=str2bool, default=True)
config, unparsed = parser.parse_known_args()
tree = ET.parse(config.path) # Path to input file
root = tree.getroot()
equality = root.find("equality")
print(config.path)
# get count of conn_sites, and get map of groups->bodies
bodymap = dict()
connections = set()
# find group pairs
num_colors = 0
for body in root.find("worldbody"):
for child in body.getiterator():
if child.tag == "site" and re.search("conn_site", child.attrib["name"]):
num_colors += 1
groupPair = child.attrib["name"].split(",")[0]
groupNames = groupPair.split("-")
group1 = groupNames[0]
group2 = groupNames[1]
groupPair2 = group2 + "-" + group1
if group1 not in bodymap.keys():
bodies = set()
bodies.add(body)
bodymap[group1] = bodies
else:
bodymap[group1].add(body)
if groupPair not in connections and groupPair2 not in connections:
connections.add(groupPair)
if config.add_welds == True:
for groupPair in connections:
groupNames = groupPair.split("-")
group1 = groupNames[0]
group2 = groupNames[1]
# n*m welds needed for n bodies in group1 and m bodies in group2
for body1 in bodymap[group1]:
for body2 in bodymap[group2]:
weld = ET.SubElement(equality, "weld")
weld.set("active", "false")
weld.set("body1", body1.attrib["name"])
weld.set("body2", body2.attrib["name"])
weld.set("solimp", "1 1 0.5")
weld.set("solref", "0.01 0.3")
if config.add_colors == True:
num_colors = int(num_colors / 2)
colors = _get_colors(num_colors)
# for color in colors:
# print(color)
i = 0
colormap = dict()
for body in root.find("worldbody"):
for child in body.getiterator():
if child.tag == "site" and re.search("conn_site", child.attrib["name"]):
groupPair = child.attrib["name"].split(",")[0]
if groupPair not in colormap:
groupNames = groupPair.split("-")
group1 = groupNames[0]
group2 = groupNames[1]
colormap[groupPair] = colors[i]
groupPair2 = group2 + "-" + group1
colormap[groupPair2] = colors[i]
i += 1
# change color of conn_site
child.set("rgba", colormap[groupPair])
tree.write(config.path, encoding="UTF-8")
if __name__ == "__main__":
main()
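# Example invocation (illustrative; the path below is a placeholder, not a file
# shipped with this script):
#   python xml_edit.py --path ../models/assets/objects/some_furniture.xml \
#       --add_welds True --add_colors True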
|
samples/features/high availability/Linux/Ansible Playbook/library/mssql_ag.py | manikanth/sql-server-samples | 4,474 | 11159637 |
#!/usr/bin/python
# Copyright (c) 2017 Microsoft Corporation
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
---
module: mssql_ag
short_description: Add or join availability groups on a SQL Server instance
description:
- Add or join availability groups on a SQL Server instance.
version_added: "2.2"
author: <NAME> (@arsing)
options:
name:
description:
- The name of the availability group to add
required: true
state:
description:
- The state to set the local replica to
choices: ["all_secondaries_or_unjoined", "all_joined_to_one_primary"]
required: true
all_replicas:
description:
- A list of all the replicas of the AG
required: false
primary:
description:
- The replica that should become the primary
required: false
local_replica:
description:
- The name of the local replica
required: false
dbm_endpoint_port:
description:
- The port of the DBM endpoint
required: false
login_port:
description:
- The TDS port of the instance
required: false
default: 1433
login_name:
description:
- The name of the user to log in to the instance
required: true
login_password:
description:
- The password of the user to log in to the instance
required: true
notes:
- Requires the mssql-tools package on the remote host.
requirements:
- python >= 2.7
- mssql-tools
'''.replace('\t', ' ')
EXAMPLES = '''
# Set all replicas of AG foo to secondary
- mssql_ag:
name: foo
state: all_secondaries_or_unjoined
login_name: sa
login_password: password
# Join all replicas of AG foo to primary on the first server in the group named servers
- mssql_ag:
name: foo
state: all_joined_to_one_primary
all_replicas: "{{ groups['servers'] }}"
primary: "{{ groups['servers'][0] }}"
local_replica: "{{ inventory_hostname }}"
login_name: sa
login_password: password
'''.replace('\t', ' ')
RETURN = '''
name:
description: The name of the AG that was created or joined
returned: success
type: string
sample: foo
'''.replace('\t', ' ')
from ansible.module_utils.basic import AnsibleModule
import subprocess
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required = True),
state = dict(choices = ['all_secondaries_or_unjoined', 'all_joined_to_one_primary'], required = True),
all_replicas = dict(type = 'list', required = False),
primary = dict(required = False),
local_replica = dict(required = False),
dbm_endpoint_port = dict(required = False),
login_port = dict(required = False, default = 1433),
login_name = dict(required = True),
login_password = dict(required = True, no_log = True)
),
required_if = [
['state', 'all_joined_to_one_primary', ['all_replicas', 'primary', 'local_replica', 'dbm_endpoint_port']]
]
)
name = module.params['name']
state = module.params['state']
all_replicas = module.params['all_replicas']
primary = module.params['primary']
local_replica = module.params['local_replica']
dbm_endpoint_port = module.params['dbm_endpoint_port']
login_port = module.params['login_port']
login_name = module.params['login_name']
login_password = module.params['login_password']
if state == "all_secondaries_or_unjoined":
sqlcmd(login_port, login_name, login_password, """
IF EXISTS (
SELECT * FROM sys.availability_groups WHERE name = {0}
)
ALTER AVAILABILITY GROUP {1} SET (ROLE = SECONDARY)
;
""".format(
quoteName(name, "'"),
quoteName(name, '[')
))
elif primary == local_replica:
def replica_spec(name, endpoint_port):
return """
{0} WITH (
ENDPOINT_URL = {1},
AVAILABILITY_MODE = SYNCHRONOUS_COMMIT,
FAILOVER_MODE = EXTERNAL,
SEEDING_MODE = AUTOMATIC
)
""".format(
quoteName(name.split('.')[0], "'"),
quoteName('tcp://{0}:{1}'.format(name, endpoint_port), "'")
)
sqlcmd(login_port, login_name, login_password, """
IF NOT EXISTS (
SELECT * FROM sys.availability_groups WHERE name = {0}
)
CREATE AVAILABILITY GROUP {1}
WITH (CLUSTER_TYPE = EXTERNAL, DB_FAILOVER = ON)
FOR REPLICA ON {2}
ELSE IF NOT EXISTS (
SELECT *
FROM sys.dm_hadr_availability_replica_states ars
JOIN sys.availability_groups ag ON ars.group_id = ag.group_id
WHERE ag.name = {0} AND ars.is_local = 1 AND ars.role = 1
)
BEGIN
EXEC sp_set_session_context @key = N'external_cluster', @value = N'yes', @read_only = 1
ALTER AVAILABILITY GROUP {1} FAILOVER
END
;
ALTER AVAILABILITY GROUP {1} GRANT CREATE ANY DATABASE
;
""".format(
quoteName(name, "'"),
quoteName(name, '['),
replica_spec(primary, dbm_endpoint_port)
))
for replica in all_replicas:
if replica != primary:
sqlcmd(login_port, login_name, login_password, """
IF NOT EXISTS (
SELECT *
FROM sys.availability_replicas ar
JOIN sys.availability_groups ag ON ar.group_id = ag.group_id
WHERE ag.name = {0} AND ar.replica_server_name = {2}
)
ALTER AVAILABILITY GROUP {1}
ADD REPLICA ON {3}
;
""".format(
quoteName(name, "'"),
quoteName(name, '['),
quoteName(replica.split('.')[0], "'"),
replica_spec(replica, dbm_endpoint_port)
))
else:
sqlcmd(login_port, login_name, login_password, """
IF NOT EXISTS (
SELECT * FROM sys.availability_groups WHERE name = {0}
)
ALTER AVAILABILITY GROUP {1} JOIN WITH (CLUSTER_TYPE = EXTERNAL)
;
ALTER AVAILABILITY GROUP {1} GRANT CREATE ANY DATABASE
;
""".format(
quoteName(name, "'"),
quoteName(name, '[')
))
module.exit_json(changed = True, name = name)
def sqlcmd(login_port, login_name, login_password, command):
subprocess.check_call([
'/opt/mssql-tools/bin/sqlcmd',
'-S',
"localhost,{0}".format(login_port),
'-U',
login_name,
'-P',
login_password,
'-b',
'-Q',
command
])
def quoteName(name, quote_char):
if quote_char == '[' or quote_char == ']':
(quote_start_char, quote_end_char) = ('[', ']')
elif quote_char == "'":
(quote_start_char, quote_end_char) = ("N'", "'")
else:
raise Exception("Unsupported quote_char {0}, must be [ or ] or '".format(quote_char))
return "{0}{1}{2}".format(quote_start_char, name.replace(quote_end_char, quote_end_char + quote_end_char), quote_end_char)
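# Illustrative examples (not part of the original module):
#   quoteName("my]ag", "]")   -> "[my]]ag]"      (bracket-quoted identifier)
#   quoteName("o'brien", "'") -> "N'o''brien'"   (N-prefixed string literal)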
if __name__ == '__main__':
main()
|
tests/test_model_definition/test_model_construct.py | ivangirko/ormar | 905 | 11159640 |
from typing import List
import databases
import pytest
import sqlalchemy
import ormar
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
class NickNames(ormar.Model):
class Meta:
tablename = "nicks"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="hq_name")
class NicksHq(ormar.Model):
class Meta:
tablename = "nicks_x_hq"
metadata = metadata
database = database
class HQ(ormar.Model):
class Meta:
tablename = "hqs"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="hq_name")
nicks: List[NickNames] = ormar.ManyToMany(NickNames, through=NicksHq)
class Company(ormar.Model):
class Meta:
tablename = "companies"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="company_name")
founded: int = ormar.Integer(nullable=True)
hq: HQ = ormar.ForeignKey(HQ)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
@pytest.mark.asyncio
async def test_init_and_construct_has_same_effect():
async with database:
async with database.transaction(force_rollback=True):
hq = await HQ.objects.create(name="Main")
comp = Company(name="Banzai", hq=hq, founded=1988)
comp2 = Company.construct(**dict(name="Banzai", hq=hq, founded=1988))
assert comp.dict() == comp2.dict()
comp3 = Company.construct(**dict(name="Banzai", hq=hq.dict(), founded=1988))
assert comp.dict() == comp3.dict()
@pytest.mark.asyncio
async def test_init_and_construct_has_same_effect_with_m2m():
async with database:
async with database.transaction(force_rollback=True):
n1 = await NickNames(name="test").save()
n2 = await NickNames(name="test2").save()
hq = HQ(name="Main", nicks=[n1, n2])
hq2 = HQ.construct(**dict(name="Main", nicks=[n1, n2]))
assert hq.dict() == hq2.dict()
hq3 = HQ.construct(**dict(name="Main", nicks=[n1.dict(), n2.dict()]))
assert hq.dict() == hq3.dict()
|
modules/drizzle.py | beornf/salt-contrib | 111 | 11159644 | # -*- coding: utf-8 -*-
'''
Drizzle is a MySQL fork optimized for Net and Cloud performance.
This module provides Drizzle compatibility to Salt execution
:Depends: MySQLdb python module
:Configuration: The following changes are to be made in
/etc/salt/minion on respective minions
Example::
drizzle.host: '127.0.0.1'
drizzle.port: 4427
drizzle.user: 'root'
drizzle.passwd: ''
drizzle.db: 'drizzle'
Configuration file can also be included such as::
drizzle.default_file: '/etc/drizzle/config.cnf'
'''
# Importing the required libraries
from __future__ import absolute_import
import re
import salt.utils
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.six.moves import zip
try:
import MySQLdb
import MySQLdb.cursors
has_mysqldb = True
except ImportError:
has_mysqldb = False
# Salt Dictionaries
__outputter__ = {
'ping': 'txt',
'status': 'yaml',
'version': 'yaml',
'schemas': 'yaml',
'schema_exists': 'txt',
'schema_create': 'txt',
'schema_drop': 'txt',
'tables': 'yaml',
'table_find': 'yaml',
'query': 'txt'
}
__opts__ = __salt__['test.get_opts']()
# Check for loading the module
def __virtual__():
'''
This module is loaded only if the
database and the libraries are present
'''
# Finding the path of the binary
has_drizzle = False
if salt.utils.which('drizzle'):
has_drizzle = True
# Determining load status of module
if has_mysqldb and has_drizzle:
return 'drizzle'
return False
# Helper functions
def _connect(**dsn):
'''
This method is used to establish a connection
and returns the connection
'''
# Initializing the required variables
dsn_url = {}
parameter = ['host', 'user', 'passwd', 'db', 'port']
# Gathering the dsn information
for param in parameter:
if param in dsn:
dsn_url[param] = dsn[param]
else:
dsn_url[param] = __opts__['drizzle.{0}'.format(param)]
# Connecting to Drizzle!
drizzle_db = MySQLdb.connect(**dsn_url)
drizzle_db.autocommit(True)
return drizzle_db
# Server functions
def status():
'''
Show the status of the Drizzle server
as Variable_name and Value
CLI Example::
salt '*' drizzle.status
'''
# Initializing the required variables
ret_val = {}
drizzle_db = _connect()
cursor = drizzle_db.cursor()
# Fetching status
cursor.execute('SHOW STATUS')
for iter in range(cursor.rowcount):
status = cursor.fetchone()
ret_val[status[0]] = status[1]
cursor.close()
drizzle_db.close()
return ret_val
def version():
'''
Returns the version of Drizzle server
that is running on the minion
CLI Example::
salt '*' drizzle.version
'''
drizzle_db = _connect()
cursor = drizzle_db.cursor(MySQLdb.cursors.DictCursor)
# Fetching version
cursor.execute('SELECT VERSION()')
version = cursor.fetchone()
cursor.close()
drizzle_db.close()
return version
# Database functions
def schemas():
'''
Displays the schemas which are already
present in the Drizzle server
CLI Example::
salt '*' drizzle.schemas
'''
# Initializing the required variables
ret_val = {}
drizzle_db = _connect()
cursor = drizzle_db.cursor()
    # Retrieving the list of schemas
cursor.execute('SHOW SCHEMAS')
for iter, count in zip(list(range(cursor.rowcount)), list(range(1, cursor.rowcount+1))):
schema = cursor.fetchone()
ret_val[count] = schema[0]
cursor.close()
drizzle_db.close()
return ret_val
def schema_exists(schema):
'''
This method is used to find out whether
the given schema already exists or not
CLI Example::
        salt '*' drizzle.schema_exists schema_name
'''
drizzle_db = _connect()
cursor = drizzle_db.cursor()
    # Checking for existence
cursor.execute('SHOW SCHEMAS LIKE "{0}"'.format(schema))
cursor.fetchall()
    return cursor.rowcount == 1
def schema_create(schema):
'''
This method is used to create a schema.
It takes the name of the schema as argument
CLI Example::
salt '*' drizzle.schema_create schema_name
'''
drizzle_db = _connect()
cursor = drizzle_db.cursor()
# Creating schema
try:
cursor.execute('CREATE SCHEMA {0}'.format(schema))
except MySQLdb.ProgrammingError:
return 'Schema already exists'
cursor.close()
drizzle_db.close()
return True
def schema_drop(schema):
'''
This method is used to drop a schema.
It takes the name of the schema as argument.
CLI Example::
salt '*' drizzle.schema_drop schema_name
'''
drizzle_db = _connect()
cursor = drizzle_db.cursor()
# Dropping schema
try:
cursor.execute('DROP SCHEMA {0}'.format(schema))
except MySQLdb.OperationalError:
return 'Schema does not exist'
cursor.close()
drizzle_db.close()
return True
def tables(schema):
'''
Displays all the tables that are
present in the given schema
CLI Example::
salt '*' drizzle.tables schema_name
'''
# Initializing the required variables
ret_val = {}
drizzle_db = _connect()
cursor = drizzle_db.cursor()
# Fetching tables
try:
cursor.execute('SHOW TABLES IN {0}'.format(schema))
except MySQLdb.OperationalError:
return 'Unknown Schema'
for iter, count in zip(list(range(cursor.rowcount)), list(range(1, cursor.rowcount+1))):
table = cursor.fetchone()
ret_val[count] = table[0]
cursor.close()
drizzle_db.close()
return ret_val
def table_find(table_to_find):
'''
Finds the schema in which the
given table is present
CLI Example::
salt '*' drizzle.table_find table_name
'''
# Initializing the required variables
ret_val = {}
count = 1
drizzle_db = _connect()
cursor = drizzle_db.cursor()
# Finding the schema
schema = schemas()
for schema_iter in six.iterkeys(schema):
table = tables(schema[schema_iter])
for table_iter in six.iterkeys(table):
if table[table_iter] == table_to_find:
ret_val[count] = schema[schema_iter]
count = count+1
cursor.close()
drizzle_db.close()
return ret_val
# Plugin functions
def plugins():
'''
Fetches the plugins added to the database server
CLI Example::
salt '*' drizzle.plugins
'''
# Initializing the required variables
ret_val = {}
count = 1
drizzle_db = _connect()
cursor = drizzle_db.cursor()
# Fetching the plugins
query = 'SELECT PLUGIN_NAME FROM DATA_DICTIONARY.PLUGINS WHERE IS_ACTIVE LIKE "YES"'
cursor.execute(query)
for iter, count in zip(list(range(cursor.rowcount)), list(range(1, cursor.rowcount+1))):
table = cursor.fetchone()
ret_val[count] = table[0]
cursor.close()
drizzle_db.close()
return ret_val
# TODO: Needs to add plugin_add() and plugin_remove() methods.
# However, only some of the plugins are dynamic at the moment.
# Remaining plugins need the server to be restarted.
# Hence, these methods can be hacked in the future!
# Query functions
def query(schema, query):
'''
Query method is used to issue any query to the database.
This method also supports multiple queries.
CLI Example::
salt '*' drizzle.query test_db 'select * from test_table'
salt '*' drizzle.query test_db 'insert into test_table values (1,"test1")'
'''
# Initializing the required variables
ret_val = {}
result = {}
drizzle_db = _connect()
cursor = drizzle_db.cursor()
columns = ()
rows = ()
tuples = {}
queries = []
_entry = True
    # Support for multiple queries
queries = query.split(";")
# Using the schema
try:
cursor.execute('USE {0}'.format(schema))
except MySQLdb.Error:
return 'check your schema'
# Issuing the queries
for issue in queries:
try:
rows_affected = cursor.execute(issue)
except MySQLdb.Error:
return 'Error in your SQL statement'
# Checking whether the query is a SELECT
if re.search(r'\s*select', issue) is None:
result['Rows affected:'] = rows_affected
ret_val[issue.lower()] = result
result = {}
continue
# Fetching the column names
if _entry:
attributes = cursor.description
for column_names in attributes:
columns += (column_names[0],)
_entry = False
result['columns'] = columns
# Fetching the tuples
count = 1
for iter in range(cursor.rowcount):
row = cursor.fetchone()
result['row{0}'.format(count)] = row
count += 1
result['Rows selected:'] = count-1
ret_val[issue.lower()] = result
result = {}
return ret_val
def ping():
'''
Checks whether Drizzle module is loaded or not
'''
return True
|
codigo/Live115/observer_concreto.py | cassiasamp/live-de-python | 572 | 11159688 | from observer_abc import Observador, Observavel
class BolaDeCristal:
def atualizar(self, mensagem):
print(f"Fausto está na escola, mas recebeu a mensagem: {mensagem}")
class Centauro:
def __init__(self):
self._observers = []
def adicionar_observer(self, observador):
self._observers.append(observador)
def notificar_observers(self, mensagem):
for observador in self._observers:
observador.atualizar(mensagem)
class Unicornio:
def __init__(self):
self._observers = []
def adicionar_observer(self, observador):
self._observers.append(observador)
def notificar_observers(self, mensagem):
for observador in self._observers:
observador.atualizar(mensagem)
unicornio = Unicornio()
unicornio.adicionar_observer(BolaDeCristal())
unicornio.notificar_observers('Os mortos chegaram #medo')
centauro = Centauro()
centauro.adicionar_observer(BolaDeCristal())
centauro.notificar_observers('Os mortos chegaram #medo')
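# English gloss of the Portuguese demo strings above (added for readers of this
# example): "Fausto está na escola, mas recebeu a mensagem" ≈ "Fausto is at
# school, but received the message", and "Os mortos chegaram #medo" ≈ "The dead
# have arrived #fear".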
|
aries_cloudagent/protocols/present_proof/v1_0/handlers/presentation_problem_report_handler.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 11159722 |
"""Presentation problem report message handler."""
from .....messaging.base_handler import BaseHandler
from .....messaging.request_context import RequestContext
from .....messaging.responder import BaseResponder
from .....storage.error import StorageError, StorageNotFoundError
from ..manager import PresentationManager
from ..messages.presentation_problem_report import PresentationProblemReport
class PresentationProblemReportHandler(BaseHandler):
"""Message handler class for problem reports."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Message handler logic for problem reports.
Args:
context: request context
responder: responder callback
"""
self._logger.debug(
"Present-proof v1.0 problem report handler called with context %s",
context,
)
assert isinstance(context.message, PresentationProblemReport)
presentation_manager = PresentationManager(context.profile)
try:
await presentation_manager.receive_problem_report(
context.message,
context.connection_record.connection_id,
)
except (StorageError, StorageNotFoundError):
self._logger.exception(
"Error processing present-proof v1.0 problem report message"
)
|
src/python/tests/test_estimator_checks.py | montehoover/NimbusML | 134 | 11159729 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
"""
run check_estimator tests
"""
import json
import os
import distro
import unittest
from nimbusml.cluster import KMeansPlusPlus
from nimbusml.decomposition import FactorizationMachineBinaryClassifier
from nimbusml.ensemble import EnsembleClassifier
from nimbusml.ensemble import EnsembleRegressor
from nimbusml.ensemble import LightGbmBinaryClassifier
from nimbusml.ensemble import LightGbmClassifier
from nimbusml.ensemble import LightGbmRanker
from nimbusml.ensemble import LightGbmRegressor
from nimbusml.feature_extraction.text import NGramFeaturizer
from nimbusml.internal.entrypoints._ngramextractor_ngram import n_gram
from nimbusml.preprocessing import TensorFlowScorer, DateTimeSplitter
from nimbusml.linear_model import SgdBinaryClassifier
from nimbusml.preprocessing.filter import SkipFilter, TakeFilter
from nimbusml.preprocessing.normalization import RobustScaler
from nimbusml.timeseries import (IidSpikeDetector, IidChangePointDetector,
SsaSpikeDetector, SsaChangePointDetector,
SsaForecaster)
from sklearn.utils.estimator_checks import _yield_all_checks, MULTI_OUTPUT
this = os.path.abspath(os.path.dirname(__file__))
OMITTED_CHECKS = {
# by design consistent input with model
# don't accept randomly created inputs
'TensorFlowScorer': 'check_dict_unchanged, '
'check_dont_overwrite_parameters, '
'check_dtype_object, '
'check_estimator_sparse_data, '
'check_estimators_dtypes, '
'check_estimators_fit_returns_self, '
'check_estimators_overwrite_params, '
'check_estimators_pickle, '
'check_fit1d_1feature, '
'check_fit2d_1feature, '
'check_fit2d_1sample, '
'check_fit2d_predict1d, '
'check_fit_score_takes_y, '
'check_pipeline_consistency, '
'check_transformer_data_not_an_array, '
'check_transformer_general',
# by design non-determenistic output
'BootstrapSampler': 'check_transformer_general, '
'check_transformer_data_not_an_array',
# by design non-determenistic output
'ColumnDropper': 'check_transformer_general, '
'check_transformer_data_not_an_array',
# I8 should not have NA values
'CountSelector':
'check_estimators_dtypes',
# DateTimeSplitter does not work with floating point types.
'DateTimeSplitter':
'check_transformer_general, check_pipeline_consistency'
'check_estimators_pickle, check_estimators_dtypes'
'check_dict_unchanged, check_dtype_object, check_fit_score_takes_y'
'check_transformer_data_not_an_array, check_fit1d_1feature,'
'check_fit2d_1feature, check_fit2d_predict1d, check_estimators_overwrite_params,'
'check_estimator_sparse_data, check_fit2d_1sample, check_dont_overwrite_parameters,'
'check_estimators_fit_returns_self',
# by design returns smaller number of rows
'SkipFilter': 'check_transformer_general, '
'check_transformer_data_not_an_array',
# fix pending in PR, bug cant handle csr matrix
'RangeFilter': 'check_estimators_dtypes, '
'check_estimator_sparse_data',
# time series do not currently support sparse matrices
'IidSpikeDetector': 'check_estimator_sparse_data',
'IidChangePointDetector': 'check_estimator_sparse_data',
'SsaSpikeDetector': 'check_estimator_sparse_data'
'check_fit2d_1sample', # SSA requires more than one sample
'SsaChangePointDetector': 'check_estimator_sparse_data'
'check_fit2d_1sample', # SSA requires more than one sample
'SsaForecaster': 'check_estimator_sparse_data'
'check_fit2d_1sample', # SSA requires more than one sample
# bug, low tolerance
'FastLinearRegressor': 'check_supervised_y_2d, '
'check_regressor_data_not_an_array, '
'check_regressors_int, '
# todo: investigate
'check_regressors_train',
# bug decision function shape should be 1
# dimensional arrays, tolerance
'FastLinearClassifier': 'check_classifiers_train',
'FastForestRegressor': 'check_fit_score_takes_y', # bug
'EnsembleClassifier': 'check_supervised_y_2d, '
'check_classifiers_train',
'EnsembleRegressor': 'check_supervised_y_2d, '
'check_regressors_train',
# bug in decision_function
'FastTreesBinaryClassifier':
'check_decision_proba_consistency',
# I8 should not have NA values
'Filter':
'check_estimators_dtypes',
# I8 should not have NA values
'Handler':
'check_estimators_dtypes',
# I8 should not have NA values
'Indicator':
'check_estimators_dtypes',
# tolerance
'LogisticRegressionClassifier': 'check_classifiers_train',
# todo: investigate
'OnlineGradientDescentRegressor': 'check_regressors_train',
# bug decision function shape, prediction bug
'NaiveBayesClassifier':
'check_classifiers_train, check_classifiers_classes',
# bugs cant handle negative label
'PoissonRegressionRegressor':
'check_regressors_train, '
'check_regressors_no_decision_function',
'MutualInformationSelector':
'check_dtype_object, check_estimators_dtypes, \
check_estimators_pickle, '
'check_transformer_data_not_an_array, '
'check_transformer_general, \
check_fit1d_1feature, check_fit_score_takes_y, '
'check_fit2d_predict1d, '
'check_dont_overwrite_parameters, \
check_fit2d_1sample, check_dict_unchanged, '
'check_estimators_overwrite_params, '
'check_estimators_fit_returns_self, \
check_fit2d_1feature, check_pipeline_consistency, '
'check_estimator_sparse_data',
# bug in decision_function
'SymSgdBinaryClassifier':
'check_decision_proba_consistency',
# bug in decision_function
'LightGbmClassifier': 'check_classifiers_train',
# bug cant handle the data
'LightGbmRegressor': 'check_fit2d_1sample',
# bug, no attribute clusterer.labels_
'KMeansPlusPlus': 'check_clustering',
'LightGbmRanker':
'check_classifiers_regression_target, '
'check_pipeline_consistency, check_supervised_y_2d, '
'check_classifiers_one_label, \
check_classifiers_classes, check_fit2d_1feature, '
'check_classifiers_train, check_fit2d_1sample, '
'check_dont_overwrite_parameters,\
check_classifier_data_not_an_array, check_dtype_object, '
'check_fit_score_takes_y, check_estimators_dtypes,\
check_estimators_nan_inf, check_dict_unchanged, '
'check_fit1d_1feature, check_fit2d_predict1d, \
check_estimators_overwrite_params, '
'check_estimator_sparse_data, check_estimators_pickle, '
'check_estimators_fit_returns_self',
'PcaAnomalyDetector':
'check_pipeline_consistency, check_supervised_y_2d, '
'check_classifiers_classes, check_fit2d_1feature, \
check_estimators_fit_returns_self, '
'check_classifiers_train, '
'check_dont_overwrite_parameters, \
check_classifier_data_not_an_array, check_dtype_object,'
' check_fit_score_takes_y, check_estimators_dtypes,\
check_dict_unchanged, check_fit1d_1feature, '
'check_fit2d_predict1d, '
'check_estimators_overwrite_params, \
check_estimator_sparse_data, check_estimators_pickle, '
'check_estimators_nan_inf',
# RobustScaler does not support vectorized types
'RobustScaler': 'check_estimator_sparse_data',
'ToKeyImputer':
'check_estimator_sparse_data, check_estimators_dtypes',
# Most of these skipped tests are failing because the checks
# require numerical types. ToString returns object types.
# TypeError: ufunc 'isfinite' not supported for the input types
'ToString': 'check_estimator_sparse_data, check_pipeline_consistency'
'check_transformer_data_not_an_array, check_estimators_pickle'
'check_transformer_general',
'OrdinaryLeastSquaresRegressor': 'check_fit2d_1sample'
}
OMITTED_CHECKS_TUPLE = (
'OneHotHashVectorizer, FromKey, DnnFeaturizer, '
'PixelExtractor, Loader, Resizer, \
GlobalContrastRowScaler, PcaTransformer, '
'ColumnConcatenator, Sentiment, CharTokenizer, LightLda, '
'NGramFeaturizer, WordEmbedding, LpScaler, WordTokenizer'
'NGramExtractor',
'check_transformer_data_not_an_array, check_pipeline_consistency, '
'check_fit2d_1feature, check_estimators_fit_returns_self,\
check_fit2d_1sample, '
'check_dont_overwrite_parameters, '
'check_dtype_object, check_fit_score_takes_y, '
'check_estimators_dtypes, \
check_transformer_general, check_dict_unchanged, '
'check_fit1d_1feature, check_fit2d_predict1d, '
'check_estimators_overwrite_params, \
check_estimator_sparse_data, '
'check_estimators_pickle')
OMITTED_CHECKS_ALWAYS = 'check_estimators_nan_inf'
NOBINARY_CHECKS = [
'check_estimator_sparse_data',
'check_dtype_object',
'check_fit_score_takes_y',
'check_fit2d_predict1d',
'check_fit1d_1feature',
'check_dont_overwrite_parameters',
'check_supervised_y_2d',
'check_estimators_fit_returns_self',
'check_estimators_overwrite_params',
'check_estimators_dtypes',
'check_classifiers_classes',
'check_classifiers_train']
INSTANCES = {
'DateTimeSplitter': DateTimeSplitter(prefix='dt', columns=['F0']),
'EnsembleClassifier': EnsembleClassifier(num_models=3),
'EnsembleRegressor': EnsembleRegressor(num_models=3),
'FactorizationMachineBinaryClassifier': FactorizationMachineBinaryClassifier(shuffle=False),
'KMeansPlusPlus': KMeansPlusPlus(n_clusters=2),
'LightGbmBinaryClassifier': LightGbmBinaryClassifier(
minimum_example_count_per_group=1, minimum_example_count_per_leaf=1),
'LightGbmClassifier': LightGbmClassifier(
minimum_example_count_per_group=1, minimum_example_count_per_leaf=1),
'LightGbmRegressor': LightGbmRegressor(
minimum_example_count_per_group=1, minimum_example_count_per_leaf=1),
'LightGbmRanker': LightGbmRanker(
minimum_example_count_per_group=1, minimum_example_count_per_leaf=1),
'NGramFeaturizer': NGramFeaturizer(word_feature_extractor=n_gram()),
'RobustScaler': RobustScaler(scale=False),
'SgdBinaryClassifier': SgdBinaryClassifier(number_of_threads=1, shuffle=False),
'SkipFilter': SkipFilter(count=5),
'TakeFilter': TakeFilter(count=100000),
'IidSpikeDetector': IidSpikeDetector(columns=['F0']),
'IidChangePointDetector': IidChangePointDetector(columns=['F0']),
'SsaSpikeDetector': SsaSpikeDetector(columns=['F0'], seasonal_window_size=2),
'SsaChangePointDetector': SsaChangePointDetector(columns=['F0'], seasonal_window_size=2),
'SsaForecaster': SsaForecaster(columns=['F0'],
window_size=2,
series_length=5,
train_size=5,
horizon=1),
'TensorFlowScorer': TensorFlowScorer(
model_location=os.path.join(
this,
'..',
'nimbusml',
'examples',
'frozen_saved_model.pb'),
columns={'c': ['a', 'b']}),
}
MULTI_OUTPUT_EX = [
'FastLinearClassifier',
'FastLinearRegressor',
'LogisticRegressionClassifier',
'FastTreesRegressor',
'FastForestRegressor',
'FastTreesTweedieRegressor',
'OneClassSvmAnomalyDetector',
'NaiveBayesClassifier',
'GamBinaryClassifier',
'GamRegressor',
'OnlineGradientDescentRegressor',
'OrdinaryLeastSquaresRegressor',
'PoissonRegressionRegressor',
'SymSgdBinaryClassifier',
'LightGbmClassifier',
'LightGbmRegressor']
MULTI_OUTPUT.extend(MULTI_OUTPUT_EX)
skip_epoints = set([
'OneVsRestClassifier',
'TreeFeaturizer',
# skip SymSgdBinaryClassifier for now, because of crashes.
'SymSgdBinaryClassifier',
'DatasetTransformer',
'OnnxRunner',
'TimeSeriesImputer'
])
if 'centos' in distro.linux_distribution(full_distribution_name=False)[0].lower():
skip_epoints |= set([
'DateTimeSplitter',
'RobustScaler',
'ToKeyImputer',
'ToString'])
def load_json(file_path):
with open(file_path) as f:
lines = f.readlines()
lines = [l for l in lines if not l.strip().startswith('#')]
content_without_comments = '\n'.join(lines)
return json.loads(content_without_comments)
def get_epoints():
epoints = []
my_path = os.path.realpath(__file__)
my_dir = os.path.dirname(my_path)
manifest_diff_json = os.path.join(my_dir, '..', 'tools',
'manifest_diff.json')
manifest_diff = load_json(manifest_diff_json)
for e in manifest_diff['EntryPoints']:
if (e['NewName'] not in skip_epoints) and ('LightGbm' not in e['NewName']):
epoints.append((e['Module'], e['NewName']))
return epoints
class TestEstimatorChecks(unittest.TestCase):
# This method is a static method of the class
# because there were pytest fixture related
# issues when the method was in the global scope.
@staticmethod
def generate_test_method(epoint):
def method(self):
failed_checks = set()
passed_checks = set()
class_name = epoint[1]
print("\n======== now Estimator is %s =========== " % class_name)
mod = __import__('nimbusml.' + epoint[0], fromlist=[str(class_name)])
the_class = getattr(mod, class_name)
if class_name in INSTANCES:
estimator = INSTANCES[class_name]
else:
estimator = the_class()
if estimator._use_single_input_as_string():
estimator = estimator << 'F0'
for check in _yield_all_checks(class_name, estimator):
# Skip check_dict_unchanged for estimators which
# update the classes_ attribute. For more details
# see https://github.com/microsoft/NimbusML/pull/200
if (check.__name__ == 'check_dict_unchanged') and \
(hasattr(estimator, 'predict_proba') or
hasattr(estimator, 'decision_function')):
continue
if check.__name__ in OMITTED_CHECKS_ALWAYS:
continue
if 'Binary' in class_name and check.__name__ in NOBINARY_CHECKS:
continue
if class_name in OMITTED_CHECKS and check.__name__ in \
OMITTED_CHECKS[class_name]:
continue
if class_name in OMITTED_CHECKS_TUPLE[0] and check.__name__ in \
OMITTED_CHECKS_TUPLE[1]:
continue
try:
check(class_name, estimator.clone())
passed_checks.add(check.__name__)
except Exception as e:
failed_checks.add(check.__name__)
if len(failed_checks) > 0:
self.fail(msg=str(failed_checks))
return method
for epoint in get_epoints():
test_name = 'test_%s' % epoint[1].lower()
method = TestEstimatorChecks.generate_test_method(epoint)
setattr(TestEstimatorChecks, test_name, method)
if __name__ == '__main__':
unittest.main()
|
tests/framework/PostProcessors/FastFourierTransform/Basic/dataGenerator.py | rinelson456/raven | 159 | 11159738 |
import numpy as np
N = 100
t = np.linspace(0,N,N)
periods = [[4,8,20],
[3.333,13,50]]
amplitudes = [[5,3,10],
[6,1,100]]
def makeSignal(periods,amplitudes,N):
signal = np.zeros(N)
for i in range(len(periods)):
signal += np.sin(2.*np.pi/periods[i] * t) + np.cos(2.*np.pi/periods[i] * t)
return signal
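# Note (added for clarity; not in the original script): makeSignal accepts an
# `amplitudes` argument but does not apply it -- every component above is summed
# with unit amplitude.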
for i in range(2):
signal = makeSignal(periods[i],amplitudes[i],N)
with open('signal_{}.csv'.format(i),'w') as f:
f.writelines('t,signal\n')
for i in range(len(t)):
f.writelines('{},{}\n'.format(t[i],signal[i]))
print(signal)
|
scripts/CreateAssemblyGraphVertices.py | tijyojwad/shasta | 267 | 11159779 | #!/usr/bin/python3
import shasta
a = shasta.Assembler()
a.accessMarkerGraphVertices()
a.accessMarkerGraphEdges()
a.accessMarkerGraphReverseComplementVertex()
a.accessAssemblyGraphEdgeLists()
a.createAssemblyGraphVertices()
|
validators/i18n/fi.py | shouhei/validators | 674 | 11159783 |
import re
from validators.utils import validator
business_id_pattern = re.compile(r'^[0-9]{7}-[0-9]$')
ssn_checkmarks = '0123456789ABCDEFHJKLMNPRSTUVWXY'
ssn_pattern = re.compile(
r"""^
(?P<date>([0-2]\d|3[01])
(0\d|1[012])
(\d{{2}}))
[A+-]
(?P<serial>(\d{{3}}))
(?P<checksum>[{checkmarks}])$""".format(checkmarks=ssn_checkmarks),
re.VERBOSE | re.IGNORECASE
)
@validator
def fi_business_id(business_id):
"""
Validate a Finnish Business ID.
Each company in Finland has a distinct business id. For more
information see `Finnish Trade Register`_
.. _Finnish Trade Register:
http://en.wikipedia.org/wiki/Finnish_Trade_Register
Examples::
>>> fi_business_id('0112038-9') # Fast Monkeys Ltd
True
>>> fi_business_id('1234567-8') # Bogus ID
ValidationFailure(func=fi_business_id, ...)
.. versionadded:: 0.4
.. versionchanged:: 0.5
Method renamed from ``finnish_business_id`` to ``fi_business_id``
:param business_id: business_id to validate
"""
if not business_id or not re.match(business_id_pattern, business_id):
return False
factors = [7, 9, 10, 5, 8, 4, 2]
numbers = map(int, business_id[:7])
checksum = int(business_id[8])
sum_ = sum(f * n for f, n in zip(factors, numbers))
modulo = sum_ % 11
return (11 - modulo == checksum) or (modulo == 0 and checksum == 0)
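# Illustrative worked example (not part of the original module): for '0112038-9'
# the weighted sum is 7*0 + 9*1 + 10*1 + 5*2 + 8*0 + 4*3 + 2*8 = 57, 57 % 11 = 2,
# and 11 - 2 = 9, which matches the check digit.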
@validator
def fi_ssn(ssn):
"""
Validate a Finnish Social Security Number.
This validator is based on `django-localflavor-fi`_.
.. _django-localflavor-fi:
https://github.com/django/django-localflavor-fi/
Examples::
>>> fi_ssn('010101-0101')
True
>>> fi_ssn('101010-0102')
ValidationFailure(func=fi_ssn, args={'ssn': '101010-0102'})
.. versionadded:: 0.5
:param ssn: Social Security Number to validate
"""
if not ssn:
return False
result = re.match(ssn_pattern, ssn)
if not result:
return False
gd = result.groupdict()
checksum = int(gd['date'] + gd['serial'])
return (
ssn_checkmarks[checksum % len(ssn_checkmarks)] ==
gd['checksum'].upper()
)
|
2018-google-quals/sftp/find_password.py | integeruser/on-pwning | 104 | 11159803 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import z3
for PASSWORD_LENGTH in range(1, 16):
solver = z3.Solver()
password = [z3.BitVec('c{}'.format(i), 64) for i in range(PASSWORD_LENGTH)]
for i in range(PASSWORD_LENGTH):
# costraints not really needed, just for finding a password composed of letters
solver.add(
z3.Or(
z3.And(password[i] >= ord('a'), password[i] <= ord('z')),
z3.And(password[i] >= ord('A'), password[i] <= ord('Z'))))
checksum = z3.BitVecVal(0x5417, 64)
rax = 0x0
for i in range(PASSWORD_LENGTH):
rax = rax & 0xffffffffffff0000 | password[i]
rax = rax ^ checksum
checksum = 2 * rax
solver.add(checksum & 0xffff == 0x8DFA)
if solver.check() == z3.sat:
print 'A valid password is:', ''.join(chr(solver.model()[c].as_long()) for c in password)
break
# A valid password is: <PASSWORD>
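# Illustrative note (not part of the original script): the constraints above
# symbolically replay the binary's rolling checksum -- on each step the low 16
# bits of rax are replaced by the next password character, rax is XORed with the
# running checksum, and the checksum becomes twice that result -- and only the
# low 16 bits of the final checksum are required to equal 0x8DFA.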
|
train.py | jack-willturner/DeepCompression-PyTorch | 149 | 11159835 |
"""Train base models to later be pruned"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import argparse
import random
import numpy as np
from models import get_model
from utils import *
from tqdm import tqdm
################################################################## ARGUMENT PARSING
parser = argparse.ArgumentParser(description="PyTorch CIFAR10 Training")
parser.add_argument(
"--model", default="resnet18", help="resnet9/18/34/50, wrn_40_2/_16_2/_40_1"
)
parser.add_argument("--data_loc", default="/disk/scratch/datasets/cifar", type=str)
parser.add_argument("--checkpoint", default=None, type=str)
parser.add_argument("--n_gpus", default=0, type=int, help="Number of GPUs to use")
### training specific args
parser.add_argument("--epochs", default=200, type=int)
parser.add_argument("--lr", default=0.1)
parser.add_argument(
"--lr_decay_ratio", default=0.2, type=float, help="learning rate decay"
)
parser.add_argument("--weight_decay", default=0.0005, type=float)
### reproducibility
parser.add_argument("--seed", default=1, type=int)
args = parser.parse_args()
print(args.data_loc)
################################################################## REPRODUCIBILITY
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
################################################################## MODEL LOADING
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
select_devices(num_gpus_to_use=args.n_gpus)
model = get_model(args.model)
if torch.cuda.is_available():
model = model.cuda()
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model.to(device)
if args.checkpoint is None:
args.checkpoint = args.model
################################################################## TRAINING HYPERPARAMETERS
trainloader, testloader = get_cifar_loaders(args.data_loc)
optimizer = optim.SGD(
[w for name, w in model.named_parameters() if not "mask" in name],
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay,
)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, args.epochs, eta_min=1e-10)
criterion = nn.CrossEntropyLoss()
################################################################## ACTUAL TRAINING
error_history = []
for epoch in tqdm(range(args.epochs)):
train(model, trainloader, criterion, optimizer)
validate(
model,
epoch,
testloader,
criterion,
checkpoint=args.checkpoint if epoch != 2 else args.checkpoint + "_init",
seed=args.seed,
)
scheduler.step()
|
mne/io/snirf/__init__.py | stevemats/mne-python | 1,953 | 11159941 | """SNIRF module for conversion to FIF."""
# Author: <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
from ._snirf import read_raw_snirf
|
datasets/DET/seed/impl/OpenImages.py | zhangzhengde0225/SwinTrack | 143 | 11159972 | from datasets.DET.constructor.base_interface import DetectionDatasetConstructor
from datasets.types.data_split import DataSplit
import os
import csv
from collections import namedtuple
from data.types.bounding_box_format import BoundingBoxFormat
def construct_OpenImages(constructor: DetectionDatasetConstructor, seed):
root_path = seed.root_path
data_split = seed.data_split
splits = []
if data_split & DataSplit.Training:
splits.append('train')
if data_split & DataSplit.Validation:
splits.append('validation')
if data_split & DataSplit.Testing:
splits.append('test')
class_mids = []
class_names = []
for line in open(os.path.join(root_path, 'class-descriptions-boxable.csv'), 'r', encoding='utf-8'):
line = line.strip()
if len(line) == 0:
continue
words = line.split(',')
assert len(words) == 2
class_mids.append(words[0])
class_names.append(words[1])
mid_index_mapper = {mid: index for index, mid in enumerate(class_mids)}
def _construct_sub_dataset(images_path: str, annotation_file_path: str):
constructor.set_category_id_name_map({index: name for index, name in enumerate(class_names)})
images = {}
with open(annotation_file_path, 'r', encoding='utf-8') as fid:
csv_reader = csv.reader(fid)
headings = next(csv_reader)
Row = namedtuple('Row', headings)
last_row_image = None
image_annos = []
for r in csv_reader:
row = Row(*r)
image_name = row.ImageID
if last_row_image != image_name:
if last_row_image is not None:
images[last_row_image] = image_annos
image_annos = []
last_row_image = image_name
image_annos.append(row)
if last_row_image is not None:
images[last_row_image] = image_annos
constructor.set_total_number_of_images(len(images))
constructor.set_bounding_box_format(BoundingBoxFormat.XYXY)
for image_name, image_annos in images.items():
with constructor.new_image() as image_constructor:
image_constructor.set_path(os.path.join(images_path, image_name + '.jpg'))
image_size = image_constructor.get_image_size()
for image_anno in image_annos:
object_category = mid_index_mapper[image_anno.LabelName]
bounding_box = [float(image_anno.XMin) * image_size[0], float(image_anno.XMax) * image_size[0],
float(image_anno.YMin) * image_size[1], float(image_anno.YMax) * image_size[1]]
with image_constructor.new_object() as object_constructor:
object_constructor.set_category_id(object_category)
object_constructor.set_bounding_box(bounding_box)
object_constructor.merge_attributes({'IsOccluded': image_anno.IsOccluded, 'IsTruncated': image_anno.IsTruncated, 'IsGroupOf': image_anno.IsGroupOf,
'IsDepiction': image_anno.IsDepiction, 'IsInside': image_anno.IsInside})
if data_split & DataSplit.Training:
_construct_sub_dataset(os.path.join(root_path, 'train'), os.path.join(root_path, 'oidv6-train-annotations-bbox.csv'))
if data_split & DataSplit.Validation:
_construct_sub_dataset(os.path.join(root_path, 'validation'), os.path.join(root_path, 'validation-annotations-bbox.csv'))
if data_split & DataSplit.Testing:
_construct_sub_dataset(os.path.join(root_path, 'test'), os.path.join(root_path, 'test-annotations-bbox.csv'))
|
gobbli/test/experiment/test_base_experiment.py | awesome-archive/gobbli | 276 | 11159993 |
from pathlib import Path
import pytest
import ray
from gobbli.test.util import MockDataset, MockExperiment, MockModel, skip_if_no_gpu
def test_base_experiment_init(tmpdir):
tmpdir_path = Path(tmpdir)
ds = MockDataset.load()
# Create experiment
exp = MockExperiment(MockModel, ds, data_dir=tmpdir_path / "test")
assert exp.metadata_path.exists()
# Shouldn't be able to create another experiment without ignoring the ray initialization error
with pytest.raises(RuntimeError):
MockExperiment(MockModel, ds, data_dir=tmpdir_path / "test2")
MockExperiment(
MockModel, ds, data_dir=tmpdir_path / "test3", ignore_ray_initialized_error=True
)
# Shouldn't be able to create another experiment in the same path
with pytest.raises(ValueError):
MockExperiment(
MockModel,
ds,
data_dir=tmpdir_path / "test",
ignore_ray_initialized_error=True,
)
# ...unless we pass overwrite_existing = True
MockExperiment(
MockModel,
ds,
data_dir=tmpdir_path / "test",
ignore_ray_initialized_error=True,
overwrite_existing=True,
)
# Limit should be obeyed
assert len(exp.X) == len(MockDataset.X_TRAIN_VALID) + len(MockDataset.X_TEST)
assert len(exp.y) == len(MockDataset.Y_TRAIN_VALID) + len(MockDataset.Y_TEST)
exp_limit = MockExperiment(
MockModel,
ds,
limit=1,
data_dir=tmpdir_path / "test_limit",
ignore_ray_initialized_error=True,
)
assert len(exp_limit.X) == 1
assert len(exp_limit.y) == 1
def test_base_experiment_gpu(tmpdir, request):
skip_if_no_gpu(request.config)
tmpdir_path = Path(tmpdir)
ds = MockDataset.load()
MockExperiment(
MockModel,
ds,
data_dir=tmpdir_path / "test",
ray_kwargs={"num_gpus": 1},
ignore_ray_initialized_error=True,
)
# Make sure GPUs are available
# in a mock remote function
# They won't necessarily be available on the master process
@ray.remote(num_gpus=1)
def find_gpus():
return ray.get_gpu_ids()
assert len(ray.get(find_gpus.remote())) > 0
|
bookwyrm/migrations/0095_merge_20210911_2143.py | mouse-reeve/fedireads | 270 | 11159998 |
# Generated by Django 3.2.4 on 2021-09-11 21:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0094_auto_20210911_1550"),
("bookwyrm", "0094_importitem_book_guess"),
]
operations = []
|
zentral/core/queues/backends/google_pubsub.py | arubdesu/zentral | 634 | 11160000 |
from importlib import import_module
import logging
import time
from django.utils.text import slugify
from kombu.utils import json
from google.api_core.exceptions import AlreadyExists
from google.cloud import pubsub_v1
from google.oauth2 import service_account
from zentral.conf import settings
logger = logging.getLogger('zentral.core.queues.backends.google_pubsub')
class BaseWorker:
name = "UNDEFINED"
counters = []
def setup_metrics_exporter(self, *args, **kwargs):
self.metrics_exporter = kwargs.pop("metrics_exporter", None)
if self.metrics_exporter:
for name, label in self.counters:
self.metrics_exporter.add_counter(name, [label])
self.metrics_exporter.start()
def inc_counter(self, name, label):
if self.metrics_exporter:
self.metrics_exporter.inc(name, label)
def log(self, msg, level, *args):
logger.log(level, "{} - {}".format(self.name, msg), *args)
def log_debug(self, msg, *args):
self.log(msg, logging.DEBUG, *args)
def log_info(self, msg, *args):
self.log(msg, logging.INFO, *args)
def log_error(self, msg, *args):
self.log(msg, logging.ERROR, *args)
class PreprocessWorker(BaseWorker):
name = "preprocess worker"
counters = (
("preprocessed_events", "routing_key"),
("produced_events", "event_type"),
)
def __init__(self, raw_events_topic, events_topic, credentials):
self.raw_events_topic = raw_events_topic
self.events_topic = events_topic
self.credentials = credentials
# preprocessors
self.preprocessors = {
preprocessor.routing_key: preprocessor
for preprocessor in self._get_preprocessors()
}
def _get_preprocessors(self):
for app in settings['apps']:
try:
preprocessors_module = import_module("{}.preprocessors".format(app))
except ImportError:
pass
else:
yield from getattr(preprocessors_module, "get_preprocessors")()
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
# subscriber client
self.log_info("initialize subscriber")
subscriber_client = pubsub_v1.SubscriberClient(credentials=self.credentials)
project_id = self.raw_events_topic.split("/")[1]
sub_path = subscriber_client.subscription_path(project_id, "raw-events-subscription")
# create subscription
try:
subscriber_client.create_subscription(sub_path, self.raw_events_topic)
except AlreadyExists:
self.log_info("preprocess worker subscription %s already exists", sub_path)
else:
self.log_info("preprocess worker subscription %s created", sub_path)
# publisher client
self.log_info("initialize publisher")
self.publisher_client = pubsub_v1.PublisherClient(credentials=self.credentials)
# async pull
self.log_info("start async pull")
pull_future = subscriber_client.subscribe(sub_path, self.callback)
with subscriber_client:
try:
pull_future.result()
except Exception:
pull_future.cancel()
def callback(self, message):
routing_key = message.attributes.get("routing_key")
if not routing_key:
self.log_error("Message w/o routing key")
else:
preprocessor = self.preprocessors.get(routing_key)
if not preprocessor:
self.log_error("No preprocessor for routing key %s", routing_key)
else:
for event in preprocessor.process_raw_event(json.loads(message.data)):
new_message = json.dumps(event.serialize(machine_metadata=False)).encode("utf-8")
self.publisher_client.publish(self.events_topic, new_message)
self.inc_counter("produced_events", event.event_type)
message.ack()
self.inc_counter("preprocessed_events", routing_key or "UNKNOWN")
class EnrichWorker(BaseWorker):
name = "enrich worker"
counters = (
("enriched_events", "event_type"),
("produced_events", "event_type"),
)
def __init__(self, events_topic, enriched_events_topic, credentials, enrich_event):
self.events_topic = events_topic
self.enriched_events_topic = enriched_events_topic
self.credentials = credentials
self.enrich_event = enrich_event
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
# subscriber client
self.log_info("initialize subscriber")
subscriber_client = pubsub_v1.SubscriberClient(credentials=self.credentials)
project_id = self.events_topic.split("/")[1]
sub_path = subscriber_client.subscription_path(project_id, "events-subscription")
# create subscription
try:
subscriber_client.create_subscription(sub_path, self.events_topic)
except AlreadyExists:
self.log_info("enrich worker subscription %s already exists", sub_path)
else:
self.log_info("enrich worker subscription %s created", sub_path)
# publisher client
self.log_info("initialize publisher")
self.publisher_client = pubsub_v1.PublisherClient(credentials=self.credentials)
# async pull
self.log_info("start async pull")
pull_future = subscriber_client.subscribe(sub_path, self.callback)
with subscriber_client:
try:
pull_future.result()
except Exception:
pull_future.cancel()
def callback(self, message):
event_dict = json.loads(message.data)
event_type = event_dict['_zentral']['type']
try:
for event in self.enrich_event(event_dict):
new_message = json.dumps(event.serialize(machine_metadata=True)).encode("utf-8")
self.publisher_client.publish(self.enriched_events_topic, new_message)
self.inc_counter("produced_events", event.event_type)
except Exception as exception:
logger.exception("Requeuing message with 1s delay: %s", exception)
time.sleep(1)
message.nack()
else:
message.ack()
self.inc_counter("enriched_events", event_type)
class ProcessWorker(BaseWorker):
name = "process worker"
counters = (
("processed_events", "event_type"),
)
def __init__(self, enriched_events_topic, credentials, process_event):
self.enriched_events_topic = enriched_events_topic
self.credentials = credentials
self.process_event = process_event
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
# subscriber client
self.log_info("initialize subscriber")
subscriber_client = pubsub_v1.SubscriberClient(credentials=self.credentials)
project_id = self.enriched_events_topic.split("/")[1]
sub_path = subscriber_client.subscription_path(project_id, "process-enriched-events-subscription")
# create subscription
try:
subscriber_client.create_subscription(sub_path, self.enriched_events_topic)
except AlreadyExists:
self.log_info("process worker subscription %s already exists", sub_path)
else:
self.log_info("process worker subscription %s created", sub_path)
# async pull
self.log_info("start async pull")
pull_future = subscriber_client.subscribe(sub_path, self.callback)
with subscriber_client:
try:
pull_future.result()
except Exception:
pull_future.cancel()
def callback(self, message):
event_dict = json.loads(message.data)
event_type = event_dict['_zentral']['type']
self.process_event(event_dict)
message.ack()
self.inc_counter("processed_events", event_type)
class StoreWorker(BaseWorker):
counters = (
("stored_events", "event_type"),
)
def __init__(self, enriched_events_topic, credentials, event_store):
self.enriched_events_topic = enriched_events_topic
self.credentials = credentials
self.event_store = event_store
self.name = "store worker {}".format(self.event_store.name)
def run(self, *args, **kwargs):
self.log_info("run")
super().setup_metrics_exporter(*args, **kwargs)
# subscriber client
self.log_info("initialize subscriber")
subscriber_client = pubsub_v1.SubscriberClient(credentials=self.credentials)
project_id = self.enriched_events_topic.split("/")[1]
sub_path = subscriber_client.subscription_path(
project_id,
"{}-store-enriched-events-subscription".format(slugify(self.event_store.name))
)
# create subscription
try:
subscriber_client.create_subscription(sub_path, self.enriched_events_topic)
except AlreadyExists:
self.log_info("store worker subscription %s already exists", sub_path)
else:
self.log_info("store worker subscription %s created", sub_path)
# prometheus
prometheus_port = kwargs.pop("prometheus_port", None)
if prometheus_port:
self.log_info("start prometheus server on port %s", prometheus_port)
self.start_prometheus_server(prometheus_port)
# async pull
self.log_info("start async pull")
pull_future = subscriber_client.subscribe(sub_path, self.callback)
with subscriber_client:
try:
pull_future.result()
except Exception:
pull_future.cancel()
def callback(self, message):
self.log_debug("store event")
event_dict = json.loads(message.data)
event_type = event_dict['_zentral']['type']
if not self.event_store.is_event_type_included(event_type):
self.log_debug("skip %s event", event_type)
message.ack()
return
try:
self.event_store.store(event_dict)
except Exception:
logger.exception("Could add event to store %s", self.event_store.name)
message.nack()
else:
message.ack()
self.inc_counter("stored_events", event_type)
class EventQueues(object):
def __init__(self, config_d):
# topics
topics = config_d["topics"]
self.raw_events_topic = topics["raw_events"]
self.events_topic = topics["events"]
self.enriched_events_topic = topics["enriched_events"]
# credentials
self.credentials = None
credentials_file = config_d.get("credentials")
if credentials_file:
credentials = service_account.Credentials.from_service_account_file(credentials_file)
self.credentials = credentials.with_scopes(["https://www.googleapis.com/auth/cloud-platform"])
# publisher client
self.publisher_client = None
def _publish(self, topic, event_dict, **kwargs):
message = json.dumps(event_dict).encode("utf-8")
if self.publisher_client is None:
self.publisher_client = pubsub_v1.PublisherClient(credentials=self.credentials)
self.publisher_client.publish(topic, message, **kwargs)
def get_preprocess_worker(self):
return PreprocessWorker(self.raw_events_topic, self.events_topic, self.credentials)
def get_enrich_worker(self, enrich_event):
return EnrichWorker(self.events_topic, self.enriched_events_topic, self.credentials, enrich_event)
def get_process_worker(self, process_event):
return ProcessWorker(self.enriched_events_topic, self.credentials, process_event)
def get_store_worker(self, event_store):
return StoreWorker(self.enriched_events_topic, self.credentials, event_store)
def post_raw_event(self, routing_key, raw_event):
self._publish(self.raw_events_topic, raw_event, routing_key=routing_key)
def post_event(self, event):
self._publish(self.events_topic, event.serialize(machine_metadata=False))
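# Illustrative usage sketch (not part of the original module). The topic paths
# and the credentials file below are hypothetical placeholders for a GCP setup:
#
#   config_d = {
#       "topics": {
#           "raw_events": "projects/example-project/topics/raw-events",
#           "events": "projects/example-project/topics/events",
#           "enriched_events": "projects/example-project/topics/enriched-events",
#       },
#       "credentials": "/etc/zentral/service-account.json",
#   }
#   event_queues = EventQueues(config_d)
#   preprocess_worker = event_queues.get_preprocess_worker()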
test/lib/test_util.py | Jinnrry/reversi-alpha-zero | 699 | 11160040 | <gh_stars>100-1000
from nose.tools.trivial import eq_
from reversi_zero.lib import util
from reversi_zero.lib.bitboard import board_to_string
def test_parse_to_bitboards_init():
ex = '''
##########
# #
# #
# #
# OX #
# XO #
# #
# #
# #
##########
'''
black, white = util.parse_to_bitboards(ex)
eq_(black, 0b00001000 << 24 | 0b00010000 << 32, f"{ex}\n-------\n{board_to_string(black, white)}")
eq_(white, 0b00010000 << 24 | 0b00001000 << 32, f"{ex}\n-------\n{board_to_string(black, white)}")
def test_parse_to_bitboards():
ex = '''
##########
#OO #
#XOO #
#OXOOO #
# XOX #
# XXX #
# X #
# X #
# X#
##########'''
black, white = util.parse_to_bitboards(ex)
eq_(ex.strip(), board_to_string(black, white).strip(), f"{ex}\n-------\n{board_to_string(black, white)}")
finplot/examples/line.py | shujaatak/finplot | 501 | 11160077
#!/usr/bin/env python3
import finplot as fplt
import numpy as np
import pandas as pd
dates = pd.date_range('01:00', '01:00:01.200', freq='1ms')
prices = pd.Series(np.random.random(len(dates))).rolling(30).mean() + 4
fplt.plot(dates, prices, width=3)
line = fplt.add_line((dates[100], 4.4), (dates[1100], 4.6), color='#9900ff', interactive=True)
## fplt.remove_primitive(line)
text = fplt.add_text((dates[500], 4.6), "I'm here alright!", color='#bb7700')
## fplt.remove_primitive(text)
rect = fplt.add_rect((dates[700], 4.5), (dates[850], 4.4), color='#8c8', interactive=True)
## fplt.remove_primitive(rect)
def save():
fplt.screenshot(open('screenshot.png', 'wb'))
fplt.timer_callback(save, 0.5, single_shot=True) # wait a bit until we're rendered
fplt.show()
veles/tests/test_mutable.py | AkshayJainG/veles | 1,007 | 11160081
<filename>veles/tests/test_mutable.py
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Apr 23, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import unittest
from veles.mutable import Bool, LinkableAttribute
from veles.pickle2 import pickle
class A(object):
pass
class B(object):
pass
class C(object):
def __init__(self):
self.number = 255
class Test(unittest.TestCase):
def set_flag(self, b):
b.flag = True
def testBool(self):
a = Bool()
self.assertFalse(a)
b = Bool(True)
self.assertTrue(b)
c = a | b
self.assertTrue(c)
b <<= False
self.assertFalse(a)
self.assertFalse(b)
self.assertFalse(c)
c = a & b
self.assertFalse(c)
c.flag = False
c.on_false = self.set_flag
a <<= True
self.assertTrue(a)
self.assertFalse(b)
self.assertFalse(c)
self.assertTrue(c.flag)
c.on_false = None
c.on_true = self.set_flag
b <<= True
self.assertTrue(a)
self.assertTrue(b)
self.assertTrue(c)
self.assertTrue(c.flag)
a.unref(c)
b.unref(c)
c.flag = False
c.on_false = self.set_flag
c.on_true = None
b <<= False
self.assertFalse(c.flag)
b <<= True
c = a ^ b
self.assertTrue(a)
self.assertTrue(b)
self.assertFalse(c)
a <<= False
self.assertFalse(a)
self.assertTrue(c)
b <<= False
self.assertFalse(a)
self.assertFalse(b)
self.assertFalse(c)
c = ~a
self.assertFalse(a)
self.assertTrue(c)
a <<= True
self.assertTrue(a)
self.assertFalse(c)
c = a & ~b
self.assertTrue(c)
def testBoolPickling(self):
a = Bool()
self.assertFalse(a)
b = Bool(True)
self.assertTrue(b)
c = a | b
c.on_true = self.set_flag
c2 = pickle.loads(pickle.dumps(c))
self.assertEqual(None, c2.on_true)
def testLinkableAttribute(self):
a = A()
a.number = 77
b = B()
LinkableAttribute(b, "number", (a, "number"), assignment_guard=False)
# link(b, "number", a, "number")
self.assertEqual(77, a.number)
self.assertEqual(77, b.number)
a.number = 100
self.assertEqual(100, a.number)
self.assertEqual(100, b.number)
b.number = 40
self.assertEqual(100, a.number)
self.assertEqual(40, b.number)
LinkableAttribute(b, "number", (a, "number"), True)
b.number = 77
self.assertEqual(77, a.number)
self.assertEqual(77, b.number)
self.assertRaises(ValueError,
LinkableAttribute(b, "number", (b, "number")))
def testLinkableAttribute100(self):
a = A()
a.number = 77
b = []
bb = []
bbb = []
for i in range(100):
b.append(B())
LinkableAttribute(b[i], "number", (a, "number"))
bb.append(B())
LinkableAttribute(bb[i], "number", (b[i], "number"))
bbb.append(B())
LinkableAttribute(bbb[i], "number",
(bbb[i - 1] if i else bb[i], "number"))
a.number = 123
for i in range(100):
self.assertEqual(b[i].number, 123)
self.assertEqual(bb[i].number, 123)
self.assertEqual(bbb[i].number, 123)
def testLinkableAttributeConstructorAssignment(self):
a = A()
a.number = 77
c = C()
LinkableAttribute(c, "number", (a, "number"))
c2 = C() # exception should not be here
LinkableAttribute(c2, "number", (a, "number"))
def testLinkableAttributeConstructorAssignment100(self):
a = A()
a.number = 77
c = []
cc = []
ccc = []
for i in range(100):
c.append(C())
LinkableAttribute(c[i], "number", (a, "number"))
cc.append(C())
LinkableAttribute(cc[i], "number", (c[i], "number"))
ccc.append(B())
LinkableAttribute(ccc[i], "number",
(ccc[i - 1] if i else cc[i], "number"))
a.number = 123
for i in range(100):
self.assertEqual(c[i].number, 123)
self.assertEqual(cc[i].number, 123)
self.assertEqual(ccc[i].number, 123)
def testLinkableAttributeOneWay(self):
class AC(object):
pass
class BC(object):
pass
a = AC()
a.number = 77
b = BC()
b.number = 88
LinkableAttribute(b, "number", (a, "number"))
self.assertEqual(b.number, 77)
try:
b.number = 99
err = True
except RuntimeError:
err = False
self.assertFalse(err, "Two way assignment disabled")
def testLinkablePickling(self):
a = A()
a.number = 77
b = B()
LinkableAttribute(b, "number", (a, "number"))
new_a, new_b = pickle.loads(pickle.dumps((a, b)))
self.assertEqual(new_b.number, 77)
self.assertEqual(new_a.number, 77)
new_a.number = 100
self.assertEqual(new_b.number, 100)
if __name__ == "__main__":
unittest.main()
ext.py | semirook/flask-kit | 133 | 11160110
<reponame>semirook/flask-kit
# coding: utf-8
"""
ext
~~~
Good place for pluggable extensions.
:copyright: (c) 2015 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
from flask_debugtoolbar import DebugToolbarExtension
from flask_gravatar import Gravatar
from flask_login import LoginManager
from flask_sqlalchemy import SQLAlchemy
from flask_assets import Environment
db = SQLAlchemy()
assets = Environment()
login_manager = LoginManager()
gravatar = Gravatar(size=50)
toolbar = DebugToolbarExtension()
# Almost any modern Flask extension has a special init_app()
# method for deferred app binding. But there are a couple of
# popular extensions that know nothing about such a use case,
# or you may have to pass some app.config settings explicitly:
# gravatar = lambda app: Gravatar(app, size=50)
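# Illustrative sketch (not part of the original module): in an application
# factory, the deferred extensions above would typically be bound like this
# (the Flask import and the create_app() function are hypothetical additions):
#
#   from flask import Flask
#
#   def create_app():
#       app = Flask(__name__)
#       db.init_app(app)
#       assets.init_app(app)
#       login_manager.init_app(app)
#       toolbar.init_app(app)
#       return app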
tests/exceptions/source/ownership/_init.py | ponponon/loguru | 11,391 | 11160152
<filename>tests/exceptions/source/ownership/_init.py
import os
import sys
import sysconfig
usersite = os.path.abspath(os.path.join(os.path.dirname(__file__), "usersite"))
sys.path.append(usersite)
sysconfig._INSTALL_SCHEMES["posix_user"]["purelib"] = usersite
tests/misc/test_flop_count.py | wenliangzhao2018/d2go | 687 | 11160158
<reponame>wenliangzhao2018/d2go<filename>tests/misc/test_flop_count.py<gh_stars>100-1000
import os
import tempfile
from d2go.utils.flop_calculator import dump_flops_info
from d2go.utils.testing.data_loader_helper import create_fake_detection_data_loader
from d2go.utils.testing.rcnn_helper import RCNNBaseTestCases
class TestFlopCount(RCNNBaseTestCases.TemplateTestCase):
def setup_custom_test(self):
super().setup_custom_test()
self.cfg.merge_from_file("detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml")
def test_flop_count(self):
size_divisibility = max(self.test_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
with create_fake_detection_data_loader(h, w, is_train=False) as data_loader:
inputs = (next(iter(data_loader)),)
with tempfile.TemporaryDirectory(prefix="d2go_test") as output_dir:
dump_flops_info(self.test_model, inputs, output_dir)
for fname in [
"flops_str_mobilecv",
"flops_str_fvcore",
"flops_table_fvcore",
]:
outf = os.path.join(output_dir, fname + ".txt")
self.assertTrue(os.path.isfile(outf))
nuplan/common/maps/nuplan_map/test/test_generic_polygon_map.py | motional/nuplan-devkit | 128 | 11160162
<filename>nuplan/common/maps/nuplan_map/test/test_generic_polygon_map.py
from typing import Any, Dict
import pytest
from nuplan.common.actor_state.state_representation import Point2D
from nuplan.common.maps.abstract_map import SemanticMapLayer
from nuplan.common.maps.abstract_map_objects import PolygonMapObject
from nuplan.common.maps.nuplan_map.map_factory import NuPlanMapFactory
from nuplan.common.maps.test_utils import add_map_objects_to_scene
from nuplan.common.utils.testing.nuplan_test import NUPLAN_TEST_PLUGIN, nuplan_test
from nuplan.database.tests.nuplan_db_test_utils import get_test_maps_db
maps_db = get_test_maps_db()
map_factory = NuPlanMapFactory(maps_db)
@nuplan_test(path='json/crosswalks/nearby.json')
def test_get_nearby_crosswalks(scene: Dict[str, Any]) -> None:
"""
Test getting nearby crosswalks.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_distance, expected_id in zip(
scene["markers"], scene["xtr"]["expected_nearest_distance"], scene["xtr"]["expected_nearest_id"]
):
pose = marker["pose"]
crosswalk_id, distance = nuplan_map.get_distance_to_nearest_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.CROSSWALK
)
assert crosswalk_id is not None
assert expected_distance == distance
assert expected_id == crosswalk_id
crosswalk: PolygonMapObject = nuplan_map.get_map_object(crosswalk_id, SemanticMapLayer.CROSSWALK)
add_map_objects_to_scene(scene, [crosswalk])
@nuplan_test(path='json/crosswalks/on_crosswalk.json')
def test_get_crosswalk(scene: Dict[str, Any]) -> None:
"""
Test getting crosswalk at a point.
"""
nuplan_map = map_factory.build_map_from_name(scene["map"]["area"])
for marker, expected_id in zip(scene["markers"], scene["xtr"]["expected_nearest_id"]):
pose = marker["pose"]
crosswalk: PolygonMapObject = nuplan_map.get_one_map_object(
Point2D(pose[0], pose[1]), SemanticMapLayer.CROSSWALK
)
assert crosswalk is not None
assert expected_id == crosswalk.id
assert crosswalk.contains_point(Point2D(pose[0], pose[1]))
add_map_objects_to_scene(scene, [crosswalk])
if __name__ == "__main__":
raise SystemExit(pytest.main([__file__], plugins=[NUPLAN_TEST_PLUGIN]))
etl/parsers/etw/Microsoft_Windows_Sensors.py | IMULMUL/etl-parser | 104 | 11160167
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Sensors
GUID : d8900e18-36cb-4548-966f-13f068d1f78e
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1001, version=0)
class Microsoft_Windows_Sensors_1001_0(Etw):
pattern = Struct(
"DeviceID" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1002, version=0)
class Microsoft_Windows_Sensors_1002_0(Etw):
pattern = Struct(
"DeviceID" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1003, version=0)
class Microsoft_Windows_Sensors_1003_0(Etw):
pattern = Struct(
"SensorType" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1004, version=0)
class Microsoft_Windows_Sensors_1004_0(Etw):
pattern = Struct(
"SensorType" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1005, version=0)
class Microsoft_Windows_Sensors_1005_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1006, version=0)
class Microsoft_Windows_Sensors_1006_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1007, version=0)
class Microsoft_Windows_Sensors_1007_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1008, version=0)
class Microsoft_Windows_Sensors_1008_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1009, version=0)
class Microsoft_Windows_Sensors_1009_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1010, version=0)
class Microsoft_Windows_Sensors_1010_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1011, version=0)
class Microsoft_Windows_Sensors_1011_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1012, version=0)
class Microsoft_Windows_Sensors_1012_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1013, version=0)
class Microsoft_Windows_Sensors_1013_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1014, version=0)
class Microsoft_Windows_Sensors_1014_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1015, version=0)
class Microsoft_Windows_Sensors_1015_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"Timestamp" / SystemTime,
"NumSubscribers" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1016, version=0)
class Microsoft_Windows_Sensors_1016_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1017, version=0)
class Microsoft_Windows_Sensors_1017_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1100, version=0)
class Microsoft_Windows_Sensors_1100_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid,
"Timestamp" / SystemTime
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1101, version=0)
class Microsoft_Windows_Sensors_1101_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid,
"Timestamp" / SystemTime
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1102, version=0)
class Microsoft_Windows_Sensors_1102_0(Etw):
pattern = Struct(
"WorkingSet" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1103, version=0)
class Microsoft_Windows_Sensors_1103_0(Etw):
pattern = Struct(
"CPUUsage" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1104, version=0)
class Microsoft_Windows_Sensors_1104_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid,
"Timestamp" / SystemTime,
"HRESULT" / Int32ul,
"ReportPointer" / Int64ul,
"SensorStateThisSensor" / Int32ul,
"SensorStateAccumulated" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1105, version=0)
class Microsoft_Windows_Sensors_1105_0(Etw):
pattern = Struct(
"QuadrantAngle" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1106, version=0)
class Microsoft_Windows_Sensors_1106_0(Etw):
pattern = Struct(
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1107, version=0)
class Microsoft_Windows_Sensors_1107_0(Etw):
pattern = Struct(
"AccelerationX" / Float32l,
"AccelerationY" / Float32l,
"AccelerationZ" / Float32l,
"PitchCalibration" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1108, version=0)
class Microsoft_Windows_Sensors_1108_0(Etw):
pattern = Struct(
"IsPitchGreaterThanThreshold" / Int32ul,
"PitchAngle" / Float32l,
"PitchThreshold" / Float32l,
"PitchCalibration" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1109, version=0)
class Microsoft_Windows_Sensors_1109_0(Etw):
pattern = Struct(
"Theta" / Float32l,
"AngularCalibration" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1110, version=0)
class Microsoft_Windows_Sensors_1110_0(Etw):
pattern = Struct(
"Theta" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1111, version=0)
class Microsoft_Windows_Sensors_1111_0(Etw):
pattern = Struct(
"GoodAngle" / Int32ul,
"Quadrant" / Int32ul,
"LastQuadrant" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1112, version=0)
class Microsoft_Windows_Sensors_1112_0(Etw):
pattern = Struct(
"TimerQueueStatus" / Int32ul,
"TimerQueueAction" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1113, version=0)
class Microsoft_Windows_Sensors_1113_0(Etw):
pattern = Struct(
"GoodAngle" / Int32ul,
"Quadrant" / Int32ul,
"LastQuadrant" / Int32ul,
"Theta" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1114, version=0)
class Microsoft_Windows_Sensors_1114_0(Etw):
pattern = Struct(
"GoodAngle" / Int32ul,
"Quadrant" / Int32ul,
"AngularThreshold" / Float32l,
"Theta" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1119, version=0)
class Microsoft_Windows_Sensors_1119_0(Etw):
pattern = Struct(
"QuadrantAngle" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1120, version=0)
class Microsoft_Windows_Sensors_1120_0(Etw):
pattern = Struct(
"QuadrantAngle" / Float32l
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1121, version=0)
class Microsoft_Windows_Sensors_1121_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid,
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1122, version=0)
class Microsoft_Windows_Sensors_1122_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid,
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1123, version=0)
class Microsoft_Windows_Sensors_1123_0(Etw):
pattern = Struct(
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1124, version=0)
class Microsoft_Windows_Sensors_1124_0(Etw):
pattern = Struct(
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1207, version=0)
class Microsoft_Windows_Sensors_1207_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1208, version=0)
class Microsoft_Windows_Sensors_1208_0(Etw):
pattern = Struct(
"SensorObjectId" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1209, version=0)
class Microsoft_Windows_Sensors_1209_0(Etw):
pattern = Struct(
"EventNumber" / Int32ul,
"EventCount" / Int32ul,
"SENSOR_ID" / Guid,
"ConnectedClients" / Int32ul,
"SubscribedClients" / Int32ul,
"SilentClients" / Int32ul,
"PID" / Int32ul,
"ClientBitfield" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1301, version=0)
class Microsoft_Windows_Sensors_1301_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"Timestamp" / SystemTime
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1302, version=0)
class Microsoft_Windows_Sensors_1302_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"Timestamp" / SystemTime
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1303, version=0)
class Microsoft_Windows_Sensors_1303_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1304, version=0)
class Microsoft_Windows_Sensors_1304_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1305, version=0)
class Microsoft_Windows_Sensors_1305_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1306, version=0)
class Microsoft_Windows_Sensors_1306_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1307, version=0)
class Microsoft_Windows_Sensors_1307_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1308, version=0)
class Microsoft_Windows_Sensors_1308_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1309, version=0)
class Microsoft_Windows_Sensors_1309_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1310, version=0)
class Microsoft_Windows_Sensors_1310_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1311, version=0)
class Microsoft_Windows_Sensors_1311_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1312, version=0)
class Microsoft_Windows_Sensors_1312_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"SensorState" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1501, version=0)
class Microsoft_Windows_Sensors_1501_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1502, version=0)
class Microsoft_Windows_Sensors_1502_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1503, version=0)
class Microsoft_Windows_Sensors_1503_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1504, version=0)
class Microsoft_Windows_Sensors_1504_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1505, version=0)
class Microsoft_Windows_Sensors_1505_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1506, version=0)
class Microsoft_Windows_Sensors_1506_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1507, version=0)
class Microsoft_Windows_Sensors_1507_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1508, version=0)
class Microsoft_Windows_Sensors_1508_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1509, version=0)
class Microsoft_Windows_Sensors_1509_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"HRESULT" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1510, version=0)
class Microsoft_Windows_Sensors_1510_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"HRESULT" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1511, version=0)
class Microsoft_Windows_Sensors_1511_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"HRESULT" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=1512, version=0)
class Microsoft_Windows_Sensors_1512_0(Etw):
pattern = Struct(
"SensorObjectId" / WString,
"SENSOR_ID" / Guid,
"HRESULT" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=3001, version=0)
class Microsoft_Windows_Sensors_3001_0(Etw):
pattern = Struct(
"GenericMessage" / WString
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=3002, version=0)
class Microsoft_Windows_Sensors_3002_0(Etw):
pattern = Struct(
"SENSOR_ID" / Guid,
"MethodId" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=10001, version=0)
class Microsoft_Windows_Sensors_10001_0(Etw):
pattern = Struct(
"EventId" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=10002, version=0)
class Microsoft_Windows_Sensors_10002_0(Etw):
pattern = Struct(
"EventId" / Int32ul
)
@declare(guid=guid("d8900e18-36cb-4548-966f-13f068d1f78e"), event_id=10003, version=0)
class Microsoft_Windows_Sensors_10003_0(Etw):
pattern = Struct(
"EventId" / Int32ul,
"QuadrantChangeInterval" / Int32ul,
"Angle" / Int32ul
)
psutil_example/print_process_memory/print_process_by_memory.py | DazEB2/SimplePyScripts | 117 | 11160176
<reponame>DazEB2/SimplePyScripts<filename>psutil_example/print_process_memory/print_process_by_memory.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import sys
from pathlib import Path
from collections import defaultdict
# pip install psutil
import psutil
sys.path.append(str(Path(__file__).resolve().parent.parent))
from human_byte_size import sizeof_fmt
column_width = defaultdict(int)
process_list = []
for p in psutil.process_iter():
memory = p.memory_info().rss
cols = p.name(), str(memory) + ' bytes', sizeof_fmt(memory)
process_list.append(cols)
for i, x in enumerate(cols):
column_width[i] = max(column_width[i], len(x))
# Sort by memory size
process_list.sort(key=lambda x: int(x[1].split(' ')[0]), reverse=True)
for p in process_list:
row = [x.rjust(column_width[i]) for i, x in enumerate(p)]
print(' | '.join(row))
whynot/simulators/civil_violence/simulator.py | yoshavit/whynot | 376 | 11160189
<filename>whynot/simulators/civil_violence/simulator.py
"""Simulate and analyze runs from the civil violence model."""
import dataclasses
from mesa.datacollection import DataCollector
import numpy as np
from whynot.simulators.civil_violence.model import CivilViolenceModel
@dataclasses.dataclass
class Agent:
# pylint: disable-msg=too-few-public-methods
"""Covariates for a single agent in the simulation.
Examples
--------
>>> # An agent with low risk aversion and high hardship
>>> civil_violence.Agent(hardship=0.99, risk_aversion=0.01)
"""
#: How aggrieved is the agent by external circumstances.
hardship: float = 0.5
#: How legitimate does the agent perceive the current regime.
legitimacy: float = 0.5
#: Threshold above which agent starts to openly rebell
active_threshold: float = 0.1
#: How likely the agent is to rebel for a fixed greviance level.
risk_aversion: float = 0.5
#: How many adjacent squares an agent sees and uses to determine probability of arrest.
vision: int = 3
@dataclasses.dataclass
class Config:
# pylint: disable-msg=too-few-public-methods
"""Metaparameters for a single run of the civil violence simulator."""
#: Vertical grid size
grid_height: int = 50
#: Horizontal grid size
grid_width: int = 50
#: What fraction of the agents are police
cop_fraction: float = 0.05
#: How many adjacent squares a cop can see when determining who to arrest.
cop_vision: int = 5
# What's the longest time a citizen can stay in jail?
max_jail_term: int = 5
#: How strongly other agent grievances affect each agent in prison.
prison_interaction: float = 0.1
#: A fixed parameter to calibrate arrest likelihood.
arrest_prob_constant: float = 2.3
def count_type_citizens(model, condition, exclude_jailed=True):
"""Count agents as either Quiescent/Active."""
count = 0
for agent in model.schedule.agents:
if agent.breed == "cop":
continue
if exclude_jailed and agent.jail_sentence:
continue
if agent.condition == condition:
count += 1
return count
def count_jailed(model):
"""Count number of jailed agents."""
count = 0
for agent in model.schedule.agents:
if agent.breed == "citizen" and agent.jail_sentence:
count += 1
return count
def simulate(agents, config, seed=None, max_steps=1000):
"""Simulate a run of the civil violence model.
Parameters
----------
agents: list
List of whynot.simulators.civil_violence.Agent to populate the model
config: whynot.simulators.civil_violence.Config
Simulation parameters
seed: int
(Optional) Seed for all randomness in model setup and execution.
max_steps: int
Maximum number of steps to run the civil_violence model.
Returns
-------
observations: pd.DataFrame
Pandas dataframe containing the "observations" recorded for each
agent. Observations are defined in the `agent_reporter` and include
agent attributes along with:
"pos" # position on the grid
"jail_sentence" # agent's jail sentence at model end
"condition" # agent's condition (rebelling or acquiesent) at model end
"arrest_probability" # agent's probability of arrest
"arrests" # number of time agent has been arrested
"days_active" # how long as the agent spent in rebellion
"""
# Ensure everything will fit on the grid
num_cells = config.grid_height * config.grid_width
num_cops = int(np.floor(len(agents) * config.cop_fraction))
assert len(agents) + num_cops < num_cells
model = CivilViolenceModel(
height=config.grid_height,
width=config.grid_width,
cop_vision=config.cop_vision,
max_jail_term=config.max_jail_term,
prison_interaction=config.prison_interaction,
arrest_prob_constant=config.arrest_prob_constant,
max_steps=max_steps,
seed=seed,
)
# Place agents on grid
for i, agent in enumerate(agents):
model.add_agent(
i,
model.find_empty(),
agent.hardship,
agent.legitimacy,
agent.risk_aversion,
agent.active_threshold,
agent.vision,
)
for i in range(num_cops):
model.add_cop(i + len(agents), model.find_empty())
# Which attributes to report
agent_reporters = {
"pos": "pos",
"breed": "breed",
"jail_sentence": "jail_sentence",
"condition": "condition",
"arrest_probability": "arrest_probability",
"arrests": "arrests",
"hardship": "hardship",
"regime_legitimacy": "regime_legitimacy",
"days_active": "days_active",
"risk_aversion": "risk_aversion",
"threshold": "threshold",
"arrest_parameter": "arrest_parameter",
"vision": "vision",
}
datacollector = DataCollector(agent_reporters=agent_reporters)
while model.running:
model.step()
datacollector.collect(model)
dataframe = datacollector.get_agent_vars_dataframe()
observations = dataframe[dataframe.breed == "citizen"].drop(columns="breed")
return observations
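# Illustrative usage sketch (not part of the original module). The agent and
# configuration values below are arbitrary examples chosen for demonstration.
if __name__ == "__main__":
    example_agents = [Agent(hardship=0.8, risk_aversion=0.2) for _ in range(100)]
    example_config = Config(grid_height=20, grid_width=20)
    example_run = simulate(example_agents, example_config, seed=0, max_steps=25)
    print(example_run.head())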
vaas-app/src/vaas/router/tests/test_views.py | allegro/vaas | 251 | 11160226
<filename>vaas-app/src/vaas/router/tests/test_views.py<gh_stars>100-1000
from django.test import TestCase
from vaas.router.models import Route
from vaas.router.forms import RouteModelForm
from vaas.manager.models import Director, Probe, TimeProfile
from vaas.cluster.models import LogicalCluster
from django.urls import reverse
class TestPrioritiesView(TestCase):
def setUp(self):
self.probe = Probe.objects.create(name='default_probe', url='/ts.1')
self.cluster1 = LogicalCluster.objects.create(name="first cluster")
self.cluster2 = LogicalCluster.objects.create(name="second cluster")
self.director1 = Director.objects.create(
name='first_gamma',
route_expression='/first',
mode='random',
probe=self.probe,
time_profile=TimeProfile.objects.create(name='beta')
)
self.director2 = Director.objects.create(
name='second_gamma',
route_expression='/second',
mode='random',
probe=self.probe,
time_profile=TimeProfile.objects.create(name='alfa')
)
def test_should_check_if_taken_priority_is_excluded_from_available_values(self):
self.director1.cluster.add(self.cluster1)
route = Route.objects.create(
condition='some condition',
priority=51,
director=self.director1,
action='pass',
)
route.clusters.add(self.cluster1)
response = self.client.get(
reverse('router:priorities', args=(self.director1.id, 0, 0)),
{'clusters': self.cluster1.id}
)
self.assertEqual(response.status_code, 200)
available_values = response.json()['values']
self.assertTrue(route.priority not in available_values)
def test_should_check_if_taken_priority_is_excluded_from_available_values_when_clusters_in_sync(self):
self.director1.cluster.add(self.cluster1)
route = Route.objects.create(
condition='some condition',
priority=51,
director=self.director1,
action='pass',
clusters_in_sync=True
)
route.clusters.add(self.cluster2)
response = self.client.get(
reverse('router:priorities', args=(self.director1.id, 0, 0)),
{'clusters': self.cluster1.id}
)
self.assertEqual(response.status_code, 200)
available_values = response.json()['values']
self.assertTrue(route.priority not in available_values)
def test_should_check_if_taken_priority_excluded_for_director_cluster_when_clusters_in_sync(self):
self.director1.cluster.add(self.cluster1)
route = Route.objects.create(
condition='some condition',
priority=51,
director=self.director1,
action='pass',
clusters_in_sync=True
)
route.clusters.add(self.cluster2)
response = self.client.get(
reverse('router:priorities', args=(self.director1.id, 0, 0)),
{'clusters': self.cluster2.id}
)
self.assertEqual(response.status_code, 200)
available_values = response.json()['values']
self.assertTrue(route.priority in available_values)
def test_should_check_if_priority_calculated_properly_when_disabling_clusters_in_sync(self):
self.director1.cluster.add(self.cluster1)
route_1 = Route.objects.create(
condition='some condition',
priority=51,
director=self.director1,
action='pass',
)
route_1.clusters.add(self.cluster2)
route_2 = Route.objects.create(
condition='some condition',
priority=51,
director=self.director1,
action='pass',
clusters_in_sync=True
)
route_2.clusters.add(self.cluster2)
response = self.client.get(
reverse('router:priorities', args=(self.director1.id, route_2.id, route_2.priority)),
{'clusters': self.cluster2.id}
)
self.assertEqual(response.status_code, 200)
available_values = response.json()['values']
self.assertTrue(route_2.priority not in available_values)
leetcode/132.palindrome-partitioning-ii.py | geemaple/algorithm | 177 | 11160258
# f(i) = min(f[j] + 1 where 0 <= j <= i - 1 and s[j: i - 1] is palindrome)
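# Worked example (added for illustration): for s = "aab", "aa" is a palindrome,
# so f(2) = 1 and f(3) = f(2) + 1 = 2 pieces, giving a minimum of 2 - 1 = 1 cut
# (partition "aa" | "b").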
class Solution(object):
def minCut(self, s):
"""
:type s: str
:rtype: int
"""
if s is None or len(s) == 0:
return 0
m = len(s)
store = [[False for _ in range(m)] for _ in range(m)]
i = 0
j = 0
for t in range(m):
i = j = t # odd palindrome
while(i >= 0 and j < m and s[i] == s[j]):
store[i][j] = True
i -= 1
j += 1
i = t
j = t + 1 # even palindrome
while(i >= 0 and j < m and s[i] == s[j]):
store[i][j] = True
i -= 1
j += 1
table = [0 for _ in range(m + 1)]
for i in range(1, m + 1):
value = float('inf')
for j in range(i):
if store[j][i - 1]:
value = min(value, table[j] + 1)
table[i] = value
        return table[-1] - 1
pwncat/commands/__init__.py | Mitul16/pwncat | 1,454 | 11160309
<filename>pwncat/commands/__init__.py
"""
This module implements the command parser, lexer, highlighter, etc for pwncat.
Each command is defined as an individual module under ``pwncat/commands`` which
defines a ``Command`` class that inherits from :class:`pwncat.commands.CommandDefinition`.
Each command is capable of specifying the expected arguments similar to the way
they specified with argparse. Internally, we use the :class:`Parameter` definitions
to build an ``argparse`` parser. We also use them to build a lexer capable of
automatic syntax highlighting at the prompt.
To define a new command, simply create a new module under ``pwncat/commands`` and
define a class named ``Command``.
Example Custom Command
----------------------
.. code-block:: python
:caption: A Custom Command Placed in ``pwncat/commands``
class Command(CommandDefinition):
\""" Command documentation placed in the docstring \"""
PROG = "custom"
ARGS = {
"--option,-o": Parameter(Complete.NONE, help="help info", action="store_true"),
"positional": Parameter(
Complete.CHOICES,
metavar="POSITIONAL",
choices=["hello", "world"],
help="help information",
),
}
def run(self, manager: "pwncat.manager.Manager", args: "argparse.Namespace"):
manager.log("we ran a custom command!")
"""
import os
import re
import sys
import tty
import fcntl
import shlex
import pkgutil
import termios
import argparse
from io import TextIOWrapper
from enum import Enum, auto
from typing import Dict, List, Type, Callable, Iterable
from functools import partial
import rich.text
from pygments import token
from prompt_toolkit import ANSI, PromptSession
from pygments.lexer import RegexLexer
from pygments.styles import get_style_by_name
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.styles import Style, merge_styles
from prompt_toolkit.history import History
from prompt_toolkit.document import Document
from prompt_toolkit.completion import (
Completer,
Completion,
CompleteEvent,
WordCompleter,
merge_completers,
)
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.patch_stdout import patch_stdout
from prompt_toolkit.styles.pygments import style_from_pygments_cls
from prompt_toolkit.application.current import get_app
import pwncat
import pwncat.db
from pwncat.util import console
from pwncat.channel import ChannelClosed
class Complete(Enum):
"""
Command argument completion options. This defines how tab completion
works for an individual command parameter/argument. If you choose to
use the ``CHOICES`` type, you must specify the argparse ``choices``
argument to the :class:`Parameter` constructor. This argument can
either be an iterable or a callable which returns a generator. The
callable takes as an argument the manager. This allows you to have
contextual tab completions if needed.
"""
CHOICES = auto()
""" Complete argument from the list of choices specified in ``choices`` parameter """
LOCAL_FILE = auto()
""" Complete argument as a local file path """
REMOTE_FILE = auto()
""" Complete argument as a remote file path """
NONE = auto()
""" Do not provide argument completions """
class StoreConstOnce(argparse.Action):
"""Only allow the user to store a value in the destination once. This prevents
    users from selecting multiple actions in the privesc parser."""
def __call__(self, parser, namespace, values, option_string=None):
if hasattr(self, "__" + self.dest + "_seen"):
raise argparse.ArgumentError(self, "only one action may be specified")
setattr(namespace, "__" + self.dest + "_seen", True)
setattr(namespace, self.dest, self.const)
def StoreForAction(action: List[str]) -> Callable:
"""Generates a custom argparse Action subclass which verifies that the current
selected "action" option is one of the provided actions in this function. If
not, an error is raised."""
class StoreFor(argparse.Action):
"""Store the value if the currently selected action matches the list of
actions passed to this function."""
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, "action", None) not in action:
raise argparse.ArgumentError(
self,
f"{option_string}: only valid for {action}",
)
setattr(namespace, self.dest, values)
return StoreFor
def StoreConstForAction(action: List[str]) -> Callable:
"""Generates a custom argparse Action subclass which verifies that the current
selected "action" option is one of the provided actions in this function. If
not, an error is raised. This stores the constant `const` to the `dest` argument.
This is comparable to `store_const`, but checks that you have selected one of
the specified actions."""
class StoreFor(argparse.Action):
"""Store the value if the currently selected action matches the list of
actions passed to this function."""
def __call__(self, parser, namespace, values, option_string=None):
if getattr(namespace, "action", None) not in action:
raise argparse.ArgumentError(
self,
f"{option_string}: only valid for {action}",
)
setattr(namespace, self.dest, self.const)
return StoreFor
def get_module_choices(command):
"""Yields a list of module choices to be used with command argument
choices to select a valid module for the current target. For example
you could use ``Parameter(Complete.CHOICES, choices=get_module_choices)``"""
if command.manager.target is None:
return
yield from [
module.name.removeprefix("agnostic.").removeprefix(
command.manager.target.platform.name + "."
)
for module in command.manager.target.find_module("*")
]
class Parameter:
"""Generic parameter definition for commands.
This class allows you to specify the syntax highlighting, tab completion
and argparse settings for a command parameter in on go. The ``complete``
argument tells pwncat how to tab complete your argument. The ``token``
argument is normally omitted but can be used to change the pygments
syntax highlighting for your argument. All other arguments are passed
directly to ``argparse`` when constructing the parser.
:param complete: the completion type
:type complete: Complete
:param token: the Pygments token to highlight this argument with
:type token: Pygments Token
    :param group: the name of the argument group (or mutually exclusive group) this parameter belongs to, or None
:param mutex: for group definitions, indicates whether this is a mutually exclusive group
:param args: positional arguments for ``add_argument`` or ``add_argument_group``
:param kwargs: keyword arguments for ``add_argument`` or ``add_argument_group``
"""
def __init__(
self,
complete: Complete,
token=token.Name.Label,
group: str = None,
*args,
**kwargs,
):
self.complete = complete
self.token = token
self.group = group
self.args = args
self.kwargs = kwargs
class Group:
"""
This just wraps the parameters to the add_argument_group and add_mutually_exclusive_group
"""
def __init__(self, mutex: bool = False, **kwargs):
self.mutex = mutex
self.kwargs = kwargs
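# Illustrative example (not from the original source): a command could declare
# a mutually exclusive argument group and attach parameters to it like so:
#
#   GROUPS = {"output": Group(mutex=True, required=False)}
#   ARGS = {
#       "--json,-j": Parameter(Complete.NONE, group="output", action="store_true"),
#       "--table,-t": Parameter(Complete.NONE, group="output", action="store_true"),
#   }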
class CommandDefinition:
"""
Generic structure for a local command.
The docstring for your command class becomes the long-form help for your command.
See the above example for a complete custom command definition.
:param manager: the controlling manager for this command
:type manager: pwncat.manager.Manager
"""
PROG = "unimplemented"
""" The name of your new command """
ARGS: Dict[str, Parameter] = {}
""" A dictionary of parameter definitions created with the ``Parameter`` class.
If this is None, your command will receive the raw argument string and no processing
will be done except removing the leading command name.
"""
GROUPS: Dict[str, Group] = {}
""" A dictionary mapping group definitions to group names. The parameters to Group
are passed directly to either add_argument_group or add_mutually_exclusive_group
with the exception of the mutex arg, which determines the group type. """
DEFAULTS = {}
""" A dictionary of default values (passed directly to ``ArgumentParser.set_defaults``) """
LOCAL = False
""" Whether this command is purely local or requires an connected remote host """
# An example definition of arguments
# PROG = "command"
# ARGS = {
# "--all,-a": parameter(
# Complete.NONE, action="store_true", help="A switch/option"
# ),
# "--file,-f": parameter(Complete.LOCAL_FILE, help="A local file"),
# "--rfile": parameter(Complete.REMOTE_FILE, help="A remote file"),
# "positional": parameter(
# Complete.CHOICES, choices=["a", "b", "c"], help="Choose one!"
# ),
# }
def __init__(self, manager: "pwncat.manager.Manager"):
"""Initialize a new command instance. Parse the local arguments array
into an argparse object."""
self.manager = manager
# Create the parser object
if self.ARGS is not None:
self.parser = argparse.ArgumentParser(
prog=self.PROG,
description=self.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
self.build_parser(self.parser, self.ARGS, self.GROUPS)
else:
self.parser = None
def run(self, manager: "pwncat.manager.Manager", args):
"""
This is the "main" for your new command. This should perform the action
represented by your command.
:param manager: the manager to operate on
:type manager: pwncat.manager.Manager
:param args: the argparse Namespace containing your parsed arguments
"""
raise NotImplementedError
def build_parser(
self,
parser: argparse.ArgumentParser,
args: Dict[str, Parameter],
group_defs: Dict[str, Group],
):
"""
Parse the ARGS and DEFAULTS dictionaries to build an argparse ArgumentParser
for this command. You should not need to overload this.
:param parser: the parser object to add arguments to
:param args: the ARGS dictionary
"""
groups = {}
for name, definition in group_defs.items():
if definition.mutex:
groups[name] = parser.add_mutually_exclusive_group(**definition.kwargs)
else:
groups[name] = parser.add_argument_group(**definition.kwargs)
for arg, param in args.items():
names = arg.split(",")
if param.group is not None and param.group not in groups:
raise ValueError(f"{param.group}: no such group")
if param.group is not None:
group = groups[param.group]
else:
group = parser
# Patch choice to work with a callable
if "choices" in param.kwargs and callable(param.kwargs["choices"]):
method = param.kwargs["choices"]
class wrapper:
def __init__(wself, method):
wself.method = method
def __iter__(wself):
yield from wself.method(self)
param.kwargs["choices"] = wrapper(method)
# Patch "type" so we can see "self"
if (
"type" in param.kwargs
and isinstance(param.kwargs["type"], tuple)
and param.kwargs["type"][0] == "method"
):
param.kwargs["type"] = partial(param.kwargs["type"][1], self)
group.add_argument(*names, *param.args, **param.kwargs)
parser.set_defaults(**self.DEFAULTS)
def resolve_blocks(source: str):
"""This is a dumb lexer that turns strings of text with code blocks (squigly
braces) into a single long string separated by semicolons. All code blocks are
converted to strings recursively with correct escaping levels. The resulting
string can be sent to break_commands to iterate over the commands."""
result = []
in_brace = False
inside_quotes = False
i = 0
lineno = 1
while i < len(source):
if not inside_quotes:
if source[i] == '"':
inside_quotes = True
result.append("\\" * int(in_brace) + '"')
elif source[i] == "{" and not in_brace:
result.append('"')
in_brace = True
elif source[i] == "}":
if not in_brace:
raise ValueError(f"line {lineno}: mismatched closing brace")
in_brace = False
result.append('"')
elif source[i] == "\\":
result.append("\\" * (int(in_brace)))
elif source[i] == "\n" and in_brace:
result.append("\\n")
elif source[i] == "#":
# Comment
while i < len(source) and source[i] != "\n":
i += 1
else:
result.append(source[i])
else:
if source[i] == '"':
inside_quotes = False
result.append("\\" * int(in_brace) + '"')
elif source[i] == "\\":
result.append("\\" * (in_brace + 1))
elif source[i] == "\n":
raise ValueError(f"line {lineno}: newlines cannot appear in strings")
else:
result.append(source[i])
if source[i] == "\n":
lineno += 1
i += 1
if in_brace:
raise ValueError("mismatched braces")
if inside_quotes:
raise ValueError("missing ending quote")
return "".join(result).split("\n")
class DatabaseHistory(History):
"""Yield history from the host entry in the database"""
def __init__(self, manager):
super().__init__()
self.manager = manager
def load_history_strings(self) -> Iterable[str]:
"""Load the history from the database"""
with self.manager.db.transaction() as conn:
yield from reversed(conn.root.history)
def store_string(self, string: str) -> None:
"""Store a command in the database"""
with self.manager.db.transaction() as conn:
conn.root.history.append(string)
class CommandParser:
"""Handles dynamically loading command classes, parsing input, and
dispatching commands. This class effectively has complete control over
the terminal whenever in an interactive pwncat session. It will change
termios modes for the control tty at will in order to support raw vs
command mode."""
def __init__(self, manager: "pwncat.manager.Manager"):
"""We need to dynamically load commands from pwncat.commands"""
self.manager = manager
self.commands: List["CommandDefinition"] = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
if module_name == "base":
continue
self.commands.append(
loader.find_module(module_name)
.load_module(module_name)
.Command(manager)
)
self.prompt: PromptSession = None
self.toolbar: PromptSession = None
self.loading_complete = False
self.aliases: Dict[str, CommandDefinition] = {}
self.shortcuts: Dict[str, CommandDefinition] = {}
self.found_prefix: bool = False
# Saved terminal state to support switching between raw and normal
# mode.
self.saved_term_state = None
def setup_prompt(self):
"""This needs to happen after __init__ when the database is fully
initialized."""
history = DatabaseHistory(self.manager)
completer = CommandCompleter(self.manager, self.commands)
lexer = PygmentsLexer(CommandLexer.build(self.commands))
style = style_from_pygments_cls(get_style_by_name("monokai"))
auto_suggest = AutoSuggestFromHistory()
bindings = KeyBindings()
@bindings.add("c-q")
def _(event):
"""Exit interactive mode"""
get_app().exit(exception=pwncat.manager.InteractiveExit())
self.prompt = PromptSession(
[
("fg:ansiyellow bold", "(local) "),
("fg:ansimagenta bold", "pwncat"),
("", "$ "),
],
completer=completer,
lexer=lexer,
style=merge_styles(
[style, Style.from_dict({"bottom-toolbar": "#333333 bg:#ffffff"})]
),
auto_suggest=auto_suggest,
complete_while_typing=False,
history=history,
bottom_toolbar=self._render_toolbar,
key_bindings=bindings,
)
def _render_toolbar(self):
"""Render the formatted text for the bottom toolbar"""
if self.manager.target is None:
markup_result = "Active Session: [red]None[/red]"
else:
markup_result = f"Active Session: {self.manager.target.platform}"
# Convert rich-style markup to prompt_toolkit formatted text
text = rich.text.Text.from_markup(markup_result)
segments = list(text.render(console))
rendered = []
        # Here we take each segment's style, invert the color and render the
        # segment text. This is because the bottom toolbar has its colors
# inverted.
for i in range(len(segments)):
style = segments[i].style.copy()
temp = style.color
style._color = segments[i].style.bgcolor
style._bgcolor = temp
rendered.append(style.render(segments[i].text))
# Join the rendered segments to ANSI escape sequences.
# This format can be parsed by prompt_toolkit formatted text.
ansi_result = "".join(rendered)
# Produce prompt_toolkit formatted text from the ANSI escaped string
return ANSI(ansi_result)
def eval(self, source: str, name: str = "<script>"):
"""Evaluate the given source file. This will execute the given string
as a script of commands. Syntax is the same except that commands may
be separated by semicolons, comments are accepted as following a "#" and
        multiline strings are supported with '"{' and '}"' as delimiters."""
for command in resolve_blocks(source):
try:
self.dispatch_line(command)
except ChannelClosed as exc:
# A channel was unexpectedly closed
self.manager.log(
f"[yellow]warning[/yellow]: {exc.channel}: channel closed"
)
# Ensure any existing sessions are cleaned from the manager
exc.cleanup(self.manager)
except pwncat.manager.InteractiveExit:
# Within a script, `exit` means to exit the script, not the
# interpreter
break
except Exception as exc:
console.log(
f"[red]error[/red]: [cyan]{name}[/cyan]: [yellow]{command}[/yellow]: {str(exc)}"
)
break
def run_single(self):
"""Execute one Read-Execute iteration. This will prompt the user for input."""
if self.prompt is None:
self.setup_prompt()
try:
line = self.prompt.prompt().strip()
self.dispatch_line(line)
except (EOFError, OSError, KeyboardInterrupt, pwncat.manager.InteractiveExit):
return
def run(self):
"""Execute the pwncat REPL. This will continue running until an :class:`InteractiveExit`
exception or a :class:`EOFError` exception are raised."""
if self.prompt is None:
self.setup_prompt()
running = True
default_text = ""
while running:
try:
if self.manager.config.module:
self.prompt.message = [
(
"fg:ansiyellow bold",
f"({self.manager.config.module.name}) ",
),
("fg:ansimagenta bold", "pwncat"),
("", "$ "),
]
else:
self.prompt.message = [
("fg:ansiyellow bold", "(local) "),
("fg:ansimagenta bold", "pwncat"),
("", "$ "),
]
with patch_stdout(raw=True):
line = self.prompt.prompt(default=default_text).strip()
default_text = ""
if line == "":
continue
self.dispatch_line(line)
            # We used to catch only KeyboardInterrupt, but this prevents a
# badly written command from completely killing our remote
# connection.
except EOFError:
# C-d was pressed. Assume we want to exit the prompt.
running = False
except KeyboardInterrupt:
# Normal C-c from a shell just clears the current prompt
continue
except ChannelClosed as exc:
# A channel was unexpectedly closed
self.manager.log(
f"[yellow]warning[/yellow]: {exc.channel}: channel closed"
)
# Ensure any existing sessions are cleaned from the manager
exc.cleanup(self.manager)
except pwncat.manager.InteractiveExit:
# We don't want this caught below, so we catch it here
# then re-raise it to be caught by the interactive method
raise
except (Exception, KeyboardInterrupt):
console.print_exception(width=None)
continue
def dispatch_line(self, line: str, prog_name: str = None):
"""Parse the given line of command input and dispatch a command"""
# Account for blank or whitespace only lines
line = line.strip()
if line == "":
return
try:
# Spit the line with shell rules
argv = shlex.split(line)
except ValueError as e:
self.manager.log(f"[red]error[/red]: {e.args[0]}")
return
if argv[0][0] in self.shortcuts:
command = self.shortcuts[argv[0][0]]
argv[0] = argv[0][1:]
args = argv
line = line[1:]
else:
line = f"{argv[0]} ".join(line.split(f"{argv[0]} ")[1:])
# Search for a matching command
for command in self.commands:
if command.PROG == argv[0]:
break
else:
if argv[0] in self.aliases:
command = self.aliases[argv[0]]
else:
self.manager.log(f"[red]error[/red]: {argv[0]}: unknown command")
return
if self.manager.target is None and not command.LOCAL:
self.manager.log(
f"[red]error[/red]: {argv[0]}: active session required"
)
return
args = argv[1:]
args = [a.encode("utf-8").decode("unicode_escape") for a in args]
try:
if prog_name:
temp_name = command.parser.prog
command.parser.prog = prog_name
prog_name = temp_name
# Parse the arguments
if command.parser:
args = command.parser.parse_args(args)
else:
args = line
# Run the command
command.run(self.manager, args)
if prog_name:
command.parser.prog = prog_name
except SystemExit:
# The arguments were incorrect
return
def parse_prefix(self, channel, data: bytes):
"""Parse data received from the user when in pwncat's raw mode.
This will intercept key presses from the user and interpret the
prefix and any bound keyboard shortcuts. It also sends any data
without a prefix to the remote channel.
:param data: input data from user
:type data: bytes
"""
buffer = b""
for c in data:
if not self.found_prefix and c != pwncat.config["prefix"].value:
buffer += c
continue
elif not self.found_prefix and c == pwncat.config["prefix"].value:
self.found_prefix = True
channel.send(buffer)
buffer = b""
continue
elif self.found_prefix:
try:
binding = pwncat.config.binding(c)
if binding.strip() == "pass":
buffer += c
else:
# Restore the normal terminal
self.restore_term()
# Run the binding script
self.eval(binding, "<binding>")
# Drain any channel output
channel.drain()
channel.send(b"\n")
# Go back to a raw terminal
self.raw_mode()
except KeyError:
pass
self.found_prefix = False
# Flush any remaining raw data bound for the victim
channel.send(buffer)
def raw_mode(self):
"""Save the current terminal state and enter raw mode.
If the terminal is already in raw mode, this function
does nothing."""
if self.saved_term_state is not None:
return
# Ensure we don't have any weird buffering issues
sys.stdout.flush()
# Python doesn't provide a way to use setvbuf, so we reopen stdout
# and specify no buffering. Duplicating stdin allows the user to press C-d
# at the local prompt, and still be able to return to the remote prompt.
try:
os.dup2(sys.stdin.fileno(), sys.stdout.fileno())
except OSError:
pass
sys.stdout = TextIOWrapper(
os.fdopen(os.dup(sys.stdin.fileno()), "bw", buffering=0),
write_through=True,
line_buffering=False,
)
# Grab and duplicate current attributes
fild = sys.stdin.fileno()
old = termios.tcgetattr(fild)
new = termios.tcgetattr(fild)
# Remove ECHO from lflag and ensure we won't block
new[3] &= ~(termios.ECHO | termios.ICANON)
new[6][termios.VMIN] = 0
new[6][termios.VTIME] = 0
termios.tcsetattr(fild, termios.TCSADRAIN, new)
# Set raw mode
tty.setraw(sys.stdin)
orig_fl = fcntl.fcntl(sys.stdin, fcntl.F_GETFL)
fcntl.fcntl(sys.stdin, fcntl.F_SETFL, orig_fl)
self.saved_term_state = old, orig_fl
def restore_term(self, new_line=True):
"""Restores the normal terminal settings. This does nothing if the
terminal is not currently in raw mode."""
if self.saved_term_state is None:
return
termios.tcsetattr(
sys.stdin.fileno(), termios.TCSADRAIN, self.saved_term_state[0]
)
# tty.setcbreak(sys.stdin)
fcntl.fcntl(sys.stdin, fcntl.F_SETFL, self.saved_term_state[1])
if new_line:
sys.stdout.write("\n")
self.saved_term_state = None
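# --- Editor's sketch (hedged; not part of the original pwncat source) ---
# Shows how raw_mode() and restore_term() are intended to be paired around a
# block that forwards raw keystrokes to the remote channel. The function and
# parameter names below are illustrative assumptions only.
def _example_raw_io_roundtrip(interactive, channel, data: bytes):
    # Enter raw mode so keystrokes reach us unbuffered and unechoed.
    interactive.raw_mode()
    try:
        # parse_prefix() intercepts the prefix key and bound shortcuts and
        # forwards everything else to the remote channel.
        interactive.parse_prefix(channel, data)
    finally:
        # Always restore the local terminal, even if forwarding raises.
        interactive.restore_term(new_line=True)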
class CommandLexer(RegexLexer):
"""Implements a Regular Expression based pygments lexer for dynamically highlighting
the pwncat prompt during typing. The tokens are generated from command definitions."""
tokens = {}
@classmethod
def build(cls, commands: List["CommandDefinition"]) -> Type["CommandLexer"]:
"""Build the RegexLexer token list from the command definitions"""
root = []
sorted_commands = sorted(commands, key=lambda cmd: len(cmd.PROG), reverse=True)
for command in sorted_commands:
root.append(
(
"^" + re.escape(command.PROG) + "( |$)",
token.Name.Function,
command.PROG,
)
)
mode = []
if command.ARGS is not None:
for args, param in command.ARGS.items():
for arg in args.split(","):
if not arg.startswith("-"):
continue
if param.complete != Complete.NONE:
# Enter param state
mode.append((r"\s+" + re.escape(arg), param.token, "param"))
else:
# Don't enter param state
mode.append((r"\s+" + re.escape(arg), param.token))
mode.append((r"\s+(\-\-help|\-h)", token.Name.Label))
mode.append((r"\"", token.String, "string"))
mode.append((r".", token.Text))
cls.tokens[command.PROG] = mode
root.append((r".", token.Text))
cls.tokens["root"] = root
cls.tokens["param"] = [
(r"\"", token.String, "string"),
(r"\s", token.Text, "#pop"),
(r"[^\s]", token.Text),
]
cls.tokens["string"] = [
(r"[^\"\\]+", token.String),
(r"\\.", token.String.Escape),
('"', token.String, "#pop"),
]
return cls
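# --- Editor's sketch (hedged; not part of the original source) ---
# One plausible way to plug the generated lexer into a prompt_toolkit session
# for live highlighting; `commands` is assumed to be the same list of
# CommandDefinition objects used elsewhere in this module.
def _example_highlighting_session(commands):
    from prompt_toolkit import PromptSession
    from prompt_toolkit.lexers import PygmentsLexer

    lexer_cls = CommandLexer.build(commands)
    return PromptSession(lexer=PygmentsLexer(lexer_cls))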
class RemotePathCompleter(Completer):
"""Complete remote file names/paths"""
def __init__(self, manager: "pwncat.manager.Manager", *args, **kwargs):
super().__init__(*args, **kwargs)
self.manager = manager
def get_completions(self, document: Document, complete_event: CompleteEvent):
if self.manager.target is None:
return
before = document.text_before_cursor.split()[-1]
path, partial_name = os.path.split(before)
if path == "":
path = "."
for name in self.manager.target.platform.listdir(path):
if name.startswith(partial_name):
yield Completion(
name,
start_position=-len(partial_name),
display=[("#ff0000", "(remote)"), ("", f" {name}")],
)
class LocalPathCompleter(Completer):
"""Complete local file names/paths."""
def get_completions(self, document: Document, complete_event: CompleteEvent):
before = document.text_before_cursor.split()[-1]
path, partial_name = os.path.split(before)
if path == "":
path = "."
# Ensure the directory exists
if not os.path.isdir(path):
return
for name in os.listdir(path):
if name.startswith(partial_name):
yield Completion(
name,
start_position=-len(partial_name),
display=[("fg:ansiyellow", "(local)"), ("", f" {name}")],
)
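# --- Editor's sketch (hedged; not part of the original source) ---
# Minimal illustration of combining the two path completers above with
# prompt_toolkit's merge_completers; `manager` is assumed to be a
# pwncat.manager.Manager instance.
def _example_merged_path_completer(manager):
    from prompt_toolkit.completion import merge_completers

    return merge_completers([LocalPathCompleter(), RemotePathCompleter(manager)])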
class CommandCompleter(Completer):
"""Tab-complete commands and all of their arguments dynamically using the
command definitions and their associated argument definitions."""
def __init__(
self, manager: "pwncat.manager.Manager", commands: List["CommandDefinition"]
):
"""Construct a new command completer"""
self.layers = {}
local_file_completer = LocalPathCompleter()
remote_file_completer = RemotePathCompleter(manager)
for command in commands:
self.layers[command.PROG] = [None, [], {}]
option_names = []
if command.ARGS is not None:
for name_list, param in command.ARGS.items():
name_list = name_list.split(",")
if param.complete == Complete.CHOICES:
completer = ("choices", param.kwargs["choices"])
elif param.complete == Complete.LOCAL_FILE:
completer = local_file_completer
elif param.complete == Complete.REMOTE_FILE:
completer = remote_file_completer
elif param.complete == Complete.NONE:
completer = None
if len(name_list) == 1 and not name_list[0].startswith("-"):
self.layers[command.PROG][1].append(completer)
else:
for name in name_list:
self.layers[command.PROG][2][name] = completer
option_names.append(name)
self.layers[command.PROG][0] = WordCompleter(
option_names + ["--help", "-h"]
)
self.completer = WordCompleter(list(self.layers))
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
"""Get a list of completions for the given document"""
text = document.text_before_cursor.lstrip()
try:
args = shlex.split(text)
except ValueError:
try:
args = shlex.split(text + '"')
except ValueError:
args = shlex.split(text + "'")
# We haven't finished typing the command. Use our word completer for
# commands
if text == "" or (len(args) == 1 and not text.endswith(" ")):
yield from self.completer.get_completions(document, complete_event)
return
# Not in a known command, can't autocomplete
if args[0] not in self.layers:
return
command = self.layers[args[0]]
args = args[1:]
next_completer = command[0]
this_completer = command[0]
positional = 0
# state = "options", completing options next
# state = "arguments", completing arguments to options next
state = "options"
for arg in args:
if state == "options":
# Flag options
if arg.startswith("-"):
# Exact match, with a sub-completer
if arg in command[2] and command[2][arg] is not None:
# Completer for next argument
next_completer = command[2][arg]
state = "arguments"
# Exact match, with no arguments
elif arg in command[2]:
# Command has no argument, next completer is options
# completer
next_completer = command[0]
state = "options"
this_completer = command[0]
# Non-exact match
else:
next_completer = command[0]
this_completer = command[0]
state = "options"
# Appears to be a positional argument, grab next positional
# completer and increment positional count
else:
if positional < len(command[1]):
this_completer = command[1][positional]
next_completer = command[0]
state = "options"
positional += 1
else:
this_completer = command[0]
next_completer = command[0]
state = "options"
else:
# Completing an argument to an option/switch. We can't verify
# its legitimacy, so we assume it's right and reset to the
# default state.
state = "options"
this_completer = next_completer
next_completer = command[0]
# We are completing the first argument. This could be
# any option argument or the first positional argument.
# We need to merge them.
if not args and text.endswith(" ") and command[1]:
completer = command[1][0]
if isinstance(completer, tuple) and completer[0] == "choices":
completer = WordCompleter(completer[1], WORD=True)
next_completer = merge_completers([next_completer, completer])
if isinstance(this_completer, tuple) and this_completer[0] == "choices":
this_completer = WordCompleter(this_completer[1], WORD=True)
if isinstance(next_completer, tuple) and next_completer[0] == "choices":
next_completer = WordCompleter(next_completer[1], WORD=True)
if text.endswith(" ") and next_completer is not None:
yield from next_completer.get_completions(document, complete_event)
elif this_completer is not None:
yield from this_completer.get_completions(document, complete_event)
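# --- Editor's sketch (hedged; not part of the original source) ---
# Example wiring of CommandCompleter into a prompt session; the arguments are
# assumed to match the constructor defined above.
def _example_completion_session(manager, commands):
    from prompt_toolkit import PromptSession

    return PromptSession(
        completer=CommandCompleter(manager, commands),
        complete_while_typing=True,
    )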
|
exercises/practice/rna-transcription/rna_transcription_test.py | gsilvapt/python | 1,177 | 11160318 | <reponame>gsilvapt/python<filename>exercises/practice/rna-transcription/rna_transcription_test.py
import unittest
from rna_transcription import (
to_rna,
)
# Tests adapted from `problem-specifications//canonical-data.json`
class RnaTranscriptionTest(unittest.TestCase):
def test_empty_rna_sequence(self):
self.assertEqual(to_rna(""), "")
def test_rna_complement_of_cytosine_is_guanine(self):
self.assertEqual(to_rna("C"), "G")
def test_rna_complement_of_guanine_is_cytosine(self):
self.assertEqual(to_rna("G"), "C")
def test_rna_complement_of_thymine_is_adenine(self):
self.assertEqual(to_rna("T"), "A")
def test_rna_complement_of_adenine_is_uracil(self):
self.assertEqual(to_rna("A"), "U")
def test_rna_complement(self):
self.assertEqual(to_rna("ACGTGGTCTTAA"), "UGCACCAGAAUU")
if __name__ == "__main__":
unittest.main()
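# --- Editor's sketch (hedged; not part of the original exercise repository) ---
# A minimal implementation that would satisfy the tests above, mapping each DNA
# nucleotide to its RNA complement (G->C, C->G, T->A, A->U).
def _reference_to_rna(dna_strand: str) -> str:
    return dna_strand.translate(str.maketrans("GCTA", "CGAU"))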
|
plynx/constants/web.py | khaxis/plynx | 137 | 11160333 | """Web constants"""
class ResponseStatus:
"""Returned response status"""
SUCCESS: str = 'SUCCESS'
FAILED: str = 'FAILED'
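# --- Editor's sketch (hedged; not part of the plynx source) ---
# Typical shape of a handler response built from these constants; the helper
# below is illustrative only.
def _example_response(data, status: str = ResponseStatus.SUCCESS) -> dict:
    return {"status": status, "data": data}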
|
examples/python/geometry/voxel_grid_carving.py | amoran-symbio/Open3D | 1,455 | 11160362 | # ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
import open3d as o3d
import numpy as np
import sys
import os
pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
test_data_path = os.path.join(os.path.dirname(pyexample_path), 'test_data')
sys.path.append(pyexample_path)
import open3d_example as o3dex
def xyz_spherical(xyz):
x = xyz[0]
y = xyz[1]
z = xyz[2]
r = np.sqrt(x * x + y * y + z * z)
r_x = np.arccos(y / r)
r_y = np.arctan2(z, x)
return [r, r_x, r_y]
def get_rotation_matrix(r_x, r_y):
rot_x = np.asarray([[1, 0, 0], [0, np.cos(r_x), -np.sin(r_x)],
[0, np.sin(r_x), np.cos(r_x)]])
rot_y = np.asarray([[np.cos(r_y), 0, np.sin(r_y)], [0, 1, 0],
[-np.sin(r_y), 0, np.cos(r_y)]])
return rot_y.dot(rot_x)
def get_extrinsic(xyz):
rvec = xyz_spherical(xyz)
r = get_rotation_matrix(rvec[1], rvec[2])
t = np.asarray([0, 0, 2]).transpose()
trans = np.eye(4)
trans[:3, :3] = r
trans[:3, 3] = t
return trans
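# --- Editor's sketch (hedged; not part of the original Open3D example) ---
# Sanity check for get_extrinsic(): regardless of the camera direction, the
# world origin should land at depth 2 in front of the camera, because the
# translation component is fixed to [0, 0, 2].
def _check_extrinsic_places_origin_at_depth_two():
    trans = get_extrinsic([0.0, 0.0, 1.0])
    origin_in_camera = trans @ np.asarray([0.0, 0.0, 0.0, 1.0])
    assert np.allclose(origin_in_camera[:3], [0.0, 0.0, 2.0])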
def preprocess(model):
min_bound = model.get_min_bound()
max_bound = model.get_max_bound()
center = min_bound + (max_bound - min_bound) / 2.0
scale = np.linalg.norm(max_bound - min_bound) / 2.0
vertices = np.asarray(model.vertices)
vertices -= center
model.vertices = o3d.utility.Vector3dVector(vertices / scale)
return model
def voxel_carving(mesh,
output_filename,
camera_path,
cubic_size,
voxel_resolution,
w=300,
h=300):
mesh.compute_vertex_normals()
camera_sphere = o3d.io.read_triangle_mesh(camera_path)
# Setup dense voxel grid.
voxel_carving = o3d.geometry.VoxelGrid.create_dense(
width=cubic_size,
height=cubic_size,
depth=cubic_size,
voxel_size=cubic_size / voxel_resolution,
origin=[-cubic_size / 2.0, -cubic_size / 2.0, -cubic_size / 2.0],
color=[1.0, 0.7, 0.0])
# Rescale geometry.
camera_sphere = preprocess(camera_sphere)
mesh = preprocess(mesh)
# Setup visualizer to render depthmaps.
vis = o3d.visualization.Visualizer()
vis.create_window(width=w, height=h, visible=False)
vis.add_geometry(mesh)
vis.get_render_option().mesh_show_back_face = True
ctr = vis.get_view_control()
param = ctr.convert_to_pinhole_camera_parameters()
# Carve voxel grid.
centers_pts = np.zeros((len(camera_sphere.vertices), 3))
for cid, xyz in enumerate(camera_sphere.vertices):
# Get new camera pose.
trans = get_extrinsic(xyz)
param.extrinsic = trans
c = np.linalg.inv(trans).dot(np.asarray([0, 0, 0, 1]).transpose())
centers_pts[cid, :] = c[:3]
ctr.convert_from_pinhole_camera_parameters(param)
# Capture depth image and make a point cloud.
vis.poll_events()
vis.update_renderer()
depth = vis.capture_depth_float_buffer(False)
# Depth map carving method.
voxel_carving.carve_depth_map(o3d.geometry.Image(depth), param)
print("Carve view %03d/%03d" % (cid + 1, len(camera_sphere.vertices)))
vis.destroy_window()
return voxel_carving
if __name__ == "__main__":
mesh = o3dex.get_armadillo_mesh()
output_filename = os.path.join(test_data_path, 'voxelized.ply')
camera_path = os.path.join(test_data_path, 'sphere.ply')
cubic_size = 2.0
voxel_resolution = 128.0
carved_voxels = voxel_carving(mesh, output_filename, camera_path,
cubic_size, voxel_resolution)
print("Carved voxels ...")
print(carved_voxels)
o3d.visualization.draw([carved_voxels])
|
tests/basics/int_divzero.py | learnforpractice/micropython-cpp | 13,648 | 11160363 | try:
1 // 0
except ZeroDivisionError:
print("ZeroDivisionError")
try:
1 % 0
except ZeroDivisionError:
print("ZeroDivisionError")
|
tests/pytests/unit/states/apache/test_conf.py | babs/salt | 9,425 | 11160368 | <filename>tests/pytests/unit/states/apache/test_conf.py
import pytest
import salt.states.apache_conf as apache_conf
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {apache_conf: {}}
def test_enabled():
"""
Test to ensure an Apache conf is enabled.
"""
name = "saltstack.com"
ret = {"name": name, "result": True, "changes": {}, "comment": ""}
mock = MagicMock(side_effect=[True, False, False])
mock_str = MagicMock(return_value={"Status": ["enabled"]})
with patch.dict(
apache_conf.__salt__,
{"apache.check_conf_enabled": mock, "apache.a2enconf": mock_str},
):
comt = "{} already enabled.".format(name)
ret.update({"comment": comt})
assert apache_conf.enabled(name) == ret
comt = "Apache conf {} is set to be enabled.".format(name)
ret.update(
{"comment": comt, "result": None, "changes": {"new": name, "old": None}}
)
with patch.dict(apache_conf.__opts__, {"test": True}):
assert apache_conf.enabled(name) == ret
comt = "Failed to enable {} Apache conf".format(name)
ret.update({"comment": comt, "result": False, "changes": {}})
with patch.dict(apache_conf.__opts__, {"test": False}):
assert apache_conf.enabled(name) == ret
def test_disabled():
"""
Test to ensure an Apache conf is disabled.
"""
name = "saltstack.com"
ret = {"name": name, "result": None, "changes": {}, "comment": ""}
mock = MagicMock(side_effect=[True, True, False])
mock_str = MagicMock(return_value={"Status": ["disabled"]})
with patch.dict(
apache_conf.__salt__,
{"apache.check_conf_enabled": mock, "apache.a2disconf": mock_str},
):
comt = "Apache conf {} is set to be disabled.".format(name)
ret.update({"comment": comt, "changes": {"new": None, "old": name}})
with patch.dict(apache_conf.__opts__, {"test": True}):
assert apache_conf.disabled(name) == ret
comt = "Failed to disable {} Apache conf".format(name)
ret.update({"comment": comt, "result": False, "changes": {}})
with patch.dict(apache_conf.__opts__, {"test": False}):
assert apache_conf.disabled(name) == ret
comt = "{} already disabled.".format(name)
ret.update({"comment": comt, "result": True})
assert apache_conf.disabled(name) == ret
|
examples/asr_librispeech/local/prepare_librispeech.py | slikos/espresso | 920 | 11160390 | <reponame>slikos/espresso
#!/usr/bin/env python3
# Copyright (c) <NAME>
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import os
import sys
from tqdm import tqdm
try:
from torchaudio.datasets import LIBRISPEECH
except ImportError:
raise ImportError("Please install torchaudio with: pip install torchaudio")
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(__file__)
SPLITS = [
"train-clean-100",
"train-clean-360",
"train-other-500",
"dev-clean",
"dev-other",
"test-clean",
"test-other",
]
def get_parser():
parser = argparse.ArgumentParser(
description="Prepare LibriSpeech corpus"
)
# fmt: off
parser.add_argument("corpus_root", type=str, help="path to the LibriSpeech root directory")
parser.add_argument("output_root", type=str, help="path to the root directory of generated data")
parser.add_argument("--folder-in-archive", type=str, default="LibriSpeech", help="the top-level directory of the dataset")
parser.add_argument("--download", action="store_true", help="download the dataset if it is not found at corpus root path")
# fmt: on
return parser
def main(args):
corpus_root = Path(args.corpus_root).absolute()
output_root = Path(args.output_root).absolute()
corpus_root.mkdir(exist_ok=True)
output_root.mkdir(exist_ok=True)
for split in SPLITS:
logger.info(f"Preparing data for split {split}...")
output_dir = output_root / split.replace("-", "_")
output_dir.mkdir(exist_ok=True)
wave_file = output_dir / "wav.txt"
text_file = output_dir / "text.txt"
if os.path.exists(wave_file) and os.path.exists(text_file):
logger.info(f"Both {wave_file} and {text_file} exist, skip regenerating")
continue
dataset = LIBRISPEECH(
corpus_root.as_posix(), url=split, folder_in_archive=args.folder_in_archive, download=args.download
)
with open(wave_file, "w", encoding="utf-8") as wave_f, open(text_file, "w", encoding="utf-8") as text_f:
for data_tuple in tqdm(dataset):
if len(data_tuple) == 6: # torchaudio=0.7.0
# (waveform, sample_rate, text, speaker_id, chapter_id, utterance_idx)
text, speaker_id, chapter_id, utterance_idx = data_tuple[2], data_tuple[3], data_tuple[4], data_tuple[5]
else: # torchaudio>=0.8.0
# (waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_idx)
assert len(data_tuple) == 7
text, speaker_id, chapter_id, utterance_idx = data_tuple[3], data_tuple[4], data_tuple[5], data_tuple[6]
utterance_idx = str(utterance_idx).zfill(4)
utterance_id = f"{speaker_id}-{chapter_id}-{utterance_idx}"
utterance_path = os.path.join(
corpus_root.as_posix(), args.folder_in_archive, split, str(speaker_id), str(chapter_id), utterance_id
)
print(f"{utterance_id} {utterance_path}.flac", file=wave_f)
print(f"{utterance_id} {text}", file=text_f)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
main(args)
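# --- Editor's note (hedged): example invocation; paths are placeholders ---
# Based on the argparse definition above, a typical run might look like:
#
#   python prepare_librispeech.py /path/to/corpus ./data --download
#
# which downloads any missing split under /path/to/corpus/LibriSpeech and
# writes wav.txt / text.txt files under ./data/<split_with_underscores>/.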
|
tests/search_test.py | mtcolman/django-DefectDojo | 249 | 11160451 | <gh_stars>100-1000
import unittest
import sys
from base_test_class import BaseTestCase
from selenium.webdriver.common.by import By
class SearchTests(BaseTestCase):
def test_login(self):
driver = self.driver
def test_search(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('finding')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_cve(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('cve:CVE-2020-12345')
driver.find_element(By.ID, "simple_search_submit").click()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('CVE-2020-12345')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_tag(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('tag:magento')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_product_tag(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('product-tag:java')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_engagement_tag(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('engagement-tag:php')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_test_tag(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('test-tag:go')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_tags(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('tags:php')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_product_tags(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('product-tags:java')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_engagement_tags(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('engagement-tags:php')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_test_tags(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('test-tags:go')
driver.find_element(By.ID, "simple_search_submit").click()
def test_search_id(self):
# very basic search test to see if it doesn't 500
driver = self.goto_some_page()
driver.find_element(By.ID, "simple_search").clear()
driver.find_element(By.ID, "simple_search").send_keys('id:1')
driver.find_element(By.ID, "simple_search_submit").click()
def suite():
suite = unittest.TestSuite()
suite.addTest(BaseTestCase('test_login'))
suite.addTest(BaseTestCase('disable_block_execution'))
suite.addTest(SearchTests('test_search'))
suite.addTest(SearchTests('test_search_cve'))
suite.addTest(SearchTests('test_search_tag'))
suite.addTest(SearchTests('test_search_product_tag'))
suite.addTest(SearchTests('test_search_engagement_tag'))
suite.addTest(SearchTests('test_search_test_tag'))
suite.addTest(SearchTests('test_search_tags'))
suite.addTest(SearchTests('test_search_product_tags'))
suite.addTest(SearchTests('test_search_engagement_tags'))
suite.addTest(SearchTests('test_search_test_tags'))
suite.addTest(SearchTests('test_search_id'))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(descriptions=True, failfast=True, verbosity=2)
ret = not runner.run(suite()).wasSuccessful()
BaseTestCase.tearDownDriver()
sys.exit(ret)
|
python/tvm/relay/backend/contrib/ethosu/op/unary_elementwise.py | XiaoSong9905/tvm | 4,640 | 11160469 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operator for unary elementwise operations for Arm(R) Ethos(TM)-U NPU"""
from typing import Optional
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import unary_elementwise_compute
def _extract_ethosu_unary_elementwise_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_unary_elementwise compute TE
from a ethosu_unary_elementwise Relay call."""
ifm = args[0]
lut = args[1]
operator_type = attrs.operator_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
ofm_channels = attrs.ofm_channels
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
ofm_channels,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.unary_elementwise", "FTVMCompute")
def create_ethosu_unary_elementwise_compute(attrs, args, out_type):
"""Create an ethosu_unary_elementwise compute op."""
params = _extract_ethosu_unary_elementwise_params(attrs, args)
op = unary_elementwise_compute(*params)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.unary_elementwise", "FTVMStrategy")
def unary_elementwise_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_unary_elementwise_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_unary_elementwise",
)
return strategy
def ethosu_unary_elementwise(
ifm: tvm.relay.Expr,
lut: tvm.relay.Expr,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ofm_channels: int,
activation: Optional[str] = "NONE",
clip_min: Optional[int] = 0,
clip_max: Optional[int] = 0,
rounding_mode: Optional[str] = "TFL",
ifm_layout: Optional[str] = "NHWC",
ofm_layout: Optional[str] = "NHWC",
) -> tvm.relay.Call:
"""This is a quantized unary elementwise operation as supported by the
NPU. It accepts either NHWC or NHCWB16 format for the input data.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
lut : tvm.relay.Expr
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the unary elementwise operator.
"ABS"
"CLZ"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ofm_channels : int
The number of OFM channels.
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int, optional
The minimum clipping value if activation = "CLIP".
clip_max : int, optional
The maximum clipping value if activation = "CLIP".
rounding_mode : str, optional
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
out : tvm.relay.Call
A call to the ethosu_unary_elementwise op.
"""
return _make.ethosu_unary_elementwise(
ifm,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
ofm_channels,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ofm_layout,
)
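# --- Editor's sketch (hedged; not part of the TVM source or its tests) ---
# Rough illustration of calling the operator above from Relay. The shapes,
# quantization parameters, and the empty look-up table constant are assumptions
# made for illustration only.
def _example_abs_call():
    from tvm import relay

    ifm = relay.var("ifm", shape=(1, 16, 16, 8), dtype="int8")
    lut = relay.const([], dtype="int8")
    return ethosu_unary_elementwise(
        ifm,
        lut,
        operator_type="ABS",
        ifm_scale=1.0,
        ifm_zero_point=0,
        ofm_scale=1.0,
        ofm_zero_point=0,
        ofm_channels=8,
    )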
|
release/stubs.min/RevitServices/Transactions.py | htlcnn/ironpython-stubs | 182 | 11160480 | # encoding: utf-8
# module RevitServices.Transactions calls itself Transactions
# from RevitServices,Version=1.2.1.3083,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class AutomaticTransactionStrategy(object,ITransactionStrategy):
""" AutomaticTransactionStrategy() """
def EnsureInTransaction(self,wrapper,document):
""" EnsureInTransaction(self: AutomaticTransactionStrategy,wrapper: TransactionWrapper,document: Document) -> TransactionHandle """
pass
def ForceCloseTransaction(self,handle):
""" ForceCloseTransaction(self: AutomaticTransactionStrategy,handle: TransactionHandle) """
pass
def TransactionTaskDone(self,handle):
""" TransactionTaskDone(self: AutomaticTransactionStrategy,handle: TransactionHandle) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
class DebugTransactionStrategy(object,ITransactionStrategy):
""" DebugTransactionStrategy() """
def EnsureInTransaction(self,wrapper,document):
""" EnsureInTransaction(self: DebugTransactionStrategy,wrapper: TransactionWrapper,document: Document) -> TransactionHandle """
pass
def ForceCloseTransaction(self,handle):
""" ForceCloseTransaction(self: DebugTransactionStrategy,handle: TransactionHandle) """
pass
def TransactionTaskDone(self,handle):
""" TransactionTaskDone(self: DebugTransactionStrategy,handle: TransactionHandle) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
class FailureDelegate(MulticastDelegate,ICloneable,ISerializable):
""" FailureDelegate(object: object,method: IntPtr) """
def BeginInvoke(self,failures,callback,object):
""" BeginInvoke(self: FailureDelegate,failures: FailuresAccessor,callback: AsyncCallback,object: object) -> IAsyncResult """
pass
def CombineImpl(self,*args):
"""
CombineImpl(self: MulticastDelegate,follow: Delegate) -> Delegate
Combines this System.Delegate with the specified System.Delegate to form a new
delegate.
follow: The delegate to combine with this delegate.
Returns: A delegate that is the new root of the System.MulticastDelegate invocation list.
"""
pass
def DynamicInvokeImpl(self,*args):
"""
DynamicInvokeImpl(self: Delegate,args: Array[object]) -> object
Dynamically invokes (late-bound) the method represented by the current delegate.
args: An array of objects that are the arguments to pass to the method represented by
the current delegate.-or- null,if the method represented by the current
delegate does not require arguments.
Returns: The object returned by the method represented by the delegate.
"""
pass
def EndInvoke(self,result):
""" EndInvoke(self: FailureDelegate,result: IAsyncResult) """
pass
def GetMethodImpl(self,*args):
"""
GetMethodImpl(self: MulticastDelegate) -> MethodInfo
Returns a static method represented by the current System.MulticastDelegate.
Returns: A static method represented by the current System.MulticastDelegate.
"""
pass
def Invoke(self,failures):
""" Invoke(self: FailureDelegate,failures: FailuresAccessor) """
pass
def RemoveImpl(self,*args):
"""
RemoveImpl(self: MulticastDelegate,value: Delegate) -> Delegate
Removes an element from the invocation list of this System.MulticastDelegate
that is equal to the specified delegate.
value: The delegate to search for in the invocation list.
Returns: If value is found in the invocation list for this instance,then a new
System.Delegate without value in its invocation list; otherwise,this instance
with its original invocation list.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,object,method):
""" __new__(cls: type,object: object,method: IntPtr) """
pass
def __reduce_ex__(self,*args):
pass
class ITransactionStrategy:
# no doc
def EnsureInTransaction(self,wrapper,document):
""" EnsureInTransaction(self: ITransactionStrategy,wrapper: TransactionWrapper,document: Document) -> TransactionHandle """
pass
def ForceCloseTransaction(self,handle):
""" ForceCloseTransaction(self: ITransactionStrategy,handle: TransactionHandle) """
pass
def TransactionTaskDone(self,handle):
""" TransactionTaskDone(self: ITransactionStrategy,handle: TransactionHandle) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class TransactionHandle(object):
# no doc
def CancelTransaction(self):
""" CancelTransaction(self: TransactionHandle) -> TransactionStatus """
pass
def CommitTransaction(self):
""" CommitTransaction(self: TransactionHandle) -> TransactionStatus """
pass
Status=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Status(self: TransactionHandle) -> TransactionStatus
"""
class TransactionManager(object):
# no doc
def EnsureInTransaction(self,document):
""" EnsureInTransaction(self: TransactionManager,document: Document) """
pass
def ForceCloseTransaction(self):
""" ForceCloseTransaction(self: TransactionManager) """
pass
@staticmethod
def SetupManager(strategy=None):
""" SetupManager(strategy: ITransactionStrategy)SetupManager() """
pass
def TransactionTaskDone(self):
""" TransactionTaskDone(self: TransactionManager) """
pass
DoAssertInIdleThread=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: DoAssertInIdleThread(self: TransactionManager) -> bool
Set: DoAssertInIdleThread(self: TransactionManager)=value
"""
Strategy=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Strategy(self: TransactionManager) -> ITransactionStrategy
Set: Strategy(self: TransactionManager)=value
"""
TransactionWrapper=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: TransactionWrapper(self: TransactionManager) -> TransactionWrapper
"""
OnLog=None
class TransactionWrapper(object):
# no doc
def StartTransaction(self,document):
""" StartTransaction(self: TransactionWrapper,document: Document) -> TransactionHandle """
pass
TransactionActive=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: TransactionActive(self: TransactionWrapper) -> bool
"""
FailuresRaised=None
TransactionCancelled=None
TransactionCommitted=None
TransactionName='Dynamo-51297CB5 Script'
TransactionStarted=None
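# --- Editor's sketch (hedged; not part of the generated stub) ---
# The usual Dynamo/RevitServices pattern wraps document edits between
# EnsureInTransaction and TransactionTaskDone. The singleton accessors shown
# below (DocumentManager.Instance, TransactionManager.Instance) are assumed
# from common usage and are not declared in this stub.
#
# doc = DocumentManager.Instance.CurrentDBDocument
# TransactionManager.Instance.EnsureInTransaction(doc)
# # ... modify Revit elements here ...
# TransactionManager.Instance.TransactionTaskDone()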
|
metrics/frugalscore/frugalscore.py | MitchellTesla/datasets | 3,395 | 11160484 | <reponame>MitchellTesla/datasets
# Copyright 2022 The HuggingFace Datasets Authors and the current metric script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FrugalScore metric."""
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
import datasets
_CITATION = """\
@article{eddine2021frugalscore,
title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
author={<NAME> and <NAME> and <NAME>},
journal={arXiv preprint arXiv:2110.08559},
year={2021}
}
"""
_DESCRIPTION = """\
FrugalScore is a reference-based metric for NLG models evaluation. It is based on a distillation approach that allows to learn a fixed, low cost version of any expensive NLG metric, while retaining most of its original performance.
"""
_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores.
Args:
predictions (list of str): list of predictions to score. Each predictions
should be a string.
references (list of str): list of reference for each prediction. Each
reference should be a string.
batch_size (int): the batch size for predictions.
max_length (int): maximum sequence length.
device (str): either gpu or cpu
Returns:
scores (list of int): list of scores.
Examples:
>>> frugalscore = datasets.load_metric("frugalscore")
>>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'])
>>> print([round(s, 3) for s in results["scores"]])
[0.631, 0.645]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class FRUGALSCORE(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}
),
homepage="https://github.com/moussaKam/FrugalScore",
)
def _download_and_prepare(self, dl_manager):
if self.config_name == "default":
checkpoint = "moussaKam/frugalscore_tiny_bert-base_bert-score"
else:
checkpoint = self.config_name
self.model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
self.tokenizer = AutoTokenizer.from_pretrained(checkpoint)
def _compute(
self,
predictions,
references,
batch_size=32,
max_length=128,
device=None,
):
"""Returns the scores"""
assert len(predictions) == len(
references
), "predictions and references should have the same number of sentences."
if device is not None:
assert device in ["gpu", "cpu"], "device should be either gpu or cpu."
else:
device = "gpu" if torch.cuda.is_available() else "cpu"
training_args = TrainingArguments(
"trainer",
fp16=(device == "gpu"),
per_device_eval_batch_size=batch_size,
report_to="all",
no_cuda=(device == "cpu"),
log_level="warning",
)
dataset = {"sentence1": predictions, "sentence2": references}
raw_datasets = datasets.Dataset.from_dict(dataset)
def tokenize_function(data):
return self.tokenizer(
data["sentence1"], data["sentence2"], max_length=max_length, truncation=True, padding=True
)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
tokenized_datasets.remove_columns(["sentence1", "sentence2"])
trainer = Trainer(self.model, training_args, tokenizer=self.tokenizer)
predictions = trainer.predict(tokenized_datasets)
return {"scores": list(predictions.predictions.squeeze(-1))}
|
base/site-packages/jpush/device/__init__.py | edisonlz/fastor | 285 | 11160511 | from .core import Device
from .entity import (
add,
remove,
device_tag,
device_alias,
device_regid,
device_mobile,
)
__all__ = [
Device,
add,
remove,
device_tag,
device_alias,
device_regid,
device_mobile,
]
|