max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
src/blade/inclusion_check.py | Madision-Jack/blade-build | 842 | 12607653 | # Copyright (c) 2021 Tencent Inc.
# All rights reserved.
#
# Author: chen3feng <<EMAIL>>
# Date: Feb 14, 2021
"""C/C++ header file inclusion dependency declaration check."""
import os
from blade import console
from blade import util
from blade.util import pickle
def find_libs_by_header(hdr, hdr_targets_map, hdr_dir_targets_map):
"""Find the libraries to which the header file belongs."""
libs = hdr_targets_map.get(hdr)
if libs:
return libs
hdr_dir = os.path.dirname(hdr)
while True:
libs = hdr_dir_targets_map.get(hdr_dir)
if libs:
return libs
old_hdr_dir = hdr_dir
hdr_dir = os.path.dirname(hdr_dir)
if hdr_dir == old_hdr_dir:
return set()
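# A minimal sketch of the lookup order above, using hypothetical maps (the keys and
# target labels below are illustrative, not taken from a real build):
#
#   hdr_targets_map = {'common/rpc/rpc_client.h': {'common/rpc:rpc'}}
#   hdr_dir_targets_map = {'common/base': {'common/base:base'}}
#
#   find_libs_by_header('common/rpc/rpc_client.h', hdr_targets_map, hdr_dir_targets_map)
#   # -> {'common/rpc:rpc'}    (exact header match)
#   find_libs_by_header('common/base/inner/util.h', hdr_targets_map, hdr_dir_targets_map)
#   # -> {'common/base:base'}  (resolved by walking up to the declared include dir)
#   find_libs_by_header('thirdparty/x.h', hdr_targets_map, hdr_dir_targets_map)
#   # -> set()                 (no declaring target found)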
class GlobalDeclaration(object):
"""Global inclusion dependenct relationship declaration."""
def __init__(self, declaration_file):
self._declaration_file = declaration_file
self._initialized = False
def lazy_init(self, reason):
if self._initialized:
return
console.debug("Load global declaration file, " + reason)
declaration = pickle.load(open(self._declaration_file, 'rb'))
# pylint: disable=attribute-defined-outside-init
self._hdr_targets_map = declaration['public_hdrs']
self._hdr_dir_targets_map = declaration['public_incs']
self._private_hdrs_target_map = declaration['private_hdrs']
self._allowed_undeclared_hdrs = declaration['allowed_undeclared_hdrs']
self._initialized = True
def find_libs_by_header(self, hdr):
self.lazy_init('find_libs_by_header ' + hdr)
return find_libs_by_header(hdr, self._hdr_targets_map, self._hdr_dir_targets_map)
def find_targets_by_private_hdr(self, hdr):
"""Find targets by private header file."""
self.lazy_init('find_targets_by_private_hdr ' + hdr)
return self._private_hdrs_target_map.get(hdr, set())
def is_allowed_undeclared_hdr(self, hdr):
self.lazy_init('is_allowed_undeclared_hdr ' + hdr)
return hdr in self._allowed_undeclared_hdrs
def _parse_inclusion_stacks(path, build_dir):
"""Parae headers inclusion stacks from file.
Given the following inclusions found in the app/example/foo.cc.o.H:
. ./app/example/foo.h
.. build64_release/app/example/proto/foo.pb.h
... build64_release/common/rpc/rpc_service.pb.h
. build64_release/app/example/proto/bar.pb.h
. ./common/rpc/rpc_client.h
.. build64_release/common/rpc/rpc_options.pb.h
Return a list of all directly included header files and a list with each item being a list
representing where the header is included from in the current translation unit.
Note that we will STOP tracking at the first generated header (if any)
while other headers included from the header directly or indirectly are
ignored since that part of dependency is ensured by the generator, such
as proto_library.
As shown in the example above, it returns the following direct header list:
[
'app/example/foo.h',
'build64_release/app/example/proto/bar.pb.h',
'common/rpc/rpc_client.h',
]
and the inclusion stacks:
[
['app/example/foo.h', 'build64_release/app/example/proto/foo.pb.h'],
['build64_release/app/example/proto/bar.pb.h'],
['common/rpc/rpc_client.h', 'build64_release/common/rpc/rpc_options.pb.h'],
]
"""
direct_hdrs = [] # The directly included header files
stacks, hdrs_stack = [], []
def _process_hdr(level, hdr, current_level):
if hdr.startswith('/'):
skip_level = level
elif hdr.startswith(build_dir):
skip_level = level
stacks.append(hdrs_stack + [_remove_build_dir_prefix(os.path.normpath(hdr), build_dir)])
else:
current_level = level
hdrs_stack.append(_remove_build_dir_prefix(os.path.normpath(hdr), build_dir))
skip_level = -1
return current_level, skip_level
current_level = 0
skip_level = -1
with open(path) as f:
for line in f:
line = line.rstrip() # Strip `\n`
if not line.startswith('.'):
# The remaining lines are useless for us
break
level, hdr = _parse_hdr_level_line(line)
if level == -1:
console.log('%s: Unrecognized line %s' % (path, line))
break
if level == 1 and not hdr.startswith('/'):
direct_hdrs.append(_remove_build_dir_prefix(os.path.normpath(hdr), build_dir))
if level > current_level:
if skip_level != -1 and level > skip_level:
continue
assert level == current_level + 1
current_level, skip_level = _process_hdr(level, hdr, current_level)
else:
while current_level >= level:
current_level -= 1
hdrs_stack.pop()
current_level, skip_level = _process_hdr(level, hdr, current_level)
return direct_hdrs, stacks
def _parse_hdr_level_line(line):
"""Parse a normal line of a header stack file
Example:
. ./common/rpc/rpc_client.h
"""
pos = line.find(' ')
if pos == -1:
return -1, ''
level = pos
hdr = line[pos + 1:]
if hdr.startswith('./'):
hdr = hdr[2:]
return level, hdr
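# Illustrative results of the parser above on the sample lines from the module docstring
# (the leading dots encode the inclusion depth; a leading './' is stripped from workspace paths):
#
#   _parse_hdr_level_line('. ./common/rpc/rpc_client.h')
#   # -> (1, 'common/rpc/rpc_client.h')
#   _parse_hdr_level_line('.. build64_release/app/example/proto/foo.pb.h')
#   # -> (2, 'build64_release/app/example/proto/foo.pb.h')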
def _remove_build_dir_prefix(path, build_dir):
"""Remove the build dir prefix of path (e.g. build64_release/)
Args:
path:str, the full path starts from the workspace root
"""
prefix = build_dir + os.sep
if path.startswith(prefix):
return path[len(prefix):]
return path
class Checker(object):
"""C/C++ Header file inclusion dependency checker"""
def __init__(self, target):
self.type = target['type']
self.name = target['name']
self.path = target['path']
self.key = target['key']
self.deps = target['deps']
self.build_dir = target['build_dir']
self.expanded_srcs = target['expanded_srcs']
self.expanded_hdrs = target['expanded_hdrs']
self.source_location = target['source_location']
self.declared_hdrs = target['declared_hdrs']
self.declared_incs = target['declared_incs']
self.declared_genhdrs = target['declared_genhdrs']
self.declared_genincs = target['declared_genincs']
self.hdrs_deps = target['hdrs_deps']
self.private_hdrs_deps = target['private_hdrs_deps']
self.allowed_undeclared_hdrs = target['allowed_undeclared_hdrs']
self.suppress = target['suppress']
self.severity = target['severity']
inclusion_declaration_file = os.path.join(self.build_dir, 'inclusion_declaration.data')
self.global_declaration = GlobalDeclaration(inclusion_declaration_file)
def _find_inclusion_file(self, src, is_header):
"""Find the '.H' file for the given src.
The `.H` file is generated from gcc's `-H` option, see
https://gcc.gnu.org/onlinedocs/gcc/Preprocessor-Options.html
for details.
"""
# NOTE: The inclusion files for a header file and an impl file have different extension names.
objs_dir = os.path.join(self.build_dir, self.path, self.name + '.objs')
path = ('%s.H' if is_header else '%s.o.H') % os.path.join(objs_dir, src)
if not os.path.exists(path):
return ''
return path
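# Illustrative path construction (the build_dir, target path and name values are hypothetical):
# for build_dir='build64_release', self.path='app/example' and self.name='example_lib',
#   src='foo.cc', is_header=False -> 'build64_release/app/example/example_lib.objs/foo.cc.o.H'
#   src='foo.h',  is_header=True  -> 'build64_release/app/example/example_lib.objs/foo.h.H'
# and '' is returned when the file does not exist on disk.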
def _hdr_is_declared(self, hdr):
return self._hdr_is_declared_in(hdr, self.declared_hdrs, self.declared_incs)
def _hdr_is_transitive_declared(self, hdr):
return self._hdr_is_declared_in(hdr, self.declared_genhdrs, self.declared_genincs)
def _hdr_is_declared_in(self, hdr, declared_hdrs, declared_incs):
if hdr in declared_hdrs:
return True
for dir in declared_incs:
if hdr.startswith(dir):
return True
return False
def _check_direct_headers(self, full_src, direct_hdrs, suppressd_hdrs,
missing_dep_hdrs, undeclared_hdrs, check_msg):
"""Verify directly included header files is in deps."""
msg = []
for hdr in direct_hdrs:
if hdr in self.declared_hdrs:
console.diagnose(self.source_location, 'debug', '"%s" is a declared header' % (hdr))
continue
libs = self.find_libs_by_header(hdr)
if not libs:
libs = self.find_targets_by_private_hdr(hdr)
if libs and self.key not in libs:
msg.append(' "%s" is a private header file of %s' % (
hdr, self._or_joined_libs(libs)))
continue
console.diagnose(self.source_location, 'debug', '"%s" is an undeclared header' % hdr)
undeclared_hdrs.add(hdr)
# We also need to check suppressd_hdrs because the target may not be loaded in a partial build
if hdr not in suppressd_hdrs and not self.is_allowed_undeclared_hdr(hdr):
msg.append(' %s' % self._header_undeclared_message(hdr))
continue
deps = set(self.deps + [self.key]) # Don't forget target itself
if not (libs & deps): # pylint: disable=superfluous-parens
# NOTE:
# We don't report a suppressed hdr, but we still need to record it as a failure,
# because a passed src will not be verified again, even if we remove it from the
# suppress list.
# Same reason in the _check_generated_headers.
missing_dep_hdrs.add(hdr)
if hdr not in suppressd_hdrs:
msg.append(' For %s' % self._hdr_declaration_message(hdr, libs))
if msg:
check_msg.append(' In file included from "%s",' % full_src)
check_msg += msg
def find_libs_by_header(self, hdr):
# Look in the local incchk file first to avoid loading the large global declaration.
# The same below.
if hdr in self.hdrs_deps:
return self.hdrs_deps[hdr]
return self.global_declaration.find_libs_by_header(hdr)
def find_targets_by_private_hdr(self, hdr):
if hdr in self.private_hdrs_deps:
return self.private_hdrs_deps[hdr]
return self.global_declaration.find_targets_by_private_hdr(hdr)
def is_allowed_undeclared_hdr(self, hdr):
if hdr in self.allowed_undeclared_hdrs:
return self.allowed_undeclared_hdrs[hdr]
return self.global_declaration.is_allowed_undeclared_hdr(hdr)
def _header_undeclared_message(self, hdr):
msg = '"%s" is not declared in any cc target. ' % hdr
if util.path_under_dir(hdr, self.path):
msg += 'If it belongs to this target, it should be declared in "src"'
if self.type.endswith('_library'):
msg += ' if it is private or in "hdrs" if it is public'
msg += ', otherwise '
msg += 'it should be declared in "hdrs" of the appropriate library to which it belongs'
return msg
def _hdr_declaration_message(self, hdr, libs=None):
if libs is None:
libs = self.find_libs_by_header(hdr)
if not libs:
return '"%s"' % hdr
return '"%s", which belongs to %s' % (hdr, self._or_joined_libs(libs))
def _or_joined_libs(self, libs):
"""Return " or " joind libs descriptive string."""
def beautify(lib):
# Convert the full path to the ':'-prefixed form if it is in the same directory as this target.
if lib.startswith(self.path + ':'):
return lib[len(self.path):]
return '//' + lib
return ' or '.join(['"%s"' % beautify(lib) for lib in libs])
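# Illustrative output, assuming self.path == 'app/example' and
# libs == {'app/example:foo', 'common/rpc:rpc'} (iteration order of the set may vary):
#   '":foo" or "//common/rpc:rpc"'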
def _check_generated_headers(self, full_src, stacks, direct_hdrs, suppressd_hdrs,
missing_dep_hdrs, check_msg):
"""
Verify that indirectly included generated header files are in deps.
"""
msg = []
for stack in stacks:
generated_hdr = stack[-1]
if generated_hdr in direct_hdrs: # Already verified as direct_hdrs
continue
if self._hdr_is_transitive_declared(generated_hdr):
continue
stack.pop()
missing_dep_hdrs.add(generated_hdr)
if generated_hdr in suppressd_hdrs:
continue
msg.append(' For %s' % self._hdr_declaration_message(generated_hdr))
if not stack:
msg.append(' In file included from "%s"' % full_src)
else:
stack.reverse()
msg.append(' In file included from %s' % self._hdr_declaration_message(stack[0]))
prefix = ' from %s'
msg += [prefix % self._hdr_declaration_message(h) for h in stack[1:]]
msg.append(prefix % ('"%s"' % full_src))
check_msg += msg
def check(self):
"""
Check whether the included header files are declared in "deps" correctly.
Returns:
Whether nothing is wrong.
"""
missing_details = {} # {src: list(hdrs)}
undeclared_hdrs = set()
all_direct_hdrs = set()
all_generated_hdrs = set()
direct_check_msg = []
generated_check_msg = []
def check_file(src, full_src, is_header):
if util.path_under_dir(full_src, self.build_dir): # Don't check generated files.
return
path = self._find_inclusion_file(src, is_header)
if not path:
console.warning('No inclusion file found for %s' % full_src)
return
direct_hdrs, stacks = _parse_inclusion_stacks(path, self.build_dir)
all_direct_hdrs.update(direct_hdrs)
missing_dep_hdrs = set()
self._check_direct_headers(
full_src, direct_hdrs, self.suppress.get(src, []),
missing_dep_hdrs, undeclared_hdrs, direct_check_msg)
for stack in stacks:
all_generated_hdrs.add(stack[-1])
# But direct headers cannot cover everything, so this check is still useful
self._check_generated_headers(
full_src, stacks, direct_hdrs,
self.suppress.get(src, []),
missing_dep_hdrs, generated_check_msg)
if missing_dep_hdrs:
missing_details[src] = list(missing_dep_hdrs)
for src, full_src in self.expanded_srcs:
check_file(src, full_src, is_header=False)
for hdr, full_hdr in self.expanded_hdrs:
check_file(hdr, full_hdr, is_header=True)
severity = self.severity
if direct_check_msg:
console.diagnose(self.source_location, severity,
'%s: Missing dependency declaration:\n%s' % (self.name, '\n'.join(direct_check_msg)))
if generated_check_msg:
console.diagnose(self.source_location, severity,
'%s: Missing indirect dependency declaration:\n%s' % (self.name, '\n'.join(generated_check_msg)))
ok = (severity != 'error' or not direct_check_msg and not generated_check_msg)
details = {}
if missing_details:
details['missing_dep'] = missing_details
if undeclared_hdrs:
details['undeclared'] = sorted(undeclared_hdrs)
details['direct_hdrs'] = all_direct_hdrs
details['generated_hdrs'] = all_generated_hdrs
return ok, details
def check(target_check_info_file):
target = pickle.load(open(target_check_info_file, 'rb'))
checker = Checker(target)
return checker.check()
|
cli/gotypes.py | rizalgowandy/nice | 186 | 12607672 | #!/usr/bin/env python
types = [
("bool", "Bool", "strconv.FormatBool(bool(*%s))", "*%s == false"),
("uint8", "Uint8", "strconv.FormatUint(uint64(*%s), 10)", "*%s == 0"),
("uint16", "Uint16", "strconv.FormatUint(uint64(*%s), 10)", "*%s == 0"),
("uint32", "Uint32", "strconv.FormatUint(uint64(*%s), 10)", "*%s == 0"),
("uint64", "Uint64", "strconv.FormatUint(uint64(*%s), 10)", "*%s == 0"),
("int8", "Int8", "strconv.FormatInt(int64(*%s), 10)", "*%s == 0"),
("int16", "Int16", "strconv.FormatInt(int64(*%s), 10)", "*%s == 0"),
("int32", "Int32", "strconv.FormatInt(int64(*%s), 10)", "*%s == 0"),
("int64", "Int64", "strconv.FormatInt(int64(*%s), 10)", "*%s == 0"),
("float32", "Float32", "strconv.FormatFloat(float64(*%s), 'g', -1, 32)", "*%s == 0.0"),
("float64", "Float64", "strconv.FormatFloat(float64(*%s), 'g', -1, 64)", "*%s == 0.0"),
("string", "String", "string(*%s)", "*%s == \"\""),
("int", "Int", "strconv.Itoa(int(*%s))", "*%s == 0"),
("uint", "Uint", "strconv.FormatUint(uint64(*%s), 10)", "*%s == 0"),
("time.Duration", "Duration", "(*time.Duration)(%s).String()", "*%s == 0"),
# TODO: Func
]
imports = [
"time"
]
imports_stringer = [
"strconv"
]
|
util/network_utils.py | giuseppefutia/GraphTSNE | 120 | 12607677 | import torch
import numpy as np
from util.graph_utils import neighbor_sampling
from core.GraphDataBlock import GraphDataBlock
def save_checkpoint(state, filename):
torch.save(state, filename)
def get_net_projection(net, dataset, n_batches=1, n_components=2):
"""
Get visualization of dataset using a projection net
Args:
net (GraphConvNet): projection net
dataset (EmbeddingDataSet): dataset to project
n_batches (int): number of batches to split dataset
n_components (int): dimensionality of the output
Returns:
y_pred (np.array): low dimensional map of data points, matrix of size n x n_components
"""
net.eval()
dataset.create_all_data(n_batches=n_batches, shuffle=False)
if n_batches == 1:
return _get_net_projection(net, dataset.all_data[0])
y_pred = np.zeros((len(dataset.labels), n_components))
for G in dataset.all_data:
y_pred_original = _get_net_projection(net, G, sampling=False, dataset=dataset)
y_pred[G.original_indices] = y_pred_original # Place results into full matrix
return y_pred
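# A minimal usage sketch, assuming `net` (GraphConvNet) and `dataset` (EmbeddingDataSet)
# have already been constructed and trained elsewhere in the project:
#
#   y_pred = get_net_projection(net, dataset, n_batches=4, n_components=2)
#   # y_pred has shape (len(dataset.labels), 2); row i is the 2-D embedding of point i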
def _get_net_projection(net, G, sampling=False, dataset=None):
"""
Helper function for get_net_projection
Args:
net (GraphConvNet): projection net
G (GraphDataBlock): graph block to project
sampling (Boolean): whether to expand the graph block via neighbor sampling
dataset (EmbeddingDataSet): provided as input to perform neighbor sampling
Returns:
y_pred_original (np.array): low dimensional map of data points, matrix of size n x n_components
"""
if not sampling:
if torch.cuda.is_available():
return net.forward(G).cpu().detach().numpy()
else:
return net.forward(G).detach().numpy()
is_sorted = lambda a: np.all(a[:-1] <= a[1:])
assert is_sorted(G.original_indices)
assert dataset is not None
original_idx = G.original_indices
neighborhood_idx = neighbor_sampling(dataset.adj_matrix, original_idx, [-1, -1])
# Package into GraphDataBlock
inputs_subset = dataset.inputs[neighborhood_idx]
labels_subset = dataset.labels[neighborhood_idx]
adj_subset = dataset.adj_matrix[neighborhood_idx, :][:, neighborhood_idx]
G = GraphDataBlock(inputs_subset, labels=labels_subset, W=adj_subset)
# Get projection of the expanded GraphDataBlock, without sampling this time
y_pred_neighborhood = _get_net_projection(net, G, sampling=False)
# Get mask of indices of original within neighborhood
ix = np.isin(neighborhood_idx, original_idx)
# Retrieve predictions for original indices only
y_pred_original = y_pred_neighborhood[ix]
return y_pred_original
|
alipay/aop/api/domain/UserInfomation.py | antopen/alipay-sdk-python-all | 213 | 12607688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OrderExtInfo import OrderExtInfo
class UserInfomation(object):
def __init__(self):
self._cert_no = None
self._cert_type = None
self._ext_info = None
self._mobile = None
self._name = None
self._user_id = None
@property
def cert_no(self):
return self._cert_no
@cert_no.setter
def cert_no(self, value):
self._cert_no = value
@property
def cert_type(self):
return self._cert_type
@cert_type.setter
def cert_type(self, value):
self._cert_type = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
if isinstance(value, list):
self._ext_info = list()
for i in value:
if isinstance(i, OrderExtInfo):
self._ext_info.append(i)
else:
self._ext_info.append(OrderExtInfo.from_alipay_dict(i))
@property
def mobile(self):
return self._mobile
@mobile.setter
def mobile(self, value):
self._mobile = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.cert_no:
if hasattr(self.cert_no, 'to_alipay_dict'):
params['cert_no'] = self.cert_no.to_alipay_dict()
else:
params['cert_no'] = self.cert_no
if self.cert_type:
if hasattr(self.cert_type, 'to_alipay_dict'):
params['cert_type'] = self.cert_type.to_alipay_dict()
else:
params['cert_type'] = self.cert_type
if self.ext_info:
if isinstance(self.ext_info, list):
for i in range(0, len(self.ext_info)):
element = self.ext_info[i]
if hasattr(element, 'to_alipay_dict'):
self.ext_info[i] = element.to_alipay_dict()
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.mobile:
if hasattr(self.mobile, 'to_alipay_dict'):
params['mobile'] = self.mobile.to_alipay_dict()
else:
params['mobile'] = self.mobile
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = UserInfomation()
if 'cert_no' in d:
o.cert_no = d['cert_no']
if 'cert_type' in d:
o.cert_type = d['cert_type']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'mobile' in d:
o.mobile = d['mobile']
if 'name' in d:
o.name = d['name']
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
social/apps/flask_app/utils.py | raccoongang/python-social-auth | 1,987 | 12607728 | from social_flask.utils import get_helper, load_strategy, load_backend, psa, strategy
|
Problem040/Python/solution_1.py | drocha87/ProjectEuler | 167 | 12607743 | #!/usr/bin/env python
# coding=utf-8
# Python Script
#
# Copyleft © <NAME>
#
#
from functools import reduce
from itertools import count
def frac_series_generator():
'''Yield each successive digit of the fractional part 0.12345678910111213...'''
for w in count(start=1, step=1):
for s in str(w):
yield s
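# The generator above yields the digits of 0.123456789101112... one character at a time;
# for example, the first twelve values are '1','2','3','4','5','6','7','8','9','1','0','1'.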
def search_digit_by_index(indexes):
'''Get the digits at the given indexes of the fractional series.'''
limit = max(indexes)
digits = {x: 0 for x in indexes}
for c, n in enumerate(frac_series_generator()):
if c + 1 in digits:
digits[c + 1] = int(n)
if c + 1 >= limit:
break
return digits.values()
def main():
indexes = [1, 10, 100, 1000, 10000, 100000, 1000000]
print(reduce(int.__mul__, search_digit_by_index(indexes)))
if __name__ == '__main__':
main()
|
backend/fastapi/build_dsApp/Authentication/api_key_header.py | spideynolove/Other-repo | 107 | 12607749 | from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.security import APIKeyHeader
API_TOKEN = "SECRET_API_TOKEN"
app = FastAPI()
api_key_header = APIKeyHeader(name="Token")
@app.get("/protected-route")
async def protected_route(token: str = Depends(api_key_header)):
if token != API_TOKEN:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
return {"hello": "world"}
|
tools/reval.py | xctspring/iter-reason | 275 | 12607754 | #!/usr/bin/env python
# Reval = re-eval. Re-evaluate saved detections.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.config import cfg
from datasets.factory import get_imdb
try:
import cPickle as pickle
except ImportError:
import pickle
import os, sys, argparse
import numpy as np
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Re-evaluate results')
parser.add_argument('output_dir', nargs=1, help='results directory',
type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to re-evaluate',
default='ade_mtest_5', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def from_results(imdb_name, output_dir, args):
imdb = get_imdb(imdb_name)
with open(os.path.join(output_dir, 'results.pkl'), 'rb') as f:
results = pickle.load(f)
print('Evaluating detections')
imdb.evaluate(results, output_dir)
if __name__ == '__main__':
args = parse_args()
output_dir = os.path.abspath(args.output_dir[0])
imdb_name = args.imdb_name
from_results(imdb_name, output_dir, args)
|
Trakttv.bundle/Contents/Libraries/Shared/exception_wrappers/database/apsw/base.py | disrupted/Trakttv.bundle | 1,346 | 12607763 | from __future__ import absolute_import
from exception_wrappers.exceptions import DatabaseDisabledError
from exception_wrappers.libraries import apsw
from exception_wrappers.manager import ExceptionWrapper
import logging
log = logging.getLogger(__name__)
disabled_databases = {}
class APSWBaseWrapper(object):
name = None
critical_errors = (
apsw.CantOpenError,
apsw.CorruptError,
apsw.FullError,
apsw.IOError,
apsw.NotADBError,
apsw.PermissionsError,
apsw.ReadOnlyError
)
@property
def error_message(self):
return disabled_databases.get(self.name)
@property
def valid(self):
return disabled_databases.get(self.name) is None
def on_exception(self, source, exc_info):
log.info('Exception raised in %s: %s', source, exc_info[1], exc_info=True)
# Raise exception if the database is already disabled
if disabled_databases.get(self.name):
raise DatabaseDisabledError(exc_info[1])
# Mark database as disabled
disabled_databases[self.name] = exc_info
# Emit exception event
ExceptionWrapper.add(source, exc_info, self.name)
# Raise exception
raise DatabaseDisabledError(exc_info[1])
|
2018/03/15/How to Use Django REST Framework Permissions/api_example/api_example/languages/views.py | kenjitagawa/youtube_video_code | 492 | 12607786 | from django.shortcuts import render
from rest_framework import viewsets, permissions
from .models import Language, Programmer, Paradigm
from .serializers import LanguageSerializer, ParadigmSerializer, ProgrammerSerializer
class LanguageView(viewsets.ModelViewSet):
queryset = Language.objects.all()
serializer_class = LanguageSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class ParadigmView(viewsets.ModelViewSet):
queryset = Paradigm.objects.all()
serializer_class = ParadigmSerializer
class ProgrammerView(viewsets.ModelViewSet):
queryset = Programmer.objects.all()
serializer_class = ProgrammerSerializer
|
esphome/components/tuya/cover/__init__.py | OttoWinter/esphomeyaml | 249 | 12607830 | from esphome.components import cover
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import (
CONF_OUTPUT_ID,
CONF_MIN_VALUE,
CONF_MAX_VALUE,
CONF_RESTORE_MODE,
)
from .. import tuya_ns, CONF_TUYA_ID, Tuya
DEPENDENCIES = ["tuya"]
CONF_CONTROL_DATAPOINT = "control_datapoint"
CONF_DIRECTION_DATAPOINT = "direction_datapoint"
CONF_POSITION_DATAPOINT = "position_datapoint"
CONF_POSITION_REPORT_DATAPOINT = "position_report_datapoint"
CONF_INVERT_POSITION = "invert_position"
TuyaCover = tuya_ns.class_("TuyaCover", cover.Cover, cg.Component)
TuyaCoverRestoreMode = tuya_ns.enum("TuyaCoverRestoreMode")
RESTORE_MODES = {
"NO_RESTORE": TuyaCoverRestoreMode.COVER_NO_RESTORE,
"RESTORE": TuyaCoverRestoreMode.COVER_RESTORE,
"RESTORE_AND_CALL": TuyaCoverRestoreMode.COVER_RESTORE_AND_CALL,
}
def validate_range(config):
if config[CONF_MIN_VALUE] > config[CONF_MAX_VALUE]:
raise cv.Invalid(
f"min_value ({config[CONF_MIN_VALUE]}) cannot be greater than max_value ({config[CONF_MAX_VALUE]})"
)
return config
CONFIG_SCHEMA = cv.All(
cover.COVER_SCHEMA.extend(
{
cv.GenerateID(CONF_OUTPUT_ID): cv.declare_id(TuyaCover),
cv.GenerateID(CONF_TUYA_ID): cv.use_id(Tuya),
cv.Optional(CONF_CONTROL_DATAPOINT): cv.uint8_t,
cv.Optional(CONF_DIRECTION_DATAPOINT): cv.uint8_t,
cv.Required(CONF_POSITION_DATAPOINT): cv.uint8_t,
cv.Optional(CONF_POSITION_REPORT_DATAPOINT): cv.uint8_t,
cv.Optional(CONF_MIN_VALUE, default=0): cv.int_,
cv.Optional(CONF_MAX_VALUE, default=100): cv.int_,
cv.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
cv.Optional(CONF_RESTORE_MODE, default="RESTORE"): cv.enum(
RESTORE_MODES, upper=True
),
},
).extend(cv.COMPONENT_SCHEMA),
validate_range,
)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_OUTPUT_ID])
await cg.register_component(var, config)
await cover.register_cover(var, config)
if CONF_CONTROL_DATAPOINT in config:
cg.add(var.set_control_id(config[CONF_CONTROL_DATAPOINT]))
if CONF_DIRECTION_DATAPOINT in config:
cg.add(var.set_direction_id(config[CONF_DIRECTION_DATAPOINT]))
cg.add(var.set_position_id(config[CONF_POSITION_DATAPOINT]))
if CONF_POSITION_REPORT_DATAPOINT in config:
cg.add(var.set_position_report_id(config[CONF_POSITION_REPORT_DATAPOINT]))
cg.add(var.set_min_value(config[CONF_MIN_VALUE]))
cg.add(var.set_max_value(config[CONF_MAX_VALUE]))
cg.add(var.set_invert_position(config[CONF_INVERT_POSITION]))
cg.add(var.set_restore_mode(config[CONF_RESTORE_MODE]))
paren = await cg.get_variable(config[CONF_TUYA_ID])
cg.add(var.set_tuya_parent(paren))
|
test/cluster_test/setup.py | viyadb/viyadb | 109 | 12607834 | #!/usr/bin/env python3
import http.server
import json
import kafka
import requests
import socketserver
import time
consul_url = 'http://consul:8500/v1/kv'
def wait_for_consul():
while True:
try:
requests.head(consul_url)
break
except requests.exceptions.ConnectionError:
print('Consul is not available ... will retry in 1s')
time.sleep(1)
pass
def consul_put_kv(key, value):
r = requests.put(
'{}/{}'.format(consul_url, key), data=json.dumps(value, indent=True))
r.raise_for_status()
def write_config():
"""Write all needed configuration in Consul"""
wait_for_consul()
consul_put_kv('viyadb/clusters/cluster001/config', {
'replication_factor': 2,
'workers': 4,
'tables': ['events'],
'indexers': ['main']
})
consul_put_kv(
'viyadb/tables/events/config', {
'name':
'events',
'dimensions': [
{'name': 'app_id'}, \
{'name': 'user_id', 'type': 'uint'}, \
{'name': 'event_time', 'type': 'time', 'format': 'millis', 'granularity': 'day'}, \
{'name': 'country'}, \
{'name': 'city'}, \
{'name': 'device_type'}, \
{'name': 'device_vendor'}, \
{'name': 'ad_network'}, \
{'name': 'campaign'}, \
{'name': 'site_id'}, \
{'name': 'event_type'}, \
{'name': 'event_name'}, \
{'name': 'organic', 'cardinality': 2}, \
{'name': 'days_from_install', 'type': 'ushort'} \
],
'metrics': [
{'name': 'revenue', 'type': 'double_sum'}, \
{'name': 'users', 'type': 'bitset', 'field': 'user_id', 'max': 4294967295}, \
{'name': 'count', 'type': 'count'} \
]
})
consul_put_kv(
'viyadb/indexers/main/config', {
'tables': ['events'],
'deepStorePath': '/tmp/viyadb/deepstore',
'realTime': {
'windowDuration': 'PT20S',
'kafkaSource': {
'topics': ['events'],
'brokers': ['kafka:9092']
},
'parseSpec': {
'format': 'json',
'timeColumn': {
'name': 'event_time'
}
},
'notifier': {
'type': 'kafka',
'channel': 'kafka:9092',
'queue': 'rt-notifications'
}
},
'batch': {
'batchDuration': 'PT1M',
'partitioning': {
'columns': ['app_id'],
'partitions': 2
},
'notifier': {
'type': 'kafka',
'channel': 'kafka:9092',
'queue': 'batch-notifications'
}
}
})
def wait_for_kafka():
while True:
try:
return kafka.KafkaProducer(
bootstrap_servers='kafka:9092',
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
except kafka.errors.NoBrokersAvailable:
print('Kafka is not available ... will retry in 3s')
time.sleep(3)
def init_kafka_notifications():
"""Setup Kafka notification topics initial contents"""
producer = wait_for_kafka()
columns = [
'app_id', 'user_id', 'event_time', 'country', 'city', 'device_type',
'device_vendor', 'ad_network', 'campaign', 'site_id', 'event_type',
'event_name', 'organic', 'days_from_install', 'revenue', 'users',
'count'
]
rt_notifications = [
{'id':1565439340000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439300000/mb=1565439340000'],'columns':columns,'recordCount':127}},'offsets':[{'topic':'events','partition':0,'offset':139}]}, \
{'id':1565439360000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439360000/mb=1565439360000'],'columns':columns,'recordCount':181}},'offsets':[{'topic':'events','partition':0,'offset':349}]}, \
{'id':1565439380000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439360000/mb=1565439380000'],'columns':columns,'recordCount':166}},'offsets':[{'topic':'events','partition':0,'offset':537}]}, \
{'id':1565439400000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439360000/mb=1565439400000'],'columns':columns,'recordCount':178}},'offsets':[{'topic':'events','partition':0,'offset':743}]}, \
{'id':1565439420000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439420000/mb=1565439420000'],'columns':columns,'recordCount':178}},'offsets':[{'topic':'events','partition':0,'offset':938}]}, \
{'id':1565439440000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439420000/mb=1565439440000'],'columns':columns,'recordCount':177}},'offsets':[{'topic':'events','partition':0,'offset':1141}]}, \
{'id':1565439460000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439420000/mb=1565439460000'],'columns':columns,'recordCount':179}},'offsets':[{'topic':'events','partition':0,'offset':1346}]}, \
]
batch_notifications = [
{'id':1565439300000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/batch/events/dt=1565439300000'],'columns':columns,'partitioning':[0,1],'partitionConf':{'columns':['app_id'],'partitions':2},'recordCount':127}},'microBatches':[1565439340000]}, \
{'id':1565439360000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/batch/events/dt=1565439360000'],'columns':columns,'partitioning':[0,1],'partitionConf':{'columns':['app_id'],'partitions':2},'recordCount':612}},'microBatches':[1565439360000,1565439380000,1565439400000]}, \
{'id':1565439420000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/batch/events/dt=1565439420000'],'columns':columns,'partitioning':[0,1],'partitionConf':{'columns':['app_id'],'partitions':2},'recordCount':1004}},'microBatches':[1565439420000,1565439440000,1565439460000]}, \
]
for e in rt_notifications:
producer.send('rt-notifications', e).get()
for e in batch_notifications:
producer.send('batch-notifications', e).get()
def start_webserver():
"""This webserver notifies other test components that everything is configured"""
with socketserver.TCPServer(('0.0.0.0', 8080),
http.server.SimpleHTTPRequestHandler) as httpd:
httpd.serve_forever()
if __name__ == '__main__':
write_config()
init_kafka_notifications()
start_webserver()
|
Python/rabbit_group problem.py | shruti8301/Algorithms-Cheatsheet-Resources | 199 | 12607844 | '''
There are some colored rabbits in a forest. Given an array arr[]
of size N, such that arr[i] denotes the number of other rabbits having
the same color as the ith rabbit, the task is to find the minimum number
of rabbits that could be in the forest.
'''
import math
def rabbits(A):
d={}
for i in A:
if i in d:
d[i]+=1
else:
d[i]=1
ans=0
for i in d:
gs=i+1
reportees=d[i]
ng=int(math.ceil(reportees*1.0/gs*1.0))
ans+=ng*gs
return ans
A=[10,10,10]
print(rabbits(A))
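# Worked example for A = [10, 10, 10]: each rabbit reports 10 others of its color, so a
# group holds 11 rabbits; ceil(3 / 11) = 1 group suffices, giving 1 * 11 = 11 as the output.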
|
utils/utils.py | Ynjxsjmh/mtl | 385 | 12607897 | from src.models import MiniAutoencoder, SegNetAutoencoder, SegNetArgmaxAE, SegNetTest, ResNetAutoencoder
import config
import tensorflow as tf
from functools import reduce
import os
import utils.data_handler as dh
import numpy as np
import matplotlib.pyplot as plt
import src.OPTICS as OPTICS
colors = tf.cast(tf.stack(config.colors[config.working_dataset]), tf.float32) # / 255
FLAGS = tf.app.flags.FLAGS
def get_autoencoder(autoencoder_name, dataset_name, strided):
n_labels = len(config.colors[dataset_name])
autoencoders = {
'mini': MiniAutoencoder,
'SegNet': SegNetTest,
'CityScapes' : SegNetTest,
'CityScapes_old' : SegNetAutoencoder,
'ResNet': ResNetAutoencoder
}
if autoencoder_name == 'SegNet':
n_labels = [n_labels, 2, 1]
return autoencoders[autoencoder_name](n_labels, strided=strided)
def restore_logs(logfile):
'''
Delete any existing logs at `logfile` and recreate the directory.
(The sub-index alternative that would preserve old log files is left commented out below.)
:param logfile:
:return:
'''
if tf.gfile.Exists(logfile):
print('logfile already exist: %s' % logfile)
# i = 1
# while os.path.exists(logfile + '_' + str(i)):
# i += 1
# logfile = logfile + '_' + str(i)
# print('Creating anf writing to: %s' % logfile)
tf.gfile.DeleteRecursively(logfile)
tf.gfile.MakeDirs(logfile)
def color_mask(tensor, color):
return tf.reduce_all(tf.equal(tensor, color), 3)
def one_hot(labels, is_color=True):
if is_color:
color_tensors = tf.unstack(colors)
channel_tensors = list(map(lambda color: color_mask(tf.cast(labels, tf.float32), color), color_tensors))
one_hot_labels = tf.cast(tf.stack(channel_tensors, 3), 'float32')
else:
# TODO: Need to create images of each label from 1 to 33 in size of label image
colors_labelIds = tf.cast(tf.range(len(config.colors[config.working_dataset])), tf.float32)
color_tensors = tf.unstack(colors_labelIds)
channel_tensors = list(map(lambda color: color_mask(labels, color), color_tensors))
one_hot_labels = tf.cast(tf.stack(channel_tensors, 3), 'float32')
return one_hot_labels
def rgb(logits, need_resize=False):
softmax = tf.nn.softmax(logits)
argmax = tf.argmax(softmax, -1)
n = colors.get_shape().as_list()[0]
one_hot = tf.one_hot(argmax, n, dtype=tf.float32)
one_hot_matrix = tf.reshape(one_hot, [-1, n])
rgb_matrix = tf.matmul(one_hot_matrix, colors)
rgb_tensor = tf.reshape(rgb_matrix, [-1, FLAGS.output_height, FLAGS.output_width, 3])
return tf.cast(rgb_tensor, tf.float32)
def labelId(logits):
softmax = tf.nn.softmax(logits)
argmax = tf.argmax(softmax, 3)
argmax_expand = tf.expand_dims(argmax, -1)
return tf.cast(argmax_expand*7, tf.float32)
def disparity(logits):
return tf.cast(logits, tf.float32)
def onehot_to_rgb(one_hot):
n = colors.get_shape().as_list()[0]
one_hot_matrix = tf.reshape(one_hot, [-1, n])
rgb_matrix = tf.matmul(one_hot_matrix, colors)
rgb_tensor = tf.reshape(rgb_matrix, [-1, FLAGS.output_height, FLAGS.output_width, 3])
return tf.cast(rgb_tensor, tf.float32)
def accuracy(logits, labels):
if FLAGS.need_resize:
labels = tf.image.resize_images(labels, [FLAGS.output_height, FLAGS.output_width],
method=tf.image.ResizeMethod.BILINEAR, align_corners=False)
softmax = tf.nn.softmax(logits, dim=3)
argmax = tf.argmax(softmax, 3)
shape = logits.get_shape().as_list()
n = shape[3]
one_hot = tf.one_hot(argmax, n, dtype=tf.float32)
equal_pixels = tf.reduce_sum(tf.to_float(color_mask(one_hot, labels)))
total_pixels = reduce(lambda x, y: x * y, [FLAGS.batch] + shape[1:3])
return equal_pixels / total_pixels
def make_dir(path):
if not os.path.exists(path):
os.makedirs(path)
def make_model(autoencoder_name='ResNet'):
input_ph, ground_truths_ph, ground_truths, pre_processed_input = dh.get_place_holders()
autoencoder = get_autoencoder(autoencoder_name, config.working_dataset, config.strided)
logits = autoencoder.inference(pre_processed_input)
return autoencoder, logits, input_ph, ground_truths_ph, ground_truths, pre_processed_input
def get_run_list(logits, INF_FLAGS):
run_list = []
if INF_FLAGS['use_label_type']:
labelId_image = rgb(logits[0])
run_list.append(tf.cast(labelId_image, tf.uint8))
if INF_FLAGS['use_label_inst']:
run_list.append(logits[1])
if INF_FLAGS['use_label_disp']:
run_list.append(logits[2])
return run_list
def pred_list2dict(pred_list, INF_FLAGS):
pred_dict = {}
if INF_FLAGS['use_label_disp']:
image = np.expand_dims(pred_list.pop().squeeze().clip(max=1, min=0)*255, 2).astype('uint8')
image = np.concatenate([image, image, image], axis=2)
pred_dict['disp'] = image
if INF_FLAGS['use_label_inst']:
pred_dict['instance'] = pred_list.pop().squeeze()
if INF_FLAGS['use_label_type']:
pred_dict['label'] = pred_list.pop().squeeze()
return pred_dict
def calc_instance(label_arr, xy_arr):
mask = make_mask(label_arr)
raw_image = np.concatenate([xy_arr, np.expand_dims(mask, axis=2)], axis=2)
instance_image = OPTICS.calc_clusters_img(raw_image)
return instance_image.clip(max=255, min=0).astype('uint8')
def make_mask(label_image):
ids = [24, 26]
for i, id in enumerate(ids):
color = config.colors[config.working_dataset][id]
mask = label_image == color
mask = mask[:, :, 0] * mask[:, :, 1] * mask[:, :, 2]
if i == 0:
total_mask = mask
else:
total_mask = total_mask + mask
return total_mask
|
src/genie/libs/parser/iosxe/tests/ShowMacAddressTable/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 12607936 | expected_output = {
"mac_table": {
"vlans": {
"100": {
"mac_addresses": {
"ecbd.1dff.5f92": {
"drop": {"drop": True, "entry_type": "dynamic"},
"mac_address": "ecbd.1dff.5f92",
},
"3820.56ff.6f75": {
"interfaces": {
"Port-channel12": {
"interface": "Port-channel12",
"entry_type": "dynamic",
}
},
"mac_address": "3820.56ff.6f75",
},
"58bf.eaff.e508": {
"interfaces": {
"Vlan100": {"interface": "Vlan100", "entry_type": "static"}
},
"mac_address": "58bf.eaff.e508",
},
},
"vlan": 100,
},
"all": {
"mac_addresses": {
"0100.0cff.9999": {
"interfaces": {
"CPU": {"interface": "CPU", "entry_type": "static"}
},
"mac_address": "0100.0cff.9999",
},
"0100.0cff.999a": {
"interfaces": {
"CPU": {"interface": "CPU", "entry_type": "static"}
},
"mac_address": "0100.0cff.999a",
},
},
"vlan": "all",
},
"20": {
"mac_addresses": {
"aaaa.bbff.8888": {
"drop": {"drop": True, "entry_type": "static"},
"mac_address": "aaaa.bbff.8888",
}
},
"vlan": 20,
},
"10": {
"mac_addresses": {
"aaaa.bbff.8888": {
"interfaces": {
"GigabitEthernet1/0/8": {
"entry": "*",
"interface": "GigabitEthernet1/0/8",
"entry_type": "static",
},
"GigabitEthernet1/0/9": {
"entry": "*",
"interface": "GigabitEthernet1/0/9",
"entry_type": "static",
},
"Vlan101": {
"entry": "*",
"interface": "Vlan101",
"entry_type": "static",
},
},
"mac_address": "aaaa.bbff.8888",
}
},
"vlan": 10,
},
"101": {
"mac_addresses": {
"58bf.eaff.e5f7": {
"interfaces": {
"Vlan101": {"interface": "Vlan101", "entry_type": "static"}
},
"mac_address": "58bf.eaff.e5f7",
},
"3820.56ff.6fb3": {
"interfaces": {
"Port-channel12": {
"interface": "Port-channel12",
"entry_type": "dynamic",
}
},
"mac_address": "3820.56ff.6fb3",
},
"3820.56ff.6f75": {
"interfaces": {
"Port-channel12": {
"interface": "Port-channel12",
"entry_type": "dynamic",
}
},
"mac_address": "3820.56ff.6f75",
},
},
"vlan": 101,
},
}
},
"total_mac_addresses": 10,
}
|
src/face_detection/startq.py | mykiscool/DeepCamera | 914 | 12607953 | import subprocess, shlex, os
if __name__ == "__main__":
command = ''
env = os.environ.copy()
env['WORKER_BROKER'] = 'amqp://rabbitmq/'
env['WORKER_TYPE'] = 'od'
command = 'celery worker --loglevel INFO -E -n od -c 1 --autoscale=1,1 -Q od'
od = subprocess.Popen(args=shlex.split(command), env=env)
env['WORKER_TYPE'] = 'flower'
command = 'celery flower'
flower = subprocess.Popen(args=shlex.split(command), env=env)
od.wait()
flower.wait()
|
stocklook/crypto/gdax/__init__.py | zbarge/stocklook | 149 | 12607976 | from stocklook.crypto.gdax.feeds.book_feed import GdaxBookFeed
from stocklook.crypto.gdax.feeds import GdaxTradeFeed
from stocklook.crypto.gdax.feeds.websocket_client import GdaxWebsocketClient
from .account import GdaxAccount
from .api import Gdax, GdaxAPIError
from .book import GdaxOrderBook
from .chartdata import GdaxChartData
from .db import GdaxDatabase
from .order import (GdaxOrder,
GdaxOrderSides,
GdaxOrderTypes,
GdaxOrderSystem)
from .product import GdaxProducts, GdaxProduct
from .tables import (GdaxBase,
GdaxSQLProduct,
GdaxSQLQuote,
GdaxSQLOrder,
GdaxSQLHistory,
GdaxSQLFeedEntry)
from .trader import GdaxTrader, GdaxAnalyzer
def scan_price(gdax,
product,
alert_method,
low_notify=None,
high_notify=None,
interval=60,
end_time=None,
change_rate=.015,
alert_addr=None):
"""
Scans a GdaxProduct's orderbook regularly evaluating
ask prices and sending notifications when price hits a high or low target.
This function is designed to run for days, weeks, and months at a time as
it makes adjustments to target high and low prices as they're reached.
:param gdax: (gdax.api.Gdax)
The Gdax API object to be used when calling.
:param product: (str)
ETH-USD, LTC-USD, BTC-USD
:param alert_method: (callable)
Should be a method that can be called like so:
alert_method(alert_addr, text)
:param low_notify: (numeric, default None)
The lower trigger price to alert on.
None will default to: price - (price * change_rate)
This value is decreased by the change rate when reached.
This value is increased by the change rate when the
high_notify price is reached.
:param high_notify: (numeric, default None)
The upper trigger price to alert on.
None will default to: price + (price * change_rate)
This value is increased by the change rate when reached.
This value is decreased by the change rate when
the low_notify price is reached.
:param interval: (int, default 60)
Number of seconds between order book scans.
:param end_time: (DateTime, default None)
The date/time to stop scanning the books
None will default to 99 weeks from function start time.
:param change_rate: (float, default 0.015)
The rate at which the high_notify & low_notify
target values change as the price moves.
:return: None
"""
from time import sleep
from datetime import datetime
from pandas import DateOffset
if alert_addr is None:
alert_addr = '949572<EMAIL>'
both_flags = all((low_notify, high_notify))
if not both_flags:
prod = gdax.get_product(product)
p = prod.price
if low_notify is None:
low = p - (p * change_rate)
low_notify = round(low, 2)
if high_notify is None:
high = p + (p * change_rate)
high_notify = round(high, 2)
if end_time is None:
end_time = datetime.now() + DateOffset(weeks=99)
ask_store, last_ping, i = list(), None, 0
print("Initializing {} price scan "
"looking for low {} "
"and high {}".format(product, low_notify, high_notify))
# This value is increased by ~30%
# every time a notification is sent.
# It represents how many minutes to wait before
# re-sending the same notification
wait_time = 5
while True:
i += 1
now = datetime.now()
try:
book = gdax.get_book(product, level=1)
except Exception as e:
sleep(interval)
print(e)
continue
asks = book['asks']
lowest_ask = float(asks[0][0])
diff_high = round(high_notify - lowest_ask, 2)
diff_low = round(low_notify - lowest_ask, 2)
pct_away_high = round(100 - ((lowest_ask/high_notify)*100), 2)
pct_away_low = round(100 - ((lowest_ask/low_notify)*100), 2)
meets_low = (low_notify and lowest_ask <= low_notify)
meets_high = (high_notify and lowest_ask >= high_notify)
meets_criteria = meets_low or meets_high
# This information should probably somehow get stored?
# NOTE: the program is threaded so it'd need to generate its own
# SQL connection ....or open and write to a file every 50 lines or something
#ask_store.append([now, lowest_ask, diff_high, pct_away_high, diff_low, pct_away_low])
msg = 'Price: ${} -' \
'Target: ${} - away: ${}/{}% -' \
'Stop: ${}- away: ${}/%{}'.format(lowest_ask,
high_notify,
diff_high,
pct_away_high,
low_notify,
diff_low,
pct_away_low)
msg = '{}: ({}-{})\n{}'.format(str(now)[:19],
i,
product,
msg)
print(msg)
print("\n")
if meets_criteria:
send_msg = True
m = int(wait_time)
min_time = now - DateOffset(minutes=m)
if last_ping and last_ping > min_time:
send_msg = False
if send_msg:
alert_method(alert_addr, msg)
last_ping = now
# Increase/decrease the target price by change_rate
if meets_low:
v = round(low_notify * change_rate, 2)
low_notify -= v
high_notify -= v
if meets_high:
v = round(high_notify * change_rate, 2)
high_notify += v
low_notify += v
if end_time and now >= end_time:
break
sleep(interval)
return ask_store
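# Worked example of the default target bands (change_rate = 0.015): if the product trades
# at $100.00 when the scan starts, low_notify = $98.50 and high_notify = $101.50. Once the
# ask reaches $101.50, v = round(101.50 * 0.015, 2) = $1.52, so both targets shift up to
# $100.02 / $103.02; reaching the low target shifts both targets down by the same rule.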
def dollars(x):
return round(float(x), 2)
def percent(x):
return round(float(x), 2)
def get_buypoint(c: GdaxChartData):
df = c.df
current = c.gdax.get_ticker(c.product)
stats = c.gdax.get_24hr_stats(c.product)
price = float(current['price'])
vol = float(current['volume'])
high = float(stats['high'])
low = float(stats['low'])
vol_ratio = percent(vol / c.avg_vol)
price_ratio = percent(price / c.avg_close)
high_ratio = percent(price / high)
low_ratio = percent(price / low)
rng = dollars(high - low)
high_diff = dollars(high - price)
low_diff = dollars(price - low)
print('ticker:{}\n'
'price: {}\n'
'day high: {}\n'
'day low: {}\n'
'day vol: {}\n'
'vol ratio: {}\n'
'price ratio: {}\n'
'high ratio: {}\n'
'high diff: {}\n'
'low ratio: {}\n'
'low diff: {} '.format(c.product,
price,
high,
low,
vol,
vol_ratio,
price_ratio,
high_ratio,
high_diff,
low_ratio,
low_diff))
print("\n\nRange Analysis:")
if rng <= .02 * price:
# Not even a 2% move in price today... tight range
print("Tight range today - ${}".format(rng))
elif rng >= .1 * price:
# 10% swing today...pretty volatile.
print("Volatile range today - ${}".format(rng))
else:
print("Average-ish range today - ${}".format(rng))
heavy_vol = vol_ratio > 1.5
low_vol = vol_ratio < .5
print("\n\nVolume Analysis\n\n"
"current {}\n"
"average {}:".format(int(vol), int(c.avg_vol)))
if heavy_vol:
print("Heavy volume - ratio {}".format(vol_ratio))
elif low_vol:
print("Low volume - ratio {}".format(vol_ratio))
else:
print("Average-ish volume - ratio {}".format(vol_ratio))
print("\n\nPrice analysis:")
price_chg1 = dollars(df.loc[1:12, 'price_change'].mean())
price_chg2 = dollars(df.loc[12:, 'price_change'].mean())
price_chg3 = dollars(df.loc[1:7, 'price_change'].mean())
print("Avg price change (last 10 periods): {}".format(price_chg1))
print("Avg price change (last 6 periods): {}".format(price_chg3))
print("Avg price change prior to last 10 periods: {}".format(price_chg2))
price_decreasing = price_chg1 < price_chg2
price_increasing = price_chg1 > price_chg2
if price_decreasing:
print("Price is decreasing over last 10 periods.")
elif price_increasing:
print("Price is increasing over last 10 periods.")
lower_rng = price < high - (rng / 2)
upper_rng = not lower_rng
peak_rng = price > high - (rng * .1)
if lower_rng:
txt = "Price is in the lower range - " \
"it might be a nice dip to buy."
if heavy_vol:
txt += 'Volume is heavy so be careful for bearish patterns.'
if price_decreasing:
txt += 'price is also decreasing so thats an extra warning.'
elif low_vol and price_increasing:
txt += 'Volume is low and the price is increasing ' \
'over the last 10 periods so it ' \
'appears to be healthy consolidation.'
elif low_vol and price_decreasing:
txt += 'Volume is low and the price is decreasing ' \
'over the last 10 periods so it ' \
'appears to be healthy consolidation.'
print(txt)
elif upper_rng and not peak_rng:
print("Price is in the upper range - "
"it's either about to break out or down."
"Still off the peak by {}".format(dollars(high - price)))
elif peak_rng:
print("Price is near the peak range - "
"watch for break out/down now.")
def generate_candles(product, gdax=None, out_path=None, start=None, end=None, granularity=60*60*24):
"""
Generates a .csv file containing open, high, low, close & volume
information for a given product.
:param product:
:param gdax:
:param out_path:
:param start:
:param end:
:param granularity:
:return: (GdaxChartData, str)
Returns the generated ChartData object along with the out_path.
"""
if gdax is None:
gdax = Gdax()
if out_path is None:
import os
from stocklook.config import config
d = config['DATA_DIRECTORY']
c = product.split('-')[0]
n = '{}_candles.csv'.format(c)
out_path = os.path.join(d, n)
if start is None:
from stocklook.utils.timetools import now_minus
start = now_minus(weeks=4)
if end is None:
from stocklook.utils.timetools import now
end = now()
data = GdaxChartData(gdax, product, start, end, granularity)
data.df.to_csv(out_path, index=False)
get_buypoint(data)
return data, out_path
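# A minimal usage sketch (the product and granularity values are illustrative; start/end
# default to the last four weeks and out_path defaults to the configured data directory):
#
#   data, csv_path = generate_candles('ETH-USD', granularity=60 * 60 * 6)
#   # csv_path ends with 'ETH_candles.csv' and holds the open/high/low/close/volume candles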
def sync_database(gdax, db=None, interval=60):
"""
Updates a GdaxDatabase with
pricing information on a regular basis.
:param gdax: (gdax.api.Gdax)
The api object
:param db: (gdax.db.GdaxDatabase)
The database to load updates into.
:param interval: (int, default 60)
The number of seconds to wait between updates.
:return:
"""
from time import sleep
if db is None:
db = GdaxDatabase(gdax)
while True:
db.sync_quotes()
sleep(interval)
|
fastapi/openapi/constants.py | Aryabhata-Rootspring/fastapi | 53,007 | 12607979 | METHODS_WITH_BODY = {"GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"}
STATUS_CODES_WITH_NO_BODY = {100, 101, 102, 103, 204, 304}
REF_PREFIX = "#/components/schemas/"
|
tests/test_codebase/test_mmocr/data/dbnet.py | zhiqwang/mmdeploy | 746 | 12608002 | # Copyright (c) OpenMMLab. All rights reserved.
model = dict(
type='DBNet',
backbone=dict(
type='mmdet.ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18'),
norm_eval=False,
style='caffe'),
neck=dict(type='FPNC', in_channels=[2, 4, 8, 16], lateral_channels=8),
bbox_head=dict(
type='DBHead',
text_repr_type='quad',
in_channels=8,
loss=dict(type='DBLoss', alpha=5.0, beta=10.0, bbce_loss=True)),
train_cfg=None,
test_cfg=None)
dataset_type = 'IcdarDataset'
data_root = 'tests/test_codebase/test_mmocr/data'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='MultiScaleFlipAug',
img_scale=(128, 64),
flip=False,
transforms=[
dict(type='Resize', img_scale=(256, 128), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
test_dataloader=dict(samples_per_gpu=1),
test=dict(
type=dataset_type,
ann_file=data_root + '/text_detection.json',
img_prefix=data_root,
pipeline=test_pipeline))
evaluation = dict(interval=100, metric='hmean-iou')
|
src/super_gradients/training/utils/sg_model_utils.py | Deci-AI/super-gradients | 308 | 12608011 | import os
import sys
import socket
import time
from multiprocessing import Process
from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.tensorboard import SummaryWriter
from super_gradients.training.exceptions.dataset_exceptions import UnsupportedBatchItemsFormat
# TODO: These utils should move to sg_model package as internal (private) helper functions
def try_port(port):
"""
try_port - Helper method for tensorboard port binding
:param port:
:return:
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
is_port_available = False
try:
sock.bind(("localhost", port))
is_port_available = True
except Exception as ex:
print('Port ' + str(port) + ' is in use' + str(ex))
sock.close()
return is_port_available
def launch_tensorboard_process(checkpoints_dir_path: str, sleep_postpone: bool = True, port: int = None) -> Tuple[Process, int]:
"""
launch_tensorboard_process - Default behavior is to scan all free ports from 6006-6016 and try using them
unless port is defined by the user
:param checkpoints_dir_path:
:param sleep_postpone:
:param port:
:return: tuple of tb process, port
"""
logdir_path = str(Path(checkpoints_dir_path).parent.absolute())
tb_cmd = 'tensorboard --logdir=' + logdir_path + ' --bind_all'
if port is not None:
tb_ports = [port]
else:
tb_ports = range(6006, 6016)
for tb_port in tb_ports:
if not try_port(tb_port):
continue
else:
print('Starting Tensor-Board process on port: ' + str(tb_port))
tensor_board_process = Process(target=os.system, args=([tb_cmd + ' --port=' + str(tb_port)]))
tensor_board_process.daemon = True
tensor_board_process.start()
# LET THE TENSORBOARD PROCESS START
if sleep_postpone:
time.sleep(3)
return tensor_board_process, tb_port
# RETURNING IRRELEVANT VALUES
print('Failed to initialize Tensor-Board process on port: ' + ', '.join(map(str, tb_ports)))
return None, -1
def init_summary_writer(tb_dir, checkpoint_loaded, user_prompt=False):
"""Remove previous tensorboard files from directory and launch a tensor board process"""
# If the training is from scratch, Walk through destination folder and delete existing tensorboard logs
user = ''
if not checkpoint_loaded:
for filename in os.listdir(tb_dir):
if 'events' in filename:
if not user_prompt:
print('"{}" will not be deleted'.format(filename))
continue
while True:
# Verify with user before deleting old tensorboard files
user = input('\nOLDER TENSORBOARD FILES EXISTS IN EXPERIMENT FOLDER:\n"{}"\n'
'DO YOU WANT TO DELETE THEM? [y/n]'
.format(filename)) if (user != 'n' or user != 'y') else user
if user == 'y':
os.remove('{}/{}'.format(tb_dir, filename))
print('DELETED: {}!'.format(filename))
break
elif user == 'n':
print('"{}" will not be deleted'.format(filename))
break
print('Unknown answer...')
# Launch a tensorboard process
return SummaryWriter(tb_dir)
def add_log_to_file(filename, results_titles_list, results_values_list, epoch, max_epochs):
"""Add a message to the log file"""
# Note: opening and closing the file every time is inefficient; it is done for experimental purposes
with open(filename, 'a') as f:
f.write('\nEpoch (%d/%d) - ' % (epoch, max_epochs))
for result_title, result_value in zip(results_titles_list, results_values_list):
if isinstance(result_value, torch.Tensor):
result_value = result_value.item()
f.write(result_title + ': ' + str(result_value) + '\t')
def write_training_results(writer, results_titles_list, results_values_list, epoch):
"""Stores the training and validation loss and accuracy for current epoch in a tensorboard file"""
for res_key, res_val in zip(results_titles_list, results_values_list):
# USE ONLY LOWER-CASE LETTERS AND REPLACE SPACES WITH '_' TO AVOID MANY TITLES FOR THE SAME KEY
corrected_res_key = res_key.lower().replace(' ', '_')
writer.add_scalar(corrected_res_key, res_val, epoch)
writer.flush()
def write_hpms(writer, hpmstructs=[], special_conf={}):
"""Stores the training and dataset hyper params in the tensorboard file"""
hpm_string = ""
for hpm in hpmstructs:
for key, val in hpm.__dict__.items():
hpm_string += '{}: {} \n '.format(key, val)
for key, val in special_conf.items():
hpm_string += '{}: {} \n '.format(key, val)
writer.add_text("Hyper_parameters", hpm_string)
writer.flush()
# TODO: This should probably move into datasets/datasets_utils.py?
def unpack_batch_items(batch_items: Union[tuple, torch.Tensor]):
"""
Adds support for unpacking batch items in train/validation loop.
@param batch_items: (Union[tuple, torch.Tensor]) returned by the data loader, which is expected to be in one of
the following formats:
1. torch.Tensor or tuple, s.t inputs = batch_items[0], targets = batch_items[1] and len(batch_items) = 2
2. tuple: (inputs, targets, additional_batch_items)
where inputs are fed to the network, targets are their corresponding labels and additional_batch_items is a
dictionary (format {additional_batch_item_i_name: additional_batch_item_i ...}) which can be accessed through
the phase context under the attribute additional_batch_item_i_name, using a phase callback.
@return: inputs, target, additional_batch_items
"""
additional_batch_items = {}
if len(batch_items) == 2:
inputs, target = batch_items
elif len(batch_items) == 3:
inputs, target, additional_batch_items = batch_items
else:
raise UnsupportedBatchItemsFormat()
return inputs, target, additional_batch_items
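# A minimal usage sketch for unpack_batch_items (not part of the original module);
# the tensors and the extra dict below are illustrative placeholders.
def _example_unpack_batch_items():
    inputs = torch.zeros(4, 3, 32, 32)              # dummy image batch
    targets = torch.zeros(4, dtype=torch.long)      # dummy labels
    # Two-item batches unpack into (inputs, targets, {}).
    x, y, extra = unpack_batch_items((inputs, targets))
    assert extra == {}
    # Three-item batches carry an additional dict through to the phase context.
    x, y, extra = unpack_batch_items((inputs, targets, {'sample_ids': [0, 1, 2, 3]}))
    assert extra['sample_ids'] == [0, 1, 2, 3]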
def log_uncaught_exceptions(logger):
"""
Makes logger log uncaught exceptions
@param logger: logging.Logger
@return: None
"""
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
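# Illustrative only (not part of the original module): wiring a logger into the
# excepthook installed by log_uncaught_exceptions; the logger name is arbitrary.
def _example_log_uncaught_exceptions():
    import logging
    crash_logger = logging.getLogger('training_crash_logger')
    log_uncaught_exceptions(crash_logger)
    # From this point on, any uncaught exception other than KeyboardInterrupt is
    # written to crash_logger with its full traceback before the process exits.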
|
jenkins/test/validators/common.py | fahlmant/openshift-tools | 164 | 12608036 | ''' Provide common utils to validators '''
import subprocess
import sys
# Run cli command. By default, exit when an error occurs
def run_cli_cmd(cmd, exit_on_fail=True):
'''Run a command and return its output'''
print "> " + " ".join(cmd)
proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
if exit_on_fail:
print stdout
print "Unable to run " + " ".join(cmd) + " due to error: " + stderr
sys.exit(proc.returncode)
else:
return False, stdout
else:
return True, stdout
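# Illustrative only: the command below is a stand-in, not something the validators
# actually run. With exit_on_fail=False the helper returns a (success, stdout) tuple
# instead of terminating the process on error.
def _example_run_cli_cmd():
    ok, output = run_cli_cmd(["echo", "hello"], exit_on_fail=False)
    return ok, output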
|
pokemongo_bot/test/polyline_generator_test.py | timgates42/PokemonGo-Bot | 5,362 | 12608106 | <reponame>timgates42/PokemonGo-Bot
import os
import pickle
import unittest
import requests_mock
from pokemongo_bot.walkers.polyline_generator import Polyline
ex_orig = (47.1706378, 8.5167405)
ex_dest = (47.1700271, 8.518072999999998)
ex_speed = 2.5
ex_total_distance = 194
ex_resp_directions = 'example_directions.pickle'
ex_resp_elevations = 'example_elevations.pickle'
ex_enc_polyline = 'o_%7C~Gsl~r@??h@LVDf@LDcBFi@AUEUQg@EKCI?G?GBG@EBEJKNC??'
ex_nr_samples = 64
class PolylineTestCase(unittest.TestCase):
def setUp(self):
directions_path = os.path.join(os.path.dirname(__file__), 'resources', ex_resp_directions)
with open(directions_path, 'rb') as directions:
ex_directions = pickle.load(directions)
elevations_path = os.path.join(os.path.dirname(__file__), 'resources', ex_resp_elevations)
with open(elevations_path, 'rb') as elevations:
ex_elevations = pickle.load(elevations)
with requests_mock.Mocker() as m:
m.get('https://maps.googleapis.com/maps/api/directions/json?mode=walking&origin={},{}&destination={},{}'.format(
ex_orig[0], ex_orig[1], ex_dest[0], ex_dest[1]
), json=ex_directions, status_code=200)
m.get('https://maps.googleapis.com/maps/api/elevation/json?path=enc:{}&samples={}'.format(
ex_enc_polyline, ex_nr_samples
), json=ex_elevations, status_code=200)
self.polyline = Polyline(ex_orig, ex_dest)
def test_first_point(self):
self.assertEqual(self.polyline._points[0], ex_orig)
def test_last_point(self):
self.assertEqual(self.polyline._points[-1], ex_dest)
def test_nr_of_elevations_returned(self):
total_seconds = self.polyline.get_total_distance() / 3
self.assertAlmostEqual(total_seconds, ex_nr_samples, places=0)
def test_total_distance(self):
self.assertEquals(self.polyline.get_total_distance(), ex_total_distance)
def test_get_last_pos(self):
self.assertEquals(self.polyline.get_last_pos(), self.polyline._last_pos)
|
solutions/dna_sequence_classification/quick_deploy/server/src/operations/load.py | kilianovski/bootcamp | 789 | 12608115 | <reponame>kilianovski/bootcamp
import sys
import pandas as pd
sys.path.append("..")
from config import DEFAULT_TABLE, KMER_K
from utils import build_kmers,train_vec
def seq_to_kmers(df_table):
# Function to replace sequence column with kmers in df_table
df_table['kmers'] = df_table.apply(lambda x: build_kmers(x['sequence'], KMER_K), axis=1)
df_table.drop(['sequence'], axis=1, inplace=True)
def get_vectors(df_table):
# Convert sequences to k-mer sentences and train vectors from them
seq_to_kmers(df_table)
words = list(df_table['kmers']) # list of all sequences in kmers
texts = []
for i in range(len(words)):
texts.append(' '.join(words[i]))
vectors = train_vec(texts)
return vectors
def format_data(ids, classes):
# Pair Milvus ids with class labels as (id, class) string tuples for MySQL insertion
data = []
for i in range(len(ids)):
value = (str(ids[i]), str(classes[i]))
data.append(value)
return data
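# A small illustration (not part of the original module): pairing Milvus ids with
# class labels; the ids and labels below are made up.
def _example_format_data():
    ids = [1652345678901234501, 1652345678901234502]
    classes = [3, 5]
    # Produces [('1652345678901234501', '3'), ('1652345678901234502', '5')]
    return format_data(ids, classes)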
def import_data(collection_name, file_dir, milvus_cli, mysql_cli):
# Import vectors to Milvus and data to Mysql respectively
if not collection_name:
collection_name = DEFAULT_TABLE
df = pd.read_table(file_dir)
# class_name = collection_name+'_class'
vectors = get_vectors(df)
ids = milvus_cli.insert(collection_name, vectors)
milvus_cli.create_index(collection_name)
mysql_cli.create_mysql_table(collection_name)
mysql_cli.load_data_to_mysql(collection_name, format_data(ids, df['class']))
return len(ids)
|
maro/rl/exploration/__init__.py | yangboz/maro | 598 | 12608139 | <filename>maro/rl/exploration/__init__.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_explorer import AbsExplorer
from .epsilon_greedy_explorer import EpsilonGreedyExplorer
from .noise_explorer import GaussianNoiseExplorer, NoiseExplorer, UniformNoiseExplorer
__all__ = ["AbsExplorer", "EpsilonGreedyExplorer", "GaussianNoiseExplorer", "NoiseExplorer", "UniformNoiseExplorer"]
|
tools/merge_checkpoints.py | Woffee/deformer | 114 | 12608145 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import re
import tensorflow as tf
def gen_variable_from_checkpoint(checkpoint):
for var_name, var_shape in tf.train.list_variables(checkpoint):
var = tf.train.load_variable(checkpoint, var_name)
if var_name.endswith('adam_m') or var_name.endswith('adam_v'):
continue
print('{}: {} in {}'.format(var_name, var_shape, checkpoint))
yield tf.Variable(var, name=var_name)
def main(args):
checkpoint_one = args.checkpoint_one
checkpoint_two = args.checkpoint_two
out_file = args.out_file
with tf.Session() as sess:
new_vars = []
for new_var in gen_variable_from_checkpoint(checkpoint_one):
new_vars.append(new_var)
if args.from_one:
upper_start = args.from_one_upper
one_var_names_map = dict()
one_var_names = []
for var_name, _ in tf.train.list_variables(checkpoint_one):
if var_name.endswith('adam_m') or var_name.endswith('adam_v'):
continue
one_var_names.append(var_name)
search_result = re.search(r'layer_.*', var_name)
if search_result:
layer_suffix = search_result.group()
one_var_names_map[layer_suffix] = var_name
if upper_start:
# only upper from checkpoint one,
# lower layers and other scope from checkpoint two
for var_name, _ in tf.train.list_variables(checkpoint_two):
if var_name.endswith('adam_m') or var_name.endswith('adam_v'):
continue
search_result = re.search(r'layer_.*', var_name)
if search_result:
layer_suffix = search_result.group()
layer_suffix = layer_suffix.replace('LayerNorm',
'layer_norm')
layer_str = re.search(r'layer_(\d+).*',
layer_suffix).group(1)
layer = int(layer_str)
if layer >= upper_start:
# use variable from checkpoint one
one_var_name = one_var_names_map[layer_suffix]
one_var = tf.train.load_variable(checkpoint_one,
one_var_name)
new_var_name = 'init_sbert/encoder/' + layer_suffix
print(new_var_name, 'using',
one_var_name, 'from checkpoint one')
new_var = tf.Variable(one_var, name=new_var_name)
new_vars.append(new_var)
else:
two_var = tf.train.load_variable(checkpoint_two,
var_name)
new_var_name = 'init_sbert/encoder/' + layer_suffix
print(new_var_name, 'using',
var_name, ' from checkpoint two')
new_var = tf.Variable(two_var, name=new_var_name)
new_vars.append(new_var)
else:
two_var = tf.train.load_variable(checkpoint_two,
var_name)
new_var_name = re.sub(r'.*bert/', 'init_sbert/',
var_name)
new_var_name = new_var_name.replace('LayerNorm',
'layer_norm')
print(new_var_name, 'using',
var_name, 'from checkpoint two')
new_var = tf.Variable(two_var, name=new_var_name)
new_vars.append(new_var)
else:
# all from checkpoint one
for one_var_name in one_var_names:
one_var = tf.train.load_variable(checkpoint_one,
one_var_name)
new_var_name = re.sub(r'.*encoder/', 'init_sbert/encoder/',
one_var_name)
new_var_name = re.sub(r'.*bert/', 'init_sbert/',
new_var_name)
new_var_name = new_var_name.replace('LayerNorm',
'layer_norm')
print(new_var_name, 'using',
one_var_name, 'from checkpoint one')
new_var = tf.Variable(one_var, name=new_var_name)
new_vars.append(new_var)
else:
for new_var in gen_variable_from_checkpoint(checkpoint_two):
new_vars.append(new_var)
print('saving weights to:', out_file)
if not args.dry_run:
# Save the variables
saver = tf.train.Saver(new_vars)
sess.run(tf.global_variables_initializer())
saver.save(sess, out_file, write_meta_graph=False)
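# Illustrative only: the variable name below is hypothetical, but it shows the effect
# of the re.sub / replace renaming rules applied in main() above.
def _example_variable_renaming():
    name = 'bert/encoder/layer_11/attention/output/LayerNorm/beta'
    renamed = re.sub(r'.*bert/', 'init_sbert/', name)
    renamed = renamed.replace('LayerNorm', 'layer_norm')
    # -> 'init_sbert/encoder/layer_11/attention/output/layer_norm/beta'
    return renamed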
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c1', '--checkpoint_one', type=str)
parser.add_argument('-c2', '--checkpoint_two', type=str)
parser.add_argument('-o', '--out_file', type=str)
parser.add_argument("-dr", "--dry_run", action='store_true',
help="dry run renaming")
parser.add_argument("-fo", "--from_one", action='store_true',
help="init from checkpoint one")
parser.add_argument("-fou", "--from_one_upper", type=int, default=0,
help="init from checkpoint one upper layer")
main(parser.parse_args())
|
demo/explore_data.py | ashika6/keras-text-summarization | 296 | 12608171 | <reponame>ashika6/keras-text-summarization
import pandas as pd
from sklearn.model_selection import train_test_split
from keras_text_summarization.library.applications.fake_news_loader import fit_text
def main():
data_dir_path = './data'
# Import `fake_or_real_news.csv`
df = pd.read_csv(data_dir_path + "/fake_or_real_news.csv")
# Inspect shape of `df`
print(df.shape)
# Print first lines of `df`
print(df.head())
# Set index
df = df.set_index("Unnamed: 0")
# Print first lines of `df`
print(df.head())
# Set `y`
Y = df.title
X = df['text']
# Drop the `title` column
df = df.drop("title", axis=1)
# Make training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=53)
print('X train: ', X_train.shape)
print('Y train: ', y_train.shape)
config = fit_text(X, Y)
print('num_input_tokens: ', config['num_input_tokens'])
print('num_target_tokens: ', config['num_target_tokens'])
print('max_input_seq_length: ', config['max_input_seq_length'])
print('max_target_seq_length: ', config['max_target_seq_length'])
if __name__ == '__main__':
main()
|
fredapi/__init__.py | fadhilmch/fredapi | 492 | 12608183 | <filename>fredapi/__init__.py
from fredapi.version import version as __version__
from fredapi.fred import Fred
|
tests/test_pairwise_preference.py | mpkato/interleaving | 107 | 12608201 | import interleaving as il
from interleaving import PairwisePreferenceRanking
import json
from .test_methods import TestMethods
class TestPairwisePreference(TestMethods):
def test_interleave(self):
# Method Rankings Max length
# Possible interleaved rankings
self.interleave(il.PairwisePreference, [[1, 2], [2, 3]], 2,
[(1, 2), (2, 1), (1, 3), (2, 3)])
self.interleave(il.PairwisePreference, [[1, 2], [2, 3]], 3,
[(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1)])
self.interleave(il.PairwisePreference, [[1, 2], [2, 3]], 4,
[(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1)])
self.interleave(il.PairwisePreference, [[1, 2], [3, 4]], 2,
[(1, 2), (1, 3), (1, 4), (3, 1), (3, 2), (3, 4)])
self.interleave(il.PairwisePreference, [[1, 2], [3, 4]], 3,
[
(1, 2, 3), (1, 2, 4), (1, 3, 2), (1, 3, 4), (1, 4, 2), (1, 4, 3),
(3, 1, 2), (3, 1, 4), (3, 2, 1), (3, 2, 4), (3, 4, 1), (3, 4, 2),
])
# check lists
pp = il.PairwisePreference([[1, 2], [2, 3]])
res = pp.interleave()
assert tuple(res.lists[0]) == tuple([1, 2])
assert tuple(res.lists[1]) == tuple([2, 3])
def test_pairwise_preference_ranking(self):
pp = il.PairwisePreference([[1, 2, 3], [2, 3, 1]], sample_num=100)
rankings, _ = zip(*pp.ranking_distribution)
assert len(rankings) == 4
assert set(map(tuple, rankings))\
== set(map(tuple, [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1]]))
def test_dump(self, tmpdir):
tmpfile = str(tmpdir) + '/team_draft.json'
pp = il.PairwisePreference([[1, 2, 3], [2, 3, 1]], sample_num=100)
pp.dump_rankings(tmpfile)
with open(tmpfile, 'r') as f:
obj = json.load(f)
# Test keys
s = {str(hash(r)) for r in pp._rankings}
assert s == set(obj.keys())
# Test rankings
l1 = sorted(pp._rankings)
l2 = sorted([v['ranking']['ranking_list'] for v in obj.values()])
assert l1 == l2
# Test lists
l1 = [r.lists for r in pp._rankings]
l2 = [v['ranking']['lists'] for v in obj.values()]
assert l1 == l2
def test_multileave(self):
self.interleave(il.PairwisePreference, [[1, 2], [2, 3], [3, 4]], 2,
[
(1, 2), (1, 3), (1, 4),
(2, 1), (2, 3), (2, 4),
(3, 1), (3, 2), (3, 4),
])
self.interleave(il.PairwisePreference, [[1, 2], [3, 4], [5, 6]], 2,
[
(1, 2), (1, 3), (1, 4), (1, 5), (1, 6),
(3, 1), (3, 2), (3, 4), (3, 5), (3, 6),
(5, 1), (5, 2), (5, 3), (5, 4), (5, 6),
])
def test_find_preferences(self):
ranking = [0, 1, 2, 3, 4]
clicks = []
prefs = il.PairwisePreference._find_preferences(ranking, clicks)
assert set(prefs) == set()
clicks = [0]
prefs = il.PairwisePreference._find_preferences(ranking, clicks)
assert set(prefs) == set([(0, 1)])
clicks = [2]
prefs = il.PairwisePreference._find_preferences(ranking, clicks)
assert set(prefs) == set([
(2, 0), (2, 1), (2, 3)
])
clicks = [1, 3]
prefs = il.PairwisePreference._find_preferences(ranking, clicks)
assert set(prefs) == set([
(1, 0), (1, 2),
(3, 0), (3, 2), (3, 4)
])
clicks = [4]
prefs = il.PairwisePreference._find_preferences(ranking, clicks)
assert set(prefs) == set([
(4, 0), (4, 1), (4, 2), (4, 3)
])
def test_compute_scores(self):
rankings = [
[0, 1, 2, 3, 4],
[0, 1, 2, 4, 3],
[2, 1, 4, 0, 3],
]
multileaved_ranking = [0, 1, 2, 3, 4]
ranking = PairwisePreferenceRanking(rankings, multileaved_ranking)
scores = il.PairwisePreference.compute_scores(ranking, [0])
assert scores[0] == 0.0
assert scores[1] == 0.0
assert scores[2] == 0.0
scores = il.PairwisePreference.compute_scores(ranking, [1])
# (1, 0), (1, 2)
w = 1 - 1/2
assert scores[0] == 1 / w
assert scores[1] == 1 / w
assert scores[2] == - 1 / w
scores = il.PairwisePreference.compute_scores(ranking, [2])
# (2, 0), (2, 1), (2, 3)
w20 = 1
w21 = 1 - 1/2
assert scores[0] == - w20 - 1 / w21
assert scores[1] == - w20 - 1 / w21
assert scores[2] == w20 + 1 / w21
scores = il.PairwisePreference.compute_scores(ranking, [1, 2])
# (1, 0), (2, 0), (2, 3)
assert scores[0] == -1
assert scores[1] == -1
assert scores[2] == 1
def test_find_highest_rank_for_all(self):
rankings = [
[0, 1, 2, 3, 4],
[4, 3, 2, 1, 0],
]
rank = il.PairwisePreference._find_highest_rank_for_all(rankings, 0, 1)
assert rank == 1
rank = il.PairwisePreference._find_highest_rank_for_all(rankings, 1, 2)
assert rank == 2
rank = il.PairwisePreference._find_highest_rank_for_all(rankings, 0, 4)
assert rank == 0
def test_find_highest_rank_for_any(self):
rankings = [
[0, 1, 2, 3, 4],
[4, 3, 2, 1, 0],
]
rank = il.PairwisePreference._find_highest_rank_for_any(rankings, 0, 1)
assert rank == 0
rank = il.PairwisePreference._find_highest_rank_for_any(rankings, 1, 2)
assert rank == 1
rank = il.PairwisePreference._find_highest_rank_for_any(rankings, 0, 4)
assert rank == 0
def test_find_highest_rank_for_ranking(self):
ranking = [0, 1, 2, 3, 4]
rank = il.PairwisePreference._find_highest_rank_for_ranking(ranking, 0, 1)
assert rank == 0
rank = il.PairwisePreference._find_highest_rank_for_ranking(ranking, 1, 2)
assert rank == 1
rank = il.PairwisePreference._find_highest_rank_for_ranking(ranking, 2, 4)
assert rank == 2
def test_get_rank(self):
ranking = [0, 1, 2, 3, 4]
rank = il.PairwisePreference._get_rank(ranking, 2)
assert rank == 2
rank = il.PairwisePreference._get_rank(ranking, 4)
assert rank == 4
rank = il.PairwisePreference._get_rank(ranking, 6)
assert rank == 5
def test_compute_probability(self):
rankings = [
[0, 1, 2, 3, 4],
[4, 3, 2, 1, 0],
]
sup, inf = 2, 1
r_above = il.PairwisePreference._find_highest_rank_for_all(rankings, sup, inf)
assert r_above == 2
w = il.PairwisePreference._compute_probability(r_above, rankings, sup, inf)
ideal_w = 1 - 1 / (4 - 1)
assert w == ideal_w
rankings = [
[0, 1, 2, 3, 4],
[0, 1, 2, 4, 3],
[2, 1, 4, 0, 3],
]
sup, inf = 1, 3
r_above = il.PairwisePreference._find_highest_rank_for_all(rankings, sup, inf)
assert r_above == 3
w = il.PairwisePreference._compute_probability(r_above, rankings, sup, inf)
ideal_w = (1 - 1 / (3 - 1)) * (1 - 1 / (4 - 2))
assert w == ideal_w
|
playbooks/internal_host_winrm_investigate.py | arjunkhunti-crest/security_content | 348 | 12608216 | """
Published in response to CVE-2021-44228, this playbook performs a general investigation on key aspects of a Windows device using Windows Remote Management. Important files related to the endpoint are generated, bundled into a zip, and copied to the container vault.
"""
import phantom.rules as phantom
import json
from datetime import datetime, timedelta
def on_start(container):
phantom.debug('on_start() called')
# call 'list_processes' block
list_processes(container=container)
return
def run_data_collect_script(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("run_data_collect_script() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
################################################################################
# Enumerates autoruns, installed programs, listening network connections, running
# processes, registered services, scheduled tasks, local users, and local groups.
# It then exports to CSV. Finally, all information is exported to zip.
################################################################################
playbook_input_ip_or_hostname = phantom.collect2(container=container, datapath=["playbook_input:ip_or_hostname"])
format_data_collect_script__as_list = phantom.get_format_data(name="format_data_collect_script__as_list")
parameters = []
# build parameters list for 'run_data_collect_script' call
for playbook_input_ip_or_hostname_item in playbook_input_ip_or_hostname:
parameters.append({
"script_str": format_data_collect_script__as_list,
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code Start
################################################################################
parameters = []
# build parameters list for 'run_data_collect_script' call
for playbook_input_ip_or_hostname_item, formatted_part in zip(playbook_input_ip_or_hostname, format_data_collect_script__as_list):
parameters.append({
"script_str": formatted_part,
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code End
################################################################################
phantom.act("run script", parameters=parameters, name="run_data_collect_script", assets=["winrm"], callback=format_zip)
return
def get_zip(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("get_zip() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
################################################################################
# Fetches the zip created by the data capture script and uploads to vault.
################################################################################
playbook_input_ip_or_hostname = phantom.collect2(container=container, datapath=["playbook_input:ip_or_hostname"])
format_zip__as_list = phantom.get_format_data(name="format_zip__as_list")
parameters = []
# build parameters list for 'get_zip' call
for playbook_input_ip_or_hostname_item in playbook_input_ip_or_hostname:
if format_zip__as_list is not None:
parameters.append({
"file_path": format_zip__as_list,
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code Start
################################################################################
parameters = []
# build parameters list for 'get_zip' call
for playbook_input_ip_or_hostname_item, formatted_part in zip(playbook_input_ip_or_hostname, format_zip__as_list):
parameters.append({
"ip_hostname": playbook_input_ip_or_hostname_item[0],
"file_path": formatted_part,
})
################################################################################
## Custom Code End
################################################################################
phantom.act("get file", parameters=parameters, name="get_zip", assets=["winrm"], callback=format_file_removal)
return
def remove_data_capture_files(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("remove_data_capture_files() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
################################################################################
# Removes the temporary files created by the data collection script
################################################################################
playbook_input_ip_or_hostname = phantom.collect2(container=container, datapath=["playbook_input:ip_or_hostname"])
format_file_removal__as_list = phantom.get_format_data(name="format_file_removal__as_list")
parameters = []
# build parameters list for 'remove_data_capture_files' call
for playbook_input_ip_or_hostname_item in playbook_input_ip_or_hostname:
parameters.append({
"script_str": format_file_removal__as_list,
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code Start
################################################################################
parameters = []
# build parameters list for 'remove_data_capture_files' call
for playbook_input_ip_or_hostname_item, formatted_part in zip(playbook_input_ip_or_hostname, format_file_removal__as_list):
parameters.append({
"script_str": formatted_part,
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code End
################################################################################
phantom.act("run script", parameters=parameters, name="remove_data_capture_files", assets=["winrm"])
return
def format_data_collect_script(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("format_data_collect_script() called")
template = """%%\n$ProgressPreference = 'SilentlyContinue'; Get-CimInstance -ClassName Win32_StartupCommand | Export-Csv -Path .\\{0}-SOARFetch-Autorun.csv -NoType; Get-ItemProperty HKLM:\\Software\\Wow6432Node\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\* | Export-Csv -Path .\\{0}-SOARFetch-InstalledPrograms.csv -NoType; Get-NetTCPConnection -State Listen | Export-Csv -Path .\\{0}-SOARFetch-NetworkConnections.csv -NoType; Get-Process -IncludeUserName | Export-Csv -Path .\\{0}-SOARFetch-Processes.csv -NoType; Get-Service | Export-Csv -Path .\\{0}-SOARFetch-Services.csv -NoType; Get-ScheduledTask | Export-Csv -Path .\\{0}-SOARFetch-ScheduledTasks.csv -NoType; Get-LocalUser | Export-Csv -Path .\\{0}-SOARFetch-Users.csv -NoType; Get-LocalGroup | Export-Csv -Path .\\{0}-SOARFetch-Groups.csv -NoType; Compress-Archive -Path .\\{0}-SOARFetch* .\\{0}-SOARFetch.zip; \n%%"""
# parameter list for template variable replacement
parameters = [
"playbook_input:ip_or_hostname"
]
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.format(container=container, template=template, parameters=parameters, name="format_data_collect_script")
run_data_collect_script(container=container)
return
def list_processes(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("list_processes() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
################################################################################
# List running processes
################################################################################
playbook_input_ip_or_hostname = phantom.collect2(container=container, datapath=["playbook_input:ip_or_hostname"])
parameters = []
# build parameters list for 'list_processes' call
for playbook_input_ip_or_hostname_item in playbook_input_ip_or_hostname:
parameters.append({
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.act("list processes", parameters=parameters, name="list_processes", assets=["winrm"], callback=list_connections)
return
def list_connections(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("list_connections() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
################################################################################
# List current connections
################################################################################
playbook_input_ip_or_hostname = phantom.collect2(container=container, datapath=["playbook_input:ip_or_hostname"])
parameters = []
# build parameters list for 'list_connections' call
for playbook_input_ip_or_hostname_item in playbook_input_ip_or_hostname:
parameters.append({
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.act("list connections", parameters=parameters, name="list_connections", assets=["winrm"], callback=list_sessions)
return
def list_sessions(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("list_sessions() called")
# phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
################################################################################
# List active sessions
################################################################################
playbook_input_ip_or_hostname = phantom.collect2(container=container, datapath=["playbook_input:ip_or_hostname"])
parameters = []
# build parameters list for 'list_sessions' call
for playbook_input_ip_or_hostname_item in playbook_input_ip_or_hostname:
parameters.append({
"ip_hostname": playbook_input_ip_or_hostname_item[0],
})
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.act("list sessions", parameters=parameters, name="list_sessions", assets=["winrm"], callback=format_data_collect_script)
return
def format_zip(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("format_zip() called")
################################################################################
# Format a dynamic string where the ZIP is located.
################################################################################
template = """%%\n.\\{0}-SOARFetch.zip\n%%"""
# parameter list for template variable replacement
parameters = [
"playbook_input:ip_or_hostname"
]
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.format(container=container, template=template, parameters=parameters, name="format_zip")
get_zip(container=container)
return
def format_file_removal(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
phantom.debug("format_file_removal() called")
################################################################################
# Format dynamic string for file removal
################################################################################
template = """%%\nRemove-Item -Path .\\{0}-SOARFetch*\n%%"""
# parameter list for template variable replacement
parameters = [
"playbook_input:ip_or_hostname"
]
################################################################################
## Custom Code Start
################################################################################
# Write your custom code here...
################################################################################
## Custom Code End
################################################################################
phantom.format(container=container, template=template, parameters=parameters, name="format_file_removal")
remove_data_capture_files(container=container)
return
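# Illustrative only (never executed by the playbook): assuming the %% ... %% block in the
# templates above is expanded once per playbook input, a hypothetical host "WIN-HOST01"
# would yield per-host strings along these lines.
def _example_formatted_strings(ip_or_hostname="WIN-HOST01"):
    zip_path = ".\\{0}-SOARFetch.zip".format(ip_or_hostname)
    removal_cmd = "Remove-Item -Path .\\{0}-SOARFetch*".format(ip_or_hostname)
    return zip_path, removal_cmd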
def on_finish(container, summary):
phantom.debug("on_finish() called")
################################################################################
## Custom Code Start
################################################################################
# This function is called after all actions are completed.
# summary of all the action and/or all details of actions
# can be collected here.
# summary_json = phantom.get_summary()
# if 'result' in summary_json:
# for action_result in summary_json['result']:
# if 'action_run_id' in action_result:
# action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False)
# phantom.debug(action_results)
################################################################################
## Custom Code End
################################################################################
return |
claf/modules/encoder/lstm_cell_with_projection.py | GMDennis/claf | 225 | 12608224 | <reponame>GMDennis/claf
"""
This code is from allenai/allennlp
(https://github.com/allenai/allennlp/blob/master/allennlp/modules/lstm_cell_with_projection.py)
"""
import itertools
from typing import Callable, List, Tuple, Union, Optional
import torch
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
class LstmCellWithProjection(torch.nn.Module): # pragma: no cover
"""
An LSTM with Recurrent Dropout and a projected and clipped hidden state and
memory. Note: this implementation is slower than the native Pytorch LSTM because
it cannot make use of CUDNN optimizations for stacked RNNs due to and
variational dropout and the custom nature of the cell state.
Parameters
----------
input_size : ``int``, required.
The dimension of the inputs to the LSTM.
hidden_size : ``int``, required.
The dimension of the outputs of the LSTM.
cell_size : ``int``, required.
The dimension of the memory cell used for the LSTM.
go_forward: ``bool``, optional (default = True)
The direction in which the LSTM is applied to the sequence.
Forwards by default, or backwards if False.
recurrent_dropout_probability: ``float``, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
`A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
<https://arxiv.org/abs/1512.05287>`_ . Implementation wise, this simply
applies a fixed dropout mask per sequence to the recurrent connection of the
LSTM.
state_projection_clip_value: ``float``, optional, (default = None)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value: ``float``, optional, (default = None)
The magnitude with which to clip the memory cell.
Returns
-------
output_accumulator : ``torch.FloatTensor``
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
final_state: ``Tuple[torch.FloatTensor, torch.FloatTensor]``
The final (state, memory) states of the LSTM, with shape
(1, batch_size, hidden_size) and (1, batch_size, cell_size)
respectively. The first dimension is 1 in order to match the Pytorch
API for returning stacked LSTM states.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
cell_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None,
) -> None:
super(LstmCellWithProjection, self).__init__()
# Required to be wrapped with a :class:`PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.cell_size = cell_size
self.go_forward = go_forward
self.state_projection_clip_value = state_projection_clip_value
self.memory_cell_clip_value = memory_cell_clip_value
self.recurrent_dropout_probability = recurrent_dropout_probability
# We do the projections for all the gates all at once.
self.input_linearity = torch.nn.Linear(input_size, 4 * cell_size, bias=False)
self.state_linearity = torch.nn.Linear(hidden_size, 4 * cell_size, bias=True)
# Additional projection matrix for making the hidden state smaller.
self.state_projection = torch.nn.Linear(cell_size, hidden_size, bias=False)
self.reset_parameters()
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.cell_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data, [self.cell_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.cell_size : 2 * self.cell_size].fill_(1.0)
def forward(
self, # pylint: disable=arguments-differ
inputs: torch.FloatTensor,
batch_lengths: List[int],
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
):
"""
Parameters
----------
inputs : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
batch_lengths : ``List[int]``, required.
A list of length batch_size containing the lengths of the sequences in batch.
initial_state : ``Tuple[torch.Tensor, torch.Tensor]``, optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. The ``state`` has shape (1, batch_size, hidden_size) and the
``memory`` has shape (1, batch_size, cell_size).
Returns
-------
output_accumulator : ``torch.FloatTensor``
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
final_state : ``Tuple[``torch.FloatTensor, torch.FloatTensor]``
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. The ``state`` has shape (1, batch_size, hidden_size) and the
``memory`` has shape (1, batch_size, cell_size).
"""
batch_size = inputs.size()[0]
total_timesteps = inputs.size()[1]
output_accumulator = inputs.new_zeros(batch_size, total_timesteps, self.hidden_size)
if initial_state is None:
full_batch_previous_memory = inputs.new_zeros(batch_size, self.cell_size)
full_batch_previous_state = inputs.new_zeros(batch_size, self.hidden_size)
else:
full_batch_previous_state = initial_state[0].squeeze(0)
full_batch_previous_memory = initial_state[1].squeeze(0)
current_length_index = batch_size - 1 if self.go_forward else 0
if self.recurrent_dropout_probability > 0.0 and self.training:
dropout_mask = get_dropout_mask(
self.recurrent_dropout_probability, full_batch_previous_state
)
else:
dropout_mask = None
for timestep in range(total_timesteps):
# The index depends on which end we start.
index = timestep if self.go_forward else total_timesteps - timestep - 1
# What we are doing here is finding the index into the batch dimension
# which we need to use for this timestep, because the sequences have
# variable length, so once the index is greater than the length of this
# particular batch sequence, we no longer need to do the computation for
# this sequence. The key thing to recognise here is that the batch inputs
# must be _ordered_ by length from longest (first in batch) to shortest
# (last) so initially, we are going forwards with every sequence and as we
# pass the index at which the shortest elements of the batch finish,
# we stop picking them up for the computation.
if self.go_forward:
while batch_lengths[current_length_index] <= index:
current_length_index -= 1
# If we're going backwards, we are _picking up_ more indices.
else:
# First conditional: Are we already at the maximum number of elements in the batch?
# Second conditional: Does the next shortest sequence beyond the current batch
# index require computation use this timestep?
while (
current_length_index < (len(batch_lengths) - 1)
and batch_lengths[current_length_index + 1] > index
):
current_length_index += 1
# Actually get the slices of the batch which we
# need for the computation at this timestep.
# shape (batch_size, cell_size)
previous_memory = full_batch_previous_memory[0 : current_length_index + 1].clone()
# Shape (batch_size, hidden_size)
previous_state = full_batch_previous_state[0 : current_length_index + 1].clone()
# Shape (batch_size, input_size)
timestep_input = inputs[0 : current_length_index + 1, index]
# Do the projections for all the gates all at once.
# Both have shape (batch_size, 4 * cell_size)
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(
projected_input[:, (0 * self.cell_size) : (1 * self.cell_size)]
+ projected_state[:, (0 * self.cell_size) : (1 * self.cell_size)]
)
forget_gate = torch.sigmoid(
projected_input[:, (1 * self.cell_size) : (2 * self.cell_size)]
+ projected_state[:, (1 * self.cell_size) : (2 * self.cell_size)]
)
memory_init = torch.tanh(
projected_input[:, (2 * self.cell_size) : (3 * self.cell_size)]
+ projected_state[:, (2 * self.cell_size) : (3 * self.cell_size)]
)
output_gate = torch.sigmoid(
projected_input[:, (3 * self.cell_size) : (4 * self.cell_size)]
+ projected_state[:, (3 * self.cell_size) : (4 * self.cell_size)]
)
memory = input_gate * memory_init + forget_gate * previous_memory
# Here is the non-standard part of this LSTM cell; first, we clip the
# memory cell, then we project the output of the timestep to a smaller size
# and again clip it.
if self.memory_cell_clip_value:
# pylint: disable=invalid-unary-operand-type
memory = torch.clamp(
memory, -self.memory_cell_clip_value, self.memory_cell_clip_value
)
# shape (current_length_index, cell_size)
pre_projection_timestep_output = output_gate * torch.tanh(memory)
# shape (current_length_index, hidden_size)
timestep_output = self.state_projection(pre_projection_timestep_output)
if self.state_projection_clip_value:
# pylint: disable=invalid-unary-operand-type
timestep_output = torch.clamp(
timestep_output,
-self.state_projection_clip_value,
self.state_projection_clip_value,
)
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None:
timestep_output = timestep_output * dropout_mask[0 : current_length_index + 1]
# We've been doing computation with less than the full batch, so here we create a new
# variable for the the whole batch at this timestep and insert the result for the
# relevant elements of the batch into it.
full_batch_previous_memory = full_batch_previous_memory.clone()
full_batch_previous_state = full_batch_previous_state.clone()
full_batch_previous_memory[0 : current_length_index + 1] = memory
full_batch_previous_state[0 : current_length_index + 1] = timestep_output
output_accumulator[0 : current_length_index + 1, index] = timestep_output
# Mimic the pytorch API by returning state in the following shape:
# (num_layers * num_directions, batch_size, ...). As this
# LSTM cell cannot be stacked, the first dimension here is just 1.
final_state = (
full_batch_previous_state.unsqueeze(0),
full_batch_previous_memory.unsqueeze(0),
)
return output_accumulator, final_state
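# A minimal usage sketch (not part of the original module): a batch of two sequences
# padded to five timesteps, with lengths sorted longest-first as forward() expects.
def _example_lstm_cell_with_projection():
    cell = LstmCellWithProjection(input_size=10, hidden_size=4, cell_size=8)
    inputs = torch.rand(2, 5, 10)      # (batch_size, num_timesteps, input_size)
    batch_lengths = [5, 3]             # lengths in decreasing order
    outputs, (state, memory) = cell(inputs, batch_lengths)
    assert outputs.shape == (2, 5, 4)  # (batch_size, max_timesteps, hidden_size)
    assert state.shape == (1, 2, 4)    # (1, batch_size, hidden_size)
    assert memory.shape == (1, 2, 8)   # (1, batch_size, cell_size)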
def get_dropout_mask(
dropout_probability: float, tensor_for_masking: torch.Tensor
): # pragma: no cover
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
Parameters
----------
dropout_probability : float, required.
Probability of dropping a dimension of the input.
tensor_for_masking : torch.Tensor, required.
Returns
-------
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
This scaling ensures expected values and variances of the output of applying this mask
and the original tensor are the same.
"""
binary_mask = tensor_for_masking.new_tensor(
torch.rand(tensor_for_masking.size()) > dropout_probability
)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
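# A quick numerical illustration (not in the original module): with dropout probability
# 0.5 the surviving entries are scaled by 1 / (1 - 0.5) = 2.0, preserving expected values.
def _example_get_dropout_mask():
    mask = get_dropout_mask(0.5, torch.ones(3, 4))
    # Every element is either 0.0 (dropped) or 2.0 (kept and rescaled).
    assert set(mask.unique().tolist()).issubset({0.0, 2.0})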
def block_orthogonal(
tensor: torch.Tensor, split_sizes: List[int], gain: float = 1.0
) -> None: # pragma: no cover
"""
An initializer which allows initializing model parameters in "blocks". This is helpful
in the case of recurrent models which use multiple gates applied to linear projections,
which can be computed efficiently if they are concatenated together. However, they are
separate parameters which should be initialized independently.
Parameters
----------
tensor : ``torch.Tensor``, required.
A tensor to initialize.
split_sizes : List[int], required.
A list of length ``tensor.ndim()`` specifying the size of the
blocks along that particular dimension. E.g. ``[10, 20]`` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : float, optional (default = 1.0)
The gain (scaling) applied to the orthogonal initialization.
"""
data = tensor.data
sizes = list(tensor.size())
if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):
raise ValueError(
"tensor dimensions must be divisible by their respective "
"split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes)
)
indexes = [list(range(0, max_size, split)) for max_size, split in zip(sizes, split_sizes)]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
# and the appropriate step size (i.e split_size[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple(
[slice(start_index, start_index + step) for start_index, step in index_and_step_tuples]
)
data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
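# Small sketch (not part of the original module): a 6 x 10 weight matrix split into
# 3 x 5 blocks, each block receiving its own (semi-)orthogonal initialization.
def _example_block_orthogonal():
    weight = torch.empty(6, 10)
    block_orthogonal(weight, split_sizes=[3, 5])
    return weight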
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor): # pragma: no cover
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
permuation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ValueError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
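# Illustrative only: sorting a padded batch by length and undoing the sort with the
# returned restoration indices.
def _example_sort_batch_by_length():
    tensor = torch.rand(3, 4, 2)                 # (batch, timesteps, dim)
    lengths = torch.tensor([2, 4, 1])
    sorted_tensor, sorted_lengths, restoration, permutation = sort_batch_by_length(tensor, lengths)
    assert sorted_lengths.tolist() == [4, 2, 1]
    # index_select with the restoration indices recovers the original batch order.
    assert torch.equal(sorted_tensor.index_select(0, restoration), tensor)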
# We have two types here for the state, because storing the state in something
# which is Iterable (like a tuple, below), is helpful for internal manipulation
# - however, the states are consumed as either Tensors or a Tuple of Tensors, so
# returning them in this format is unhelpful.
RnnState = Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] # pylint: disable=invalid-name
RnnStateStorage = Tuple[torch.Tensor, ...] # pylint: disable=invalid-name
class _EncoderBase(torch.nn.Module): # pragma: no cover
# pylint: disable=abstract-method
"""
This abstract class serves as a base for the 3 ``Encoder`` abstractions in AllenNLP.
- :class:`~allennlp.modules.seq2seq_encoders.Seq2SeqEncoders`
- :class:`~allennlp.modules.seq2vec_encoders.Seq2VecEncoders`
Additionally, this class provides functionality for sorting sequences by length
so they can be consumed by Pytorch RNN classes, which require their inputs to be
sorted by length. Finally, it also provides optional statefulness to all of its
subclasses by allowing the caching and retrieving of the hidden states of RNNs.
"""
def __init__(self, stateful: bool = False) -> None:
super(_EncoderBase, self).__init__()
self.stateful = stateful
self._states: Optional[RnnStateStorage] = None
def sort_and_run_forward(
self,
module: Callable[
[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState],
],
inputs: torch.Tensor,
mask: torch.Tensor,
hidden_state: Optional[RnnState] = None,
):
"""
This function exists because Pytorch RNNs require that their inputs be sorted
before being passed as input. As all of our Seq2xxxEncoders use this functionality,
it is provided in a base class. This method can be called on any module which
takes as input a ``PackedSequence`` and some ``hidden_state``, which can either be a
tuple of tensors or a tensor.
As all of our Seq2xxxEncoders have different return types, we return `sorted`
outputs from the module, which is called directly. Additionally, we return the
indices into the batch dimension required to restore the tensor to its correct,
unsorted order and the number of valid batch elements (i.e the number of elements
in the batch which are not completely masked). This un-sorting and re-padding
of the module outputs is left to the subclasses because their outputs have different
types and handling them smoothly here is difficult.
Parameters
----------
module : ``Callable[[PackedSequence, Optional[RnnState]],
Tuple[Union[PackedSequence, torch.Tensor], RnnState]]``, required.
A function to run on the inputs. In most cases, this is a ``torch.nn.Module``.
inputs : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length, embedding_size)`` representing
the inputs to the Encoder.
mask : ``torch.Tensor``, required.
A tensor of shape ``(batch_size, sequence_length)``, representing masked and
non-masked elements of the sequence for each element in the batch.
hidden_state : ``Optional[RnnState]``, (default = None).
A single tensor of shape (num_layers, batch_size, hidden_size) representing the
state of an RNN, or a tuple of
tensors of shapes (num_layers, batch_size, hidden_size) and
(num_layers, batch_size, memory_size), representing the hidden state and memory
state of an LSTM-like RNN.
Returns
-------
module_output : ``Union[torch.Tensor, PackedSequence]``.
A Tensor or PackedSequence representing the output of the Pytorch Module.
The batch size dimension will be equal to ``num_valid``, as sequences of zero
length are clipped off before the module is called, as Pytorch cannot handle
zero length sequences.
final_states : ``Optional[RnnState]``
A Tensor representing the hidden state of the Pytorch Module. This can either
be a single tensor of shape (num_layers, num_valid, hidden_size), for instance in
the case of a GRU, or a tuple of tensors, such as those required for an LSTM.
restoration_indices : ``torch.LongTensor``
A tensor of shape ``(batch_size,)``, describing the re-indexing required to transform
the outputs back to their original batch order.
"""
# In some circumstances you may have sequences of zero length. ``pack_padded_sequence``
# requires all sequence lengths to be > 0, so remove sequences of zero length before
# calling self._module, then fill with zeros.
# First count how many sequences are empty.
batch_size = mask.size(0)
num_valid = torch.sum(mask[:, 0]).int().item()
sequence_lengths = mask.long().sum(-1)
sorted_inputs, sorted_sequence_lengths, restoration_indices, sorting_indices = sort_batch_by_length(
inputs, sequence_lengths
)
# Now create a PackedSequence with only the non-empty, sorted sequences.
packed_sequence_input = pack_padded_sequence(
sorted_inputs[:num_valid, :, :],
sorted_sequence_lengths[:num_valid].data.tolist(),
batch_first=True,
)
# Prepare the initial states.
if not self.stateful:
if hidden_state is None:
initial_states = hidden_state
elif isinstance(hidden_state, tuple):
initial_states = [
state.index_select(1, sorting_indices)[:, :num_valid, :].contiguous()
for state in hidden_state
]
else:
initial_states = hidden_state.index_select(1, sorting_indices)[
:, :num_valid, :
].contiguous()
else:
initial_states = self._get_initial_states(batch_size, num_valid, sorting_indices)
# Actually call the module on the sorted PackedSequence.
module_output, final_states = module(packed_sequence_input, initial_states)
return module_output, final_states, restoration_indices
def _get_initial_states(
self, batch_size: int, num_valid: int, sorting_indices: torch.LongTensor
) -> Optional[RnnState]:
"""
Returns an initial state for use in an RNN. Additionally, this method handles
the batch size changing across calls by mutating the state to append initial states
for new elements in the batch. Finally, it also handles sorting the states
with respect to the sequence lengths of elements in the batch and removing rows
which are completely padded. Importantly, this `mutates` the state if the
current batch size is larger than when it was previously called.
Parameters
----------
batch_size : ``int``, required.
The batch size can change size across calls to stateful RNNs, so we need
to know if we need to expand or shrink the states before returning them.
Expanded states will be set to zero.
num_valid : ``int``, required.
The batch may contain completely padded sequences which get removed before
the sequence is passed through the encoder. We also need to clip these off
of the state too.
sorting_indices ``torch.LongTensor``, required.
Pytorch RNNs take sequences sorted by length. When we return the states to be
used for a given call to ``module.forward``, we need the states to match up to
the sorted sequences, so before returning them, we sort the states using the
same indices used to sort the sequences.
Returns
-------
This method has a complex return type because it has to deal with the first time it
is called, when it has no state, and the fact that types of RNN have heterogeneous
states.
If it is the first time the module has been called, it returns ``None``, regardless
of the type of the ``Module``.
Otherwise, for LSTMs, it returns a tuple of ``torch.Tensors`` with shape
``(num_layers, num_valid, state_size)`` and ``(num_layers, num_valid, memory_size)``
respectively, or for GRUs, it returns a single ``torch.Tensor`` of shape
``(num_layers, num_valid, state_size)``.
"""
# We don't know the state sizes the first time calling forward,
# so we let the module define what its initial hidden state looks like.
if self._states is None:
return None
# Otherwise, we have some previous states.
if batch_size > self._states[0].size(1):
# This batch is larger than all previous states.
# If so, resize the states.
num_states_to_concat = batch_size - self._states[0].size(1)
resized_states = []
# state has shape (num_layers, batch_size, hidden_size)
for state in self._states:
# This _must_ be inside the loop because some
# RNNs have states with different last dimension sizes.
zeros = state.new_zeros(state.size(0), num_states_to_concat, state.size(2))
resized_states.append(torch.cat([state, zeros], 1))
self._states = tuple(resized_states)
correctly_shaped_states = self._states
elif batch_size < self._states[0].size(1):
# This batch is smaller than the previous one.
correctly_shaped_states = tuple(state[:, :batch_size, :] for state in self._states)
else:
correctly_shaped_states = self._states
# At this point, our states are of shape (num_layers, batch_size, hidden_size).
# However, the encoder uses sorted sequences and additionally removes elements
# of the batch which are fully padded. We need the states to match up to these
# sorted and filtered sequences, so we do that in the next two blocks before
# returning the state/s.
if len(self._states) == 1:
# GRUs only have a single state. This `unpacks` it from the
# tuple and returns the tensor directly.
correctly_shaped_state = correctly_shaped_states[0]
sorted_state = correctly_shaped_state.index_select(1, sorting_indices)
return sorted_state[:, :num_valid, :]
else:
# LSTMs have a state tuple of (state, memory).
sorted_states = [
state.index_select(1, sorting_indices) for state in correctly_shaped_states
]
return tuple(state[:, :num_valid, :] for state in sorted_states)
def _update_states(
self, final_states: RnnStateStorage, restoration_indices: torch.LongTensor
) -> None:
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
Parameters
----------
final_states : ``RnnStateStorage``, required.
The hidden states returned as output from the RNN.
restoration_indices : ``torch.LongTensor``, required.
The indices that invert the sorting used in ``sort_and_run_forward``
to order the states with respect to the lengths of the sequences in
the batch.
"""
# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
# which way around is best?
new_unsorted_states = [state.index_select(1, restoration_indices) for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple(state.data for state in new_unsorted_states)
else:
            # Now that we've sorted the states back so that they correspond to the
            # original indices, we need to figure out which states to update: if we
            # didn't use a state for a particular row, we want to preserve its value.
            # Thankfully, the rows which are all zero in the new state correspond
            # exactly to those which weren't used, so we create masks of shape
            # (1, new_batch_size, 1) denoting which rows were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
            # Masks of shape (1, new_batch_size, 1), equal to 1.0 for rows which were used.
used_new_rows_mask = [
(state[0, :, :].sum(-1) != 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states
]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(
self._states, new_unsorted_states, used_new_rows_mask
):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
new_states.append(old_state.detach())
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
new_states = []
for old_state, new_state, used_mask in zip(
self._states, new_unsorted_states, used_new_rows_mask
):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
                    # The states are the same size here; carry over the old values
                    # for the rows which weren't used in this batch.
new_state += masked_old_state
new_states.append(new_state.detach())
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
            # this never happens, because the states themselves are mutated
            # by appending zeros when calling _get_initial_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states)
def reset_states(self):
self._states = None
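# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal, standalone
# demonstration of the resize / sort / mask logic described in
# _get_initial_states and _update_states above, on toy tensors.  It assumes
# only that states have shape (num_layers, batch_size, hidden_size); all names
# below are made up for the example.
if __name__ == "__main__":
    import torch

    num_layers, old_batch, hidden = 2, 3, 4
    state = torch.randn(num_layers, old_batch, hidden)

    # Grow the state to a larger batch by appending zero rows
    # (cf. the `batch_size > self._states[0].size(1)` branch).
    new_batch = 5
    zeros = state.new_zeros(num_layers, new_batch - old_batch, hidden)
    resized = torch.cat([state, zeros], 1)
    assert resized.shape == (num_layers, new_batch, hidden)

    # Reorder the batch dimension and keep only the non-padded rows
    # (cf. the GRU branch returning sorted_state[:, :num_valid, :]).
    sorting_indices = torch.tensor([4, 2, 0, 1, 3])
    num_valid = 3
    sorted_state = resized.index_select(1, sorting_indices)[:, :num_valid, :]
    assert sorted_state.shape == (num_layers, num_valid, hidden)

    # Blend an old and a new state so rows which weren't used in the RNN call
    # keep their previous value (cf. used_new_rows_mask in _update_states).
    old = torch.ones(num_layers, num_valid, hidden)
    new = torch.zeros(num_layers, num_valid, hidden)
    new[:, 0, :] = 2.0  # pretend only row 0 was actually updated
    used_mask = (new[0, :, :].sum(-1) != 0.0).float().view(1, num_valid, 1)
    blended = new + old * (1 - used_mask)
    assert bool(blended[:, 0, :].eq(2.0).all()) and bool(blended[:, 1:, :].eq(1.0).all())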
|
dev-ops/locust/locustfile.py | zj85/shopwarePluginSelectoer | 203 | 12608229 | <filename>dev-ops/locust/locustfile.py<gh_stars>100-1000
import requests
import time
import csv
import os
import random
import uuid
import json
from locust import HttpUser, task, between, constant
class Purchaser(HttpUser):
weight = 10
wait_time = constant(15)
countryId = 1
salutationId = 1
def on_start(self):
self.initRegister()
self.register()
def initRegister(self):
path = os.path.dirname(os.path.realpath(__file__)) + '/fixtures/register.json'
with open(path) as file:
data = json.load(file)
self.countryId = data['countryId']
self.salutationId = data['salutationId']
def register(self):
register = {
'redirectTo': 'frontend.account.home.page',
'salutationId': self.salutationId,
'firstName': 'Firstname',
'lastName': 'Lastname',
'email': 'user-' + str(uuid.uuid4()).replace('-', '') + '@example.com',
'password': '<PASSWORD>',
'billingAddress[street]': 'Test street',
'billingAddress[zipcode]': '11111',
'billingAddress[city]': 'Test city',
'billingAddress[countryId]': self.countryId
}
self.client.post('/account/register', data=register, name='register')
def addProduct(self):
number = random.choice(numbers)
self.client.post('/checkout/product/add-by-number', name='add-product', data={
'redirectTo': 'frontend.checkout.cart.page',
'number': number
})
@task
def order(self):
url = random.choice(listings)
self.client.get(url, name='listing-page-logged-in')
self.client.get('/widgets/checkout/info', name='cart-widget')
count = random.randint(1, 5)
for i in range(1,count+1):
self.addProduct()
self.client.get('/checkout/cart', name='cart-page')
self.client.get('/checkout/confirm', name='confirm-page')
self.client.post('/checkout/order', name='order', data={
'tos': 'on'
})
class Surfer(HttpUser):
weight = 30
wait_time = constant(2)
@task(10)
def listing_page(self):
url = random.choice(listings)
self.client.get(url, name='listing-page')
self.client.get('/widgets/checkout/info', name='cart-widget')
@task(4)
def detail_page(self):
url = random.choice(details)
self.client.get(url, name='detail-page')
self.client.get('/widgets/checkout/info', name='cart-widget')
listings = []
details = []
numbers = []
def initListings():
path = os.path.dirname(os.path.realpath(__file__)) + '/fixtures/listing_urls.csv'
with open(path) as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
listings.append(row[0])
def initProducts():
path = os.path.dirname(os.path.realpath(__file__)) + '/fixtures/product_urls.csv'
with open(path) as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
details.append(row[0])
def initNumbers():
path = os.path.dirname(os.path.realpath(__file__)) + '/fixtures/product_numbers.csv'
with open(path) as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
numbers.append(row[0])
initListings()
initProducts()
initNumbers()
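# Illustrative invocation only (the host URL is a placeholder for the shop
# under test, not part of this repository):
#   locust -f locustfile.py --host https://shop.example.com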
|
yinyang/src/parsing/SMTLIBv2Listener.py | rainoftime/yinyang | 143 | 12608260 | <filename>yinyang/src/parsing/SMTLIBv2Listener.py
# Generated from SMTLIBv2.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .SMTLIBv2Parser import SMTLIBv2Parser
else:
from SMTLIBv2Parser import SMTLIBv2Parser
# This class defines a complete listener for a parse tree produced by SMTLIBv2Parser.
class SMTLIBv2Listener(ParseTreeListener):
# Enter a parse tree produced by SMTLIBv2Parser#start.
def enterStart(self, ctx: SMTLIBv2Parser.StartContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#start.
def exitStart(self, ctx: SMTLIBv2Parser.StartContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#response.
def enterResponse(self, ctx: SMTLIBv2Parser.ResponseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#response.
def exitResponse(self, ctx: SMTLIBv2Parser.ResponseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#generalReservedWord.
def enterGeneralReservedWord(self, ctx: SMTLIBv2Parser.GeneralReservedWordContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#generalReservedWord.
def exitGeneralReservedWord(self, ctx: SMTLIBv2Parser.GeneralReservedWordContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#simpleSymbol.
def enterSimpleSymbol(self, ctx: SMTLIBv2Parser.SimpleSymbolContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#simpleSymbol.
def exitSimpleSymbol(self, ctx: SMTLIBv2Parser.SimpleSymbolContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#quotedSymbol.
def enterQuotedSymbol(self, ctx: SMTLIBv2Parser.QuotedSymbolContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#quotedSymbol.
def exitQuotedSymbol(self, ctx: SMTLIBv2Parser.QuotedSymbolContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#predefSymbol.
def enterPredefSymbol(self, ctx: SMTLIBv2Parser.PredefSymbolContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#predefSymbol.
def exitPredefSymbol(self, ctx: SMTLIBv2Parser.PredefSymbolContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#predefKeyword.
def enterPredefKeyword(self, ctx: SMTLIBv2Parser.PredefKeywordContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#predefKeyword.
def exitPredefKeyword(self, ctx: SMTLIBv2Parser.PredefKeywordContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#symbol.
def enterSymbol(self, ctx: SMTLIBv2Parser.SymbolContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#symbol.
def exitSymbol(self, ctx: SMTLIBv2Parser.SymbolContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#numeral.
def enterNumeral(self, ctx: SMTLIBv2Parser.NumeralContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#numeral.
def exitNumeral(self, ctx: SMTLIBv2Parser.NumeralContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#decimal.
def enterDecimal(self, ctx: SMTLIBv2Parser.DecimalContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#decimal.
def exitDecimal(self, ctx: SMTLIBv2Parser.DecimalContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#hexadecimal.
def enterHexadecimal(self, ctx: SMTLIBv2Parser.HexadecimalContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#hexadecimal.
def exitHexadecimal(self, ctx: SMTLIBv2Parser.HexadecimalContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#binary.
def enterBinary(self, ctx: SMTLIBv2Parser.BinaryContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#binary.
def exitBinary(self, ctx: SMTLIBv2Parser.BinaryContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#string.
def enterString(self, ctx: SMTLIBv2Parser.StringContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#string.
def exitString(self, ctx: SMTLIBv2Parser.StringContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#reg_const.
def enterReg_const(self, ctx: SMTLIBv2Parser.Reg_constContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#reg_const.
def exitReg_const(self, ctx: SMTLIBv2Parser.Reg_constContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#keyword.
def enterKeyword(self, ctx: SMTLIBv2Parser.KeywordContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#keyword.
def exitKeyword(self, ctx: SMTLIBv2Parser.KeywordContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#spec_constant.
def enterSpec_constant(self, ctx: SMTLIBv2Parser.Spec_constantContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#spec_constant.
def exitSpec_constant(self, ctx: SMTLIBv2Parser.Spec_constantContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#s_expr.
def enterS_expr(self, ctx: SMTLIBv2Parser.S_exprContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#s_expr.
def exitS_expr(self, ctx: SMTLIBv2Parser.S_exprContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#index.
def enterIndex(self, ctx: SMTLIBv2Parser.IndexContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#index.
def exitIndex(self, ctx: SMTLIBv2Parser.IndexContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#identifier.
def enterIdentifier(self, ctx: SMTLIBv2Parser.IdentifierContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#identifier.
def exitIdentifier(self, ctx: SMTLIBv2Parser.IdentifierContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#attribute_value.
def enterAttribute_value(self, ctx: SMTLIBv2Parser.Attribute_valueContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#attribute_value.
def exitAttribute_value(self, ctx: SMTLIBv2Parser.Attribute_valueContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#attribute.
def enterAttribute(self, ctx: SMTLIBv2Parser.AttributeContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#attribute.
def exitAttribute(self, ctx: SMTLIBv2Parser.AttributeContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#sort.
def enterSort(self, ctx: SMTLIBv2Parser.SortContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#sort.
def exitSort(self, ctx: SMTLIBv2Parser.SortContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#qual_identifier.
def enterQual_identifier(self, ctx: SMTLIBv2Parser.Qual_identifierContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#qual_identifier.
def exitQual_identifier(self, ctx: SMTLIBv2Parser.Qual_identifierContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#var_binding.
def enterVar_binding(self, ctx: SMTLIBv2Parser.Var_bindingContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#var_binding.
def exitVar_binding(self, ctx: SMTLIBv2Parser.Var_bindingContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#sorted_var.
def enterSorted_var(self, ctx: SMTLIBv2Parser.Sorted_varContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#sorted_var.
def exitSorted_var(self, ctx: SMTLIBv2Parser.Sorted_varContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#pattern.
def enterPattern(self, ctx: SMTLIBv2Parser.PatternContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#pattern.
def exitPattern(self, ctx: SMTLIBv2Parser.PatternContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#match_case.
def enterMatch_case(self, ctx: SMTLIBv2Parser.Match_caseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#match_case.
def exitMatch_case(self, ctx: SMTLIBv2Parser.Match_caseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#term.
def enterTerm(self, ctx: SMTLIBv2Parser.TermContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#term.
def exitTerm(self, ctx: SMTLIBv2Parser.TermContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#sort_symbol_decl.
def enterSort_symbol_decl(self, ctx: SMTLIBv2Parser.Sort_symbol_declContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#sort_symbol_decl.
def exitSort_symbol_decl(self, ctx: SMTLIBv2Parser.Sort_symbol_declContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#meta_spec_constant.
def enterMeta_spec_constant(self, ctx: SMTLIBv2Parser.Meta_spec_constantContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#meta_spec_constant.
def exitMeta_spec_constant(self, ctx: SMTLIBv2Parser.Meta_spec_constantContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#fun_symbol_decl.
def enterFun_symbol_decl(self, ctx: SMTLIBv2Parser.Fun_symbol_declContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#fun_symbol_decl.
def exitFun_symbol_decl(self, ctx: SMTLIBv2Parser.Fun_symbol_declContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#par_fun_symbol_decl.
def enterPar_fun_symbol_decl(self, ctx: SMTLIBv2Parser.Par_fun_symbol_declContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#par_fun_symbol_decl.
def exitPar_fun_symbol_decl(self, ctx: SMTLIBv2Parser.Par_fun_symbol_declContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#theory_attribute.
def enterTheory_attribute(self, ctx: SMTLIBv2Parser.Theory_attributeContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#theory_attribute.
def exitTheory_attribute(self, ctx: SMTLIBv2Parser.Theory_attributeContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#theory_decl.
def enterTheory_decl(self, ctx: SMTLIBv2Parser.Theory_declContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#theory_decl.
def exitTheory_decl(self, ctx: SMTLIBv2Parser.Theory_declContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#logic_attribue.
def enterLogic_attribue(self, ctx: SMTLIBv2Parser.Logic_attribueContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#logic_attribue.
def exitLogic_attribue(self, ctx: SMTLIBv2Parser.Logic_attribueContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#logic.
def enterLogic(self, ctx: SMTLIBv2Parser.LogicContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#logic.
def exitLogic(self, ctx: SMTLIBv2Parser.LogicContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#sort_dec.
def enterSort_dec(self, ctx: SMTLIBv2Parser.Sort_decContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#sort_dec.
def exitSort_dec(self, ctx: SMTLIBv2Parser.Sort_decContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#selector_dec.
def enterSelector_dec(self, ctx: SMTLIBv2Parser.Selector_decContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#selector_dec.
def exitSelector_dec(self, ctx: SMTLIBv2Parser.Selector_decContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#constructor_dec.
def enterConstructor_dec(self, ctx: SMTLIBv2Parser.Constructor_decContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#constructor_dec.
def exitConstructor_dec(self, ctx: SMTLIBv2Parser.Constructor_decContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#datatype_dec.
def enterDatatype_dec(self, ctx: SMTLIBv2Parser.Datatype_decContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#datatype_dec.
def exitDatatype_dec(self, ctx: SMTLIBv2Parser.Datatype_decContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#function_dec.
def enterFunction_dec(self, ctx: SMTLIBv2Parser.Function_decContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#function_dec.
def exitFunction_dec(self, ctx: SMTLIBv2Parser.Function_decContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#function_def.
def enterFunction_def(self, ctx: SMTLIBv2Parser.Function_defContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#function_def.
def exitFunction_def(self, ctx: SMTLIBv2Parser.Function_defContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#prop_literal.
def enterProp_literal(self, ctx: SMTLIBv2Parser.Prop_literalContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#prop_literal.
def exitProp_literal(self, ctx: SMTLIBv2Parser.Prop_literalContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#script.
def enterScript(self, ctx: SMTLIBv2Parser.ScriptContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#script.
def exitScript(self, ctx: SMTLIBv2Parser.ScriptContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_assert.
def enterCmd_assert(self, ctx: SMTLIBv2Parser.Cmd_assertContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_assert.
def exitCmd_assert(self, ctx: SMTLIBv2Parser.Cmd_assertContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_assertSoft.
def enterCmd_assertSoft(self, ctx: SMTLIBv2Parser.Cmd_assertSoftContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_assertSoft.
def exitCmd_assertSoft(self, ctx: SMTLIBv2Parser.Cmd_assertSoftContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_simplify.
def enterCmd_simplify(self, ctx: SMTLIBv2Parser.Cmd_simplifyContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_simplify.
def exitCmd_simplify(self, ctx: SMTLIBv2Parser.Cmd_simplifyContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_checkSat.
def enterCmd_checkSat(self, ctx: SMTLIBv2Parser.Cmd_checkSatContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_checkSat.
def exitCmd_checkSat(self, ctx: SMTLIBv2Parser.Cmd_checkSatContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_checkSatAssuming.
def enterCmd_checkSatAssuming(
self, ctx: SMTLIBv2Parser.Cmd_checkSatAssumingContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_checkSatAssuming.
def exitCmd_checkSatAssuming(self, ctx: SMTLIBv2Parser.Cmd_checkSatAssumingContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_checkSatUsing.
def enterCmd_checkSatUsing(self, ctx: SMTLIBv2Parser.Cmd_checkSatUsingContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_checkSatUsing.
def exitCmd_checkSatUsing(self, ctx: SMTLIBv2Parser.Cmd_checkSatUsingContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_minimize.
def enterCmd_minimize(self, ctx: SMTLIBv2Parser.Cmd_minimizeContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_minimize.
def exitCmd_minimize(self, ctx: SMTLIBv2Parser.Cmd_minimizeContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_maximize.
def enterCmd_maximize(self, ctx: SMTLIBv2Parser.Cmd_maximizeContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_maximize.
def exitCmd_maximize(self, ctx: SMTLIBv2Parser.Cmd_maximizeContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareConst.
def enterCmd_declareConst(self, ctx: SMTLIBv2Parser.Cmd_declareConstContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareConst.
def exitCmd_declareConst(self, ctx: SMTLIBv2Parser.Cmd_declareConstContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareDatatype.
def enterCmd_declareDatatype(self, ctx: SMTLIBv2Parser.Cmd_declareDatatypeContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareDatatype.
def exitCmd_declareDatatype(self, ctx: SMTLIBv2Parser.Cmd_declareDatatypeContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareCodatatype.
def enterCmd_declareCodatatype(
self, ctx: SMTLIBv2Parser.Cmd_declareCodatatypeContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareCodatatype.
def exitCmd_declareCodatatype(
self, ctx: SMTLIBv2Parser.Cmd_declareCodatatypeContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareDatatypes.
def enterCmd_declareDatatypes(
self, ctx: SMTLIBv2Parser.Cmd_declareDatatypesContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareDatatypes.
def exitCmd_declareDatatypes(self, ctx: SMTLIBv2Parser.Cmd_declareDatatypesContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareCodatatypes.
def enterCmd_declareCodatatypes(
self, ctx: SMTLIBv2Parser.Cmd_declareCodatatypesContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareCodatatypes.
def exitCmd_declareCodatatypes(
self, ctx: SMTLIBv2Parser.Cmd_declareCodatatypesContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareFun.
def enterCmd_declareFun(self, ctx: SMTLIBv2Parser.Cmd_declareFunContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareFun.
def exitCmd_declareFun(self, ctx: SMTLIBv2Parser.Cmd_declareFunContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_declareSort.
def enterCmd_declareSort(self, ctx: SMTLIBv2Parser.Cmd_declareSortContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_declareSort.
def exitCmd_declareSort(self, ctx: SMTLIBv2Parser.Cmd_declareSortContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_define.
def enterCmd_define(self, ctx: SMTLIBv2Parser.Cmd_defineContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_define.
def exitCmd_define(self, ctx: SMTLIBv2Parser.Cmd_defineContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_defineFun.
def enterCmd_defineFun(self, ctx: SMTLIBv2Parser.Cmd_defineFunContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_defineFun.
def exitCmd_defineFun(self, ctx: SMTLIBv2Parser.Cmd_defineFunContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_defineConst.
def enterCmd_defineConst(self, ctx: SMTLIBv2Parser.Cmd_defineConstContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_defineConst.
def exitCmd_defineConst(self, ctx: SMTLIBv2Parser.Cmd_defineConstContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_defineFunRec.
def enterCmd_defineFunRec(self, ctx: SMTLIBv2Parser.Cmd_defineFunRecContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_defineFunRec.
def exitCmd_defineFunRec(self, ctx: SMTLIBv2Parser.Cmd_defineFunRecContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_defineFunsRec.
def enterCmd_defineFunsRec(self, ctx: SMTLIBv2Parser.Cmd_defineFunsRecContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_defineFunsRec.
def exitCmd_defineFunsRec(self, ctx: SMTLIBv2Parser.Cmd_defineFunsRecContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_defineSort.
def enterCmd_defineSort(self, ctx: SMTLIBv2Parser.Cmd_defineSortContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_defineSort.
def exitCmd_defineSort(self, ctx: SMTLIBv2Parser.Cmd_defineSortContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_display.
def enterCmd_display(self, ctx: SMTLIBv2Parser.Cmd_displayContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_display.
def exitCmd_display(self, ctx: SMTLIBv2Parser.Cmd_displayContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_echo.
def enterCmd_echo(self, ctx: SMTLIBv2Parser.Cmd_echoContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_echo.
def exitCmd_echo(self, ctx: SMTLIBv2Parser.Cmd_echoContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_eval.
def enterCmd_eval(self, ctx: SMTLIBv2Parser.Cmd_evalContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_eval.
def exitCmd_eval(self, ctx: SMTLIBv2Parser.Cmd_evalContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_exit.
def enterCmd_exit(self, ctx: SMTLIBv2Parser.Cmd_exitContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_exit.
def exitCmd_exit(self, ctx: SMTLIBv2Parser.Cmd_exitContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_GetObjectives.
def enterCmd_GetObjectives(self, ctx: SMTLIBv2Parser.Cmd_GetObjectivesContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_GetObjectives.
def exitCmd_GetObjectives(self, ctx: SMTLIBv2Parser.Cmd_GetObjectivesContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getAssertions.
def enterCmd_getAssertions(self, ctx: SMTLIBv2Parser.Cmd_getAssertionsContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getAssertions.
def exitCmd_getAssertions(self, ctx: SMTLIBv2Parser.Cmd_getAssertionsContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getAssignment.
def enterCmd_getAssignment(self, ctx: SMTLIBv2Parser.Cmd_getAssignmentContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getAssignment.
def exitCmd_getAssignment(self, ctx: SMTLIBv2Parser.Cmd_getAssignmentContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getInfo.
def enterCmd_getInfo(self, ctx: SMTLIBv2Parser.Cmd_getInfoContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getInfo.
def exitCmd_getInfo(self, ctx: SMTLIBv2Parser.Cmd_getInfoContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getModel.
def enterCmd_getModel(self, ctx: SMTLIBv2Parser.Cmd_getModelContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getModel.
def exitCmd_getModel(self, ctx: SMTLIBv2Parser.Cmd_getModelContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_blockModel.
def enterCmd_blockModel(self, ctx: SMTLIBv2Parser.Cmd_blockModelContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_blockModel.
def exitCmd_blockModel(self, ctx: SMTLIBv2Parser.Cmd_blockModelContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getOption.
def enterCmd_getOption(self, ctx: SMTLIBv2Parser.Cmd_getOptionContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getOption.
def exitCmd_getOption(self, ctx: SMTLIBv2Parser.Cmd_getOptionContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getProof.
def enterCmd_getProof(self, ctx: SMTLIBv2Parser.Cmd_getProofContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getProof.
def exitCmd_getProof(self, ctx: SMTLIBv2Parser.Cmd_getProofContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getUnsatAssumptions.
def enterCmd_getUnsatAssumptions(
self, ctx: SMTLIBv2Parser.Cmd_getUnsatAssumptionsContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getUnsatAssumptions.
def exitCmd_getUnsatAssumptions(
self, ctx: SMTLIBv2Parser.Cmd_getUnsatAssumptionsContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_labels.
def enterCmd_labels(self, ctx: SMTLIBv2Parser.Cmd_labelsContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_labels.
def exitCmd_labels(self, ctx: SMTLIBv2Parser.Cmd_labelsContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getUnsatCore.
def enterCmd_getUnsatCore(self, ctx: SMTLIBv2Parser.Cmd_getUnsatCoreContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getUnsatCore.
def exitCmd_getUnsatCore(self, ctx: SMTLIBv2Parser.Cmd_getUnsatCoreContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_getValue.
def enterCmd_getValue(self, ctx: SMTLIBv2Parser.Cmd_getValueContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_getValue.
def exitCmd_getValue(self, ctx: SMTLIBv2Parser.Cmd_getValueContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_pop.
def enterCmd_pop(self, ctx: SMTLIBv2Parser.Cmd_popContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_pop.
def exitCmd_pop(self, ctx: SMTLIBv2Parser.Cmd_popContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_poly_factor.
def enterCmd_poly_factor(self, ctx: SMTLIBv2Parser.Cmd_poly_factorContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_poly_factor.
def exitCmd_poly_factor(self, ctx: SMTLIBv2Parser.Cmd_poly_factorContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_push.
def enterCmd_push(self, ctx: SMTLIBv2Parser.Cmd_pushContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_push.
def exitCmd_push(self, ctx: SMTLIBv2Parser.Cmd_pushContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_reset.
def enterCmd_reset(self, ctx: SMTLIBv2Parser.Cmd_resetContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_reset.
def exitCmd_reset(self, ctx: SMTLIBv2Parser.Cmd_resetContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_resetAssertions.
def enterCmd_resetAssertions(self, ctx: SMTLIBv2Parser.Cmd_resetAssertionsContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_resetAssertions.
def exitCmd_resetAssertions(self, ctx: SMTLIBv2Parser.Cmd_resetAssertionsContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_setInfo.
def enterCmd_setInfo(self, ctx: SMTLIBv2Parser.Cmd_setInfoContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_setInfo.
def exitCmd_setInfo(self, ctx: SMTLIBv2Parser.Cmd_setInfoContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_setLogic.
def enterCmd_setLogic(self, ctx: SMTLIBv2Parser.Cmd_setLogicContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_setLogic.
def exitCmd_setLogic(self, ctx: SMTLIBv2Parser.Cmd_setLogicContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#cmd_setOption.
def enterCmd_setOption(self, ctx: SMTLIBv2Parser.Cmd_setOptionContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#cmd_setOption.
def exitCmd_setOption(self, ctx: SMTLIBv2Parser.Cmd_setOptionContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#tac_then.
def enterTac_then(self, ctx: SMTLIBv2Parser.Tac_thenContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#tac_then.
def exitTac_then(self, ctx: SMTLIBv2Parser.Tac_thenContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#tac_and_then.
def enterTac_and_then(self, ctx: SMTLIBv2Parser.Tac_and_thenContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#tac_and_then.
def exitTac_and_then(self, ctx: SMTLIBv2Parser.Tac_and_thenContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#par_then.
def enterPar_then(self, ctx: SMTLIBv2Parser.Par_thenContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#par_then.
def exitPar_then(self, ctx: SMTLIBv2Parser.Par_thenContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#or_else.
def enterOr_else(self, ctx: SMTLIBv2Parser.Or_elseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#or_else.
def exitOr_else(self, ctx: SMTLIBv2Parser.Or_elseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#par_or_else.
def enterPar_or_else(self, ctx: SMTLIBv2Parser.Par_or_elseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#par_or_else.
def exitPar_or_else(self, ctx: SMTLIBv2Parser.Par_or_elseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#par_or.
def enterPar_or(self, ctx: SMTLIBv2Parser.Par_orContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#par_or.
def exitPar_or(self, ctx: SMTLIBv2Parser.Par_orContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#tryFor.
def enterTryFor(self, ctx: SMTLIBv2Parser.TryForContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#tryFor.
def exitTryFor(self, ctx: SMTLIBv2Parser.TryForContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#usingParams.
def enterUsingParams(self, ctx: SMTLIBv2Parser.UsingParamsContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#usingParams.
def exitUsingParams(self, ctx: SMTLIBv2Parser.UsingParamsContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#tactical.
def enterTactical(self, ctx: SMTLIBv2Parser.TacticalContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#tactical.
def exitTactical(self, ctx: SMTLIBv2Parser.TacticalContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#command.
def enterCommand(self, ctx: SMTLIBv2Parser.CommandContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#command.
def exitCommand(self, ctx: SMTLIBv2Parser.CommandContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#b_value.
def enterB_value(self, ctx: SMTLIBv2Parser.B_valueContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#b_value.
def exitB_value(self, ctx: SMTLIBv2Parser.B_valueContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#option.
def enterOption(self, ctx: SMTLIBv2Parser.OptionContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#option.
def exitOption(self, ctx: SMTLIBv2Parser.OptionContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#info_flag.
def enterInfo_flag(self, ctx: SMTLIBv2Parser.Info_flagContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#info_flag.
def exitInfo_flag(self, ctx: SMTLIBv2Parser.Info_flagContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#error_behaviour.
def enterError_behaviour(self, ctx: SMTLIBv2Parser.Error_behaviourContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#error_behaviour.
def exitError_behaviour(self, ctx: SMTLIBv2Parser.Error_behaviourContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#reason_unknown.
def enterReason_unknown(self, ctx: SMTLIBv2Parser.Reason_unknownContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#reason_unknown.
def exitReason_unknown(self, ctx: SMTLIBv2Parser.Reason_unknownContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#model_response.
def enterModel_response(self, ctx: SMTLIBv2Parser.Model_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#model_response.
def exitModel_response(self, ctx: SMTLIBv2Parser.Model_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#info_response.
def enterInfo_response(self, ctx: SMTLIBv2Parser.Info_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#info_response.
def exitInfo_response(self, ctx: SMTLIBv2Parser.Info_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#valuation_pair.
def enterValuation_pair(self, ctx: SMTLIBv2Parser.Valuation_pairContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#valuation_pair.
def exitValuation_pair(self, ctx: SMTLIBv2Parser.Valuation_pairContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#t_valuation_pair.
def enterT_valuation_pair(self, ctx: SMTLIBv2Parser.T_valuation_pairContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#t_valuation_pair.
def exitT_valuation_pair(self, ctx: SMTLIBv2Parser.T_valuation_pairContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#check_sat_response.
def enterCheck_sat_response(self, ctx: SMTLIBv2Parser.Check_sat_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#check_sat_response.
def exitCheck_sat_response(self, ctx: SMTLIBv2Parser.Check_sat_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#echo_response.
def enterEcho_response(self, ctx: SMTLIBv2Parser.Echo_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#echo_response.
def exitEcho_response(self, ctx: SMTLIBv2Parser.Echo_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_assertions_response.
def enterGet_assertions_response(
self, ctx: SMTLIBv2Parser.Get_assertions_responseContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_assertions_response.
def exitGet_assertions_response(
self, ctx: SMTLIBv2Parser.Get_assertions_responseContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_assignment_response.
def enterGet_assignment_response(
self, ctx: SMTLIBv2Parser.Get_assignment_responseContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_assignment_response.
def exitGet_assignment_response(
self, ctx: SMTLIBv2Parser.Get_assignment_responseContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_info_response.
def enterGet_info_response(self, ctx: SMTLIBv2Parser.Get_info_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_info_response.
def exitGet_info_response(self, ctx: SMTLIBv2Parser.Get_info_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_model_response.
def enterGet_model_response(self, ctx: SMTLIBv2Parser.Get_model_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_model_response.
def exitGet_model_response(self, ctx: SMTLIBv2Parser.Get_model_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_option_response.
def enterGet_option_response(self, ctx: SMTLIBv2Parser.Get_option_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_option_response.
def exitGet_option_response(self, ctx: SMTLIBv2Parser.Get_option_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_proof_response.
def enterGet_proof_response(self, ctx: SMTLIBv2Parser.Get_proof_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_proof_response.
def exitGet_proof_response(self, ctx: SMTLIBv2Parser.Get_proof_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_unsat_assump_response.
def enterGet_unsat_assump_response(
self, ctx: SMTLIBv2Parser.Get_unsat_assump_responseContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_unsat_assump_response.
def exitGet_unsat_assump_response(
self, ctx: SMTLIBv2Parser.Get_unsat_assump_responseContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_unsat_core_response.
def enterGet_unsat_core_response(
self, ctx: SMTLIBv2Parser.Get_unsat_core_responseContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_unsat_core_response.
def exitGet_unsat_core_response(
self, ctx: SMTLIBv2Parser.Get_unsat_core_responseContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#get_value_response.
def enterGet_value_response(self, ctx: SMTLIBv2Parser.Get_value_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#get_value_response.
def exitGet_value_response(self, ctx: SMTLIBv2Parser.Get_value_responseContext):
pass
# Enter a parse tree produced by SMTLIBv2Parser#specific_success_response.
def enterSpecific_success_response(
self, ctx: SMTLIBv2Parser.Specific_success_responseContext
):
pass
# Exit a parse tree produced by SMTLIBv2Parser#specific_success_response.
def exitSpecific_success_response(
self, ctx: SMTLIBv2Parser.Specific_success_responseContext
):
pass
# Enter a parse tree produced by SMTLIBv2Parser#general_response.
def enterGeneral_response(self, ctx: SMTLIBv2Parser.General_responseContext):
pass
# Exit a parse tree produced by SMTLIBv2Parser#general_response.
def exitGeneral_response(self, ctx: SMTLIBv2Parser.General_responseContext):
pass
del SMTLIBv2Parser
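# --------------------------------------------------------------------------
# Illustrative usage sketch (hand-written, not generated by ANTLR).  The lexer
# module name "SMTLIBv2Lexer" is an assumption about what
# `antlr4 -Dlanguage=Python3 SMTLIBv2.g4` would produce alongside this file;
# the start rule name matches the enterStart/exitStart callbacks above.
def _walk_example(text):
    from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
    from SMTLIBv2Lexer import SMTLIBv2Lexer
    from SMTLIBv2Parser import SMTLIBv2Parser

    lexer = SMTLIBv2Lexer(InputStream(text))
    parser = SMTLIBv2Parser(CommonTokenStream(lexer))
    tree = parser.start()
    # Walk the tree, firing the enter*/exit* callbacks of a listener instance.
    ParseTreeWalker().walk(SMTLIBv2Listener(), tree)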
|
supriya/assets/__init__.py | butayama/supriya | 191 | 12608277 | <reponame>butayama/supriya
from . import synthdefs # noqa
|
NLP_classification/download_wikitext.py | ngduyanhece/outlier-exposure | 442 | 12608297 | # -*- coding: utf-8 -*-
"""
Trains a MNIST classifier.
"""
import numpy as np
import sys
import os
import pickle
import argparse
import math
import time
from bisect import bisect_left
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as trn
import torchvision.datasets as dset
import torch.nn.functional as F
from torch.autograd import Variable as V
import torchtext
from torchtext import data
from torchtext import datasets
import tqdm
np.random.seed(1)
parser = argparse.ArgumentParser(description='SST OE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=5, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int, default=64, help='Batch size.')
parser.add_argument('--learning_rate', '-lr', type=float, default=0.01, help='The initial learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.5, help='Momentum.')
parser.add_argument('--test_bs', type=int, default=256)
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./snapshots', help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str, default='./snapshots', help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true', help='Test only flag.')
parser.add_argument('--mix', dest='mix', action='store_true', help='Mix outliers images with in-dist images.')
# Acceleration
parser.add_argument('--prefetch', type=int, default=2, help='Pre-fetching threads.')
args = parser.parse_args()
# ============================ SST ============================ #
# set up fields
TEXT_sst = data.Field(pad_first=True)
LABEL_sst = data.Field(sequential=False)
# make splits for data
train_sst, val_sst, test_sst = datasets.SST.splits(
TEXT_sst, LABEL_sst, fine_grained=False, train_subtrees=False,
filter_pred=lambda ex: ex.label != 'neutral')
# build vocab
TEXT_sst.build_vocab(train_sst, max_size=10000)
LABEL_sst.build_vocab(train_sst, max_size=10000)
print('vocab length (including special tokens):', len(TEXT_sst.vocab))
# create our own iterator, avoiding the calls to build_vocab in SST.iters
train_iter_sst, val_iter_sst, test_iter_sst = data.BucketIterator.splits(
(train_sst, val_sst, test_sst), batch_size=args.batch_size, repeat=False)
# ============================ SST ============================ #
# ============================ WikiText-2 ============================ #
# set up fields
TEXT_wtxt = data.Field(pad_first=True, lower=True)
# make splits for data
train_OE, val_OE, test_OE = datasets.WikiText2.splits(TEXT_wtxt)
# build vocab
TEXT_wtxt.build_vocab(train_sst.text, max_size=10000)
print('vocab length (including special tokens):', len(TEXT_wtxt.vocab))
# create our own iterator, avoiding the calls to build_vocab in SST.iters
train_iter_oe, val_iter_oe, test_iter_oe = data.BPTTIterator.splits(
(train_OE, val_OE, test_OE), batch_size=args.batch_size, bptt_len=15, repeat=False)
# ============================ WikiText-2 ============================ #
# ============================ WikiText-103 ============================ #
# set up fields
TEXT_wtxt = data.Field(pad_first=True, lower=True)
# make splits for data
train_OE, val_OE, test_OE = datasets.WikiText103.splits(TEXT_wtxt)
# build vocab
TEXT_wtxt.build_vocab(train_sst.text, max_size=10000)
print('vocab length (including special tokens):', len(TEXT_wtxt.vocab))
# create our own iterator, avoiding the calls to build_vocab in SST.iters
train_iter_oe, val_iter_oe, test_iter_oe = data.BPTTIterator.splits(
(train_OE, val_OE, test_OE), batch_size=args.batch_size, bptt_len=15, repeat=False)
# ============================ WikiText-103 ============================ #
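# Quick sanity check of what the iterators yield (torchtext 0.x "legacy" Batch
# objects; the attribute names follow the Fields defined above -- this peek is
# illustrative and not required for the download itself).
sst_batch = next(iter(train_iter_sst))
print('SST batch text / label sizes:', tuple(sst_batch.text.size()), tuple(sst_batch.label.size()))
oe_batch = next(iter(train_iter_oe))
print('OE batch text / target sizes:', tuple(oe_batch.text.size()), tuple(oe_batch.target.size()))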
exit() |
examples/authenticated_relayer/make_user_db.py | kasimov-maxim/aiosmtpd | 257 | 12608317 | <filename>examples/authenticated_relayer/make_user_db.py
# Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
import sqlite3
from argon2 import PasswordHasher
from pathlib import Path
from typing import Dict
DB_FILE = "mail.db~"
USER_AND_PASSWORD: Dict[str, str] = {
"user1": "<PASSWORD>",
"user2": "<PASSWORD>",
"user3": "<PASSWORD>",
"user4": "password",
"user5": "<PASSWORD>",
"user6": "a quick brown fox jumps over a lazy dog"
}
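# Illustrative only: how a consumer of this database might check a login
# against the stored argon2 hashes.  The table/column names follow the schema
# created below; this helper is an assumption for the example, not aiosmtpd API.
def verify_user(db_path: str, username: str, password: str) -> bool:
    conn = sqlite3.connect(db_path)
    try:
        row = conn.execute(
            "SELECT hashpass FROM userauth WHERE username = ?", (username,)
        ).fetchone()
    finally:
        conn.close()
    if row is None:
        return False
    try:
        # argon2's PasswordHasher.verify raises on mismatch instead of returning False
        PasswordHasher().verify(row[0], password)
        return True
    except Exception:
        return False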
if __name__ == '__main__':
dbfp = Path(DB_FILE).absolute()
if dbfp.exists():
dbfp.unlink()
conn = sqlite3.connect(DB_FILE)
curs = conn.cursor()
curs.execute("CREATE TABLE userauth (username text, hashpass text)")
ph = PasswordHasher()
insert_up = "INSERT INTO userauth VALUES (?, ?)"
for u, p in USER_AND_PASSWORD.items():
h = ph.hash(p)
curs.execute(insert_up, (u, h))
conn.commit()
conn.close()
assert dbfp.exists()
print(f"database created at {dbfp}")
|
workshop_material/022_find_galaxies2.py | nrupatunga/pyimageconf2018 | 106 | 12608327 |
from dlib import *
from math import sqrt
img = load_grayscale_image('images/find_galaxies/nasa_crop.jpg')
win = image_window(img)
# instead of thresholding we can use a watershed, which is much more appropriate in this case
img = gaussian_blur(img, 0.5);
labels,num_blobs = label_connected_blobs_watershed(img,
background_thresh=partition_pixels(img),
smoothing=2);
print("num_blobs: {}".format(num_blobs))
win2 = image_window(randomly_color_image(labels))
rects = rectangles(num_blobs)
for r in range(labels.shape[0]):
for c in range(labels.shape[1]):
if labels[r][c] != 0:
rects[labels[r][c]] += point(c,r)
win.add_overlay(rects)
# You can also crop out the galaxies, presumably to pass them to some classifier.
# Depending on what you are doing it might be appropriate to use boxes that preserve aspect
# ratios in the cropped images, such as this:
sqrects = [centered_rect(r,int(1.5*sqrt(r.area())),int(1.5*sqrt(r.area()))) for r in rects]
# make crop plans that will all be 40x40 pixels in size
dets = [chip_details(r,chip_dims(40,40)) for r in rects]
#dets = [chip_details(r,40*40) for r in rects]
win3 = image_window(tile_images(extract_image_chips(img, dets)))
input("hit enter to continue")
|
catboost/R-package/mk_package.py | HeyLey/catboost | 6,989 | 12608355 | <reponame>HeyLey/catboost
from __future__ import print_function
import argparse
import os
import re
import shutil
import subprocess as sp
import sys
import tempfile
def _execute(cmd, **kwargs):
print('{}> {}'.format(os.getcwd(), ' '.join(cmd)))
if kwargs:
assert 0 == sp.check_call(cmd, **kwargs)
else:
assert 0 == sp.check_call(cmd, stdout=open(os.devnull, 'wb'))
def _host_os_eq(target_os):
return os.name == target_os
class Cwd(object):
def __init__(self, dir):
self.target_dir = dir
def __enter__(self):
self.save_dir = os.getcwd()
os.chdir(self.target_dir)
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.save_dir)
if exc_val:
raise
class RPackager(object):
def __init__(self, r_dir, target_os, keep_temp):
self.target_os = target_os
self.r_dir = os.path.abspath(r_dir)
self.version = [l for l in open(self.r_dir + '/DESCRIPTION').readlines() if 'Version:' in l][0].split()[1]
self.build_dir = tempfile.mkdtemp()
self.stem = 'catboost-R-{}-{}'.format(self.target_os, self.version)
self.keep_temp = keep_temp
def __del__(self):
if self.keep_temp:
print('Keep temp directory {}'.format(self.build_dir))
else:
import shutil # gc may have discarded it
shutil.rmtree(self.build_dir)
# @return path to the package
def build(self, store_dir):
package_dir = os.path.abspath(self.build_dir + '/catboost')
ya = os.path.abspath(self.r_dir + '/../../ya')
os.makedirs(package_dir)
export_with = 'svn'
for obj in ['DESCRIPTION', 'NAMESPACE', 'README.md', 'R', 'inst', 'man', 'tests']:
src = os.path.join(self.r_dir, obj)
dst = os.path.join(package_dir, obj)
if export_with == 'svn':
try:
_execute([ya, 'svn', 'export', src, dst])
except sp.CalledProcessError:
export_with = 'git'
if export_with == 'git':
tmp = tempfile.mkstemp('.tar.gz')[1]
with Cwd(os.path.dirname(src)):
_execute(['git', 'archive', '--output', tmp, 'HEAD', os.path.basename(src)])
with Cwd(os.path.dirname(dst)):
_execute(['tar', '-xvpf', tmp])
os.unlink(tmp)
_execute([ya, 'make', '-r', self.r_dir + '/src'] + ([] if _host_os_eq(self.target_os) else ['--target-platform={}'.format(self.target_os)]))
if self.target_os == 'Windows':
src = self.r_dir + '/src/libcatboostr.dll'
dst = package_dir + '/inst/libs/x64/libcatboostr.dll'
else:
src = self.r_dir + '/src/libcatboostr.so'
dst = package_dir + '/inst/libs/libcatboostr.so'
os.makedirs(os.path.dirname(dst))
shutil.copy2(src, dst)
# Create the package
result = os.path.join(os.path.abspath(store_dir), self.stem + '.tgz')
if not os.path.exists(os.path.dirname(result)):
os.makedirs(os.path.dirname(result))
with Cwd(self.build_dir):
_execute(['tar', '-cvzf', result, 'catboost'])
return result
# @return path to the package
def build_and_install_with_r(self, store_dir):
if not _host_os_eq(self.target_os):
raise ValueError('Cannot run R: host {}, target_os {}'.format(os.uname()[0], self.target_os))
cmd = ['R', 'CMD', 'INSTALL', self.r_dir, '--build', '--install-tests', '--no-multiarch', '--with-keep.source']
print('EXECUTING {}'.format(' '.join(cmd)))
r = sp.Popen(cmd, stderr=sp.PIPE, universal_newlines=True)
for line in r.stderr.readlines():
sys.stdout.write(line)
m = re.match(r"packaged installation of .* as .*(cat.*[z]).*", line)
if m:
installation = m.group(1)
status = r.wait()
assert status == 0, "Command failed with exit status " + str(status)
src = os.path.join(os.getcwd(), installation)
dst = os.path.join(store_dir, installation)
if not os.path.samefile(src, dst):
shutil.move(src, dst)
return dst
def check_with_r(self):
if not _host_os_eq(self.target_os):
raise ValueError('Cannot run R: host {}, target_os {}'.format(os.uname()[0], self.target_os))
seen_errors = False
cmd = ['R', 'CMD', 'check', self.r_dir, '--no-manual', '--no-examples', '--no-multiarch']
print('EXECUTING {}'.format(' '.join(cmd)))
r = sp.Popen(cmd, stderr=sp.PIPE, universal_newlines=True)
for line in r.stderr.readlines():
sys.stdout.write(line)
m = re.match(r".*ERROR.*", line)
if m:
seen_errors = True
status = r.wait()
assert status == 0, "Command failed with exit status " + str(status)
assert not seen_errors, "Command completed with errors"
def generate_doc_with_r(self):
if not _host_os_eq(self.target_os):
raise ValueError('Cannot run R: host {}, target_os {}'.format(os.uname()[0], self.target_os))
seen_errors = False
cmd = ['R', '-e', 'devtools::document("{}")'.format(self.r_dir)]
print('EXECUTING {}'.format(' '.join(cmd)))
r = sp.Popen(cmd, stderr=sp.PIPE, universal_newlines=True)
for line in r.stderr.readlines():
sys.stdout.write(line)
m = re.match(r".*ERROR.*", line)
if m:
seen_errors = True
status = r.wait()
assert status == 0, "Command failed with exit status " + str(status)
assert not seen_errors, "Command completed with errors"
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--target', metavar='OS', help='Target operating system', choices=['Windows', 'Linux', 'Darwin'], type=str, action='store',
default=os.name)
parser.add_argument('--catboost-r-dir', metavar='PATH', help='Catboost R-package dir', type=str, action='store',
default=os.path.dirname(sys.argv[0]))
parser.add_argument('--store-dir', metavar='PATH', help='Where to put the package', type=str, action='store',
default='.')
parser.add_argument('--generate-doc-with-r', help='Use R to regenerate documentation', action='store_true',
default=False)
parser.add_argument('--check-with-r', help='Use R to check the package before build', action='store_true',
default=False)
parser.add_argument('--build', help='Create the package', action='store_true',
default=False)
parser.add_argument('--build-with-r', help='Use R to build the package', action='store_true',
default=False)
parser.add_argument('--keep-temp', help='Do not remove temporary directory', action='store_true',
default=False)
args = parser.parse_args()
rpackager = RPackager(args.catboost_r_dir, args.target, args.keep_temp)
if args.generate_doc_with_r and _host_os_eq(args.target):
rpackager.generate_doc_with_r()
if args.check_with_r and _host_os_eq(args.target):
rpackager.check_with_r()
if args.build_with_r:
result = rpackager.build_and_install_with_r(args.store_dir)
print('Built {}'.format(result))
if args.build:
result = rpackager.build(args.store_dir)
print('Built {}'.format(result))
|
RecoLocalTracker/SiStripRecHitConverter/python/customiseNewStripCPE.py | ckamtsikis/cmssw | 852 | 12608389 | <filename>RecoLocalTracker/SiStripRecHitConverter/python/customiseNewStripCPE.py
import FWCore.ParameterSet.Config as cms
def customiseNewStripCPE(process):
process.StripCPEfromTrackAngleESProducer.parameters.useLegacyError = True
process.StripCPEfromTrackAngleESProducer.parameters.maxChgOneMIP = -6000.
return process
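# Typical use (illustrative): apply it as a cmsDriver.py customisation, e.g.
#   --customise RecoLocalTracker/SiStripRecHitConverter/customiseNewStripCPE.customiseNewStripCPE
# or call it directly in a config that already defines `process`:
#   from RecoLocalTracker.SiStripRecHitConverter.customiseNewStripCPE import customiseNewStripCPE
#   process = customiseNewStripCPE(process)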
|
scripts/log-stats/bound-sessions.py | refraction-networking/tapdance | 106 | 12608417 | <filename>scripts/log-stats/bound-sessions.py<gh_stars>100-1000
#!/usr/bin/python
import sys
import time
START_TIME = time.mktime(time.strptime('Sun May 14 00:00:00 2017'))
END_TIME = time.mktime(time.strptime('Sun May 21 00:00:00 2017'))
ADJUST_TIME = 0
for line in sys.stdin:
sp = line.split(' ', 1)
ts = sp[0]
ts = float(ts)
ts_adjust = ts + ADJUST_TIME
line = '%.06f %s' % (ts_adjust, sp[1])
if ts >= START_TIME and ts < END_TIME:
sys.stdout.write(line)
##day = time.strftime('%d', time.localtime(ts))
#
# if f is None or day != last_day:
# if f is not None:
# f.close()
# f = open('./data/session-%s.out' % day, 'w')
#
# f.write(line)
# last_day = day
#
|
tag_for_NER/convert_tag.py | bamtercelboo/corpus_process_script | 170 | 12608439 | # @Author : bamtercelboo
# @Datetime : 2018/8/11 14:20
# @File : convert_tag.py
# @Last Modify Time : 2018/8/11 14:20
# @Contact : <EMAIL>, <EMAIL>}
"""
FILE : convert_tag.py
BIO2BMESO : BIO ---> BMESO
0 I B-ARG1
1 just B-ARGM-TMP
2 got B-V-*
3 back B-ARGM-DIR
4 from I-ARGM-DIR
5 the I-ARGM-DIR
6 gym I-ARGM-DIR
7 . O
---->
0 I S-ARG1
1 just S-ARGM-TMP
2 got S-V-*
3 back B-ARGM-DIR
4 from M-ARGM-DIR
5 the M-ARGM-DIR
6 gym E-ARGM-DIR
7 . O
    NOTE: the input file must end with two blank lines
run: python -u convert_tag.py --input in.txt --output out.txt
"""
import os
import time
from optparse import OptionParser
class TagConvert(object):
def __init__(self, infile, outfile, tag):
self.infile = infile
self.outfile = outfile
self.tag = tag
if tag.upper() == "BMESO":
self.BIO2BMESO(self.infile, self.outfile)
elif tag.upper() == "BIESO":
self.BIO2BIESO(self.infile, self.outfile)
def BIO2BIESO(self, input_file, output_file):
print("Convert BIO -> BIESO for file:", input_file)
with open(input_file, encoding="UTF-8") as in_file:
fins = in_file.readlines()
if os.path.exists(output_file):
os.remove(output_file)
fout = open(output_file, mode="w", encoding="UTF-8")
words = []
words_en = []
labels = []
for line in fins:
# print(line)
if len(line) < 3:
sent_len = len(words)
for idx in range(sent_len):
                    # print(words)
                    # print(labels)
if "-" not in labels[idx]:
fout.write(words[idx] + "\t" + labels[idx] + "\n")
else:
label_type = "-".join(labels[idx].split("-")[1:])
# print(label_type)
if "B-" in labels[idx]:
if (idx == sent_len - 1) or ("I-" not in labels[idx + 1]):
fout.write(words[idx] + "\tS-" + label_type + "\n")
else:
fout.write(words[idx] + "\tB-" + label_type + "\n")
elif "I-" in labels[idx]:
if (idx == sent_len - 1) or ("I-" not in labels[idx + 1]):
fout.write(words[idx] + "\tE-" + label_type + "\n")
else:
fout.write(words[idx] + "\tI-" + label_type + "\n")
fout.write('\n')
words = []
words_en = []
labels = []
else:
pair = line.strip('\n').split()
# words.append(pair[0])
words.append("\t".join(pair[0:2]))
labels.append(pair[-1].upper())
fout.close()
print("BIESO file generated:", output_file)
def BIO2BMESO(self, input_file, output_file):
print("Convert BIO -> BMESO for file:", input_file)
with open(input_file, encoding="UTF-8") as in_file:
fins = in_file.readlines()
if os.path.exists(output_file):
os.remove(output_file)
fout = open(output_file, mode="w", encoding="UTF-8")
words = []
words_en = []
labels = []
for line in fins:
# print(line)
if len(line) < 3:
sent_len = len(words)
for idx in range(sent_len):
# print(words)
# print(labels)
if "-" not in labels[idx]:
fout.write(words[idx] + "\t" + labels[idx] + "\n")
else:
label_type = "-".join(labels[idx].split("-")[1:])
# print(label_type)
if "B-" in labels[idx]:
if (idx == sent_len - 1) or ("I-" not in labels[idx + 1]):
fout.write(words[idx] + "\tS-" + label_type + "\n")
else:
fout.write(words[idx] + "\tB-" + label_type + "\n")
elif "I-" in labels[idx]:
if (idx == sent_len - 1) or ("I-" not in labels[idx + 1]):
fout.write(words[idx] + "\tE-" + label_type + "\n")
else:
fout.write(words[idx] + "\tM-" + label_type + "\n")
fout.write('\n')
words = []
words_en = []
labels = []
else:
pair = line.strip('\n').split()
# words.append(pair[0])
words.append("\t".join(pair[0:2]))
labels.append(pair[-1].upper())
fout.close()
print("BMESO file generated:", output_file)
if __name__ == "__main__":
# print("convert tag......")
# input = "./Data/srl/srl.sample.conll"
# output = "./Data/srl/srl.sample.conll.bmeso"
# TagConvert(infile=input, outfile=output, tag="bmeso")
parser = OptionParser()
parser.add_option("--input", dest="input", help="input file")
parser.add_option("--output", dest="output", default="", help="output file")
parser.add_option("--tag", dest="converted_tag", default="bmeso", help="output file")
(options, args) = parser.parse_args()
input = options.input
output = options.output
tag = options.converted_tag
try:
start_time = time.time()
TagConvert(infile=input, outfile=output, tag=tag)
print("All Finished.")
end_time = time.time()
print("Cost Time is {:.4f}.".format(end_time - start_time))
except Exception as err:
print("Tag selection from [BMESO, BIESO]")
print(err)
|
core/cube/cube-builder/tool/kvtool.py | hysunflower/Serving | 789 | 12608462 | <reponame>hysunflower/Serving
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys
import struct
import random
class Stream(object):
""" bytes stream like sys.stdin
"""
def __init__(self, source=None, cache=None):
""" init
"""
self._src = source
self._cache_to = cache
self._cache_fd = None
def read_bytes(self, num):
"""read bytes"""
data = self._src.read(num)
if len(data) < num:
if self._cache_fd is not None:
if len(data) > 0:
self._cache_fd.write(data)
self._cache_fd.close()
                print >> sys.stderr, 'succeeded in caching file[%s]' % (
self._cache_to)
self._cache_fd = None
raise EOFError
else:
if self._cache_to is not None:
if self._cache_fd is None:
self._cache_fd = open(self._cache_to, 'wb')
self._cache_fd.write(data)
return data
def read_int(self):
"""read int"""
data = self.read_bytes(4)
return struct.unpack('!i', data)[0]
def read_byte(self):
"""read byte"""
byte = self.read_bytes(1)
return struct.unpack('!b', byte)[0]
def read_string(self):
"""read string"""
str_len = self.read_vint()
return unicode(self.read_bytes(str_len), 'utf-8')
def read_bool(self):
"""read bool"""
return bool(self.read_byte())
def read_vint(self):
"""read vint"""
first = self.read_byte()
l = self._decode_vint_size(first)
if l == 1:
return first
x = 0
for i in range(l - 1):
b = self.read_byte()
x = x << 8
x = x | (b & 0xFF)
if self._is_negative_vint(first):
return x ^ -1
return x
def _is_negative_vint(self, val):
"""check is negative vint"""
return val < -120 or (val >= -122 and val < 0)
def _decode_vint_size(self, val):
"""decode vint size"""
if val >= -122:
return 1
elif val < -120:
return -119 - val
return -111 - val
def tell(self):
""" tell """
return self._src.tell()
def seek(self, pos):
""" seek """
self._src.seek(pos)
class SequenceFileReader():
""" a reader for sequencefile
"""
def __init__(self, seqfile=None, cache=None):
""" init
"""
self.type = 'seqfile'
if seqfile is None:
seqfile = sys.stdin
self.stream = Stream(seqfile, cache=cache)
self.version = None
# self.key_class = None
self.compression_class = None
# self.value_class = None
self.compression = False
self.block_compression = False
self.meta = {}
self.sync = None
self._read_header()
if self.compression or self.block_compression:
raise NotImplementedError(
"reading of seqfiles with compression is not implemented.")
def _read_header(self):
""" read sequencefile header
"""
stream = self.stream
seq = stream.read_bytes(3)
if seq != "SEQ":
raise ValueError("given file is not a sequence-file")
self.version = stream.read_byte()
self.key_class = stream.read_string()
self.value_class = stream.read_string()
self.compression = stream.read_bool()
self.block_compression = stream.read_bool()
if self.compression:
self.compression_class = stream.read_string()
meta_len = stream.read_int()
for i in range(meta_len):
key = stream.read_string()
val = stream.read_string()
self.meta[key] = val
self.sync = stream.read_bytes(16)
def __iter__(self):
""" facilitate 'for i in reader:'
"""
while True:
try:
next = self.load()
except EOFError:
raise StopIteration
yield next
def get_type(self):
""" get type of this reader
"""
return self.type
def load(self):
""" read one record
"""
stream = self.stream
buf_len = stream.read_int()
if buf_len == -1:
syncCheck = stream.read_bytes(16)
if syncCheck != self.sync:
raise ValueError("file corrupt, no a valid sequencefile")
buf_len = stream.read_int()
key_len = stream.read_int()
buf = stream.read_bytes(buf_len)
return buf[:key_len], buf[key_len:]
def tell(self):
""" tell the position of currently readed
"""
return self.stream.tell()
def seek(self, pos):
""" seek to the specified position
"""
self.stream.seek(pos)
class SequenceFileWriter(object):
"""A wrapper around file-like object for aid writing sequence files
"""
# sequence file header for uncompressed, version 6 sequence files
SEQ_HEADER = "SEQ\x06" \
"\"org.apache.hadoop.io.BytesWritable\"" \
"org.apache.hadoop.io.BytesWritable" \
"\x00\x00\x00\x00\x00\x00"
# after writing how many bytes of actual data we insert a sync marker
SYNC_INTERVAL = 2000
def __init__(self, f):
""" Construct a sequencefile writer for specified file-like object f
"""
self._f = f
self._sync_marker = ''.join(
[chr(random.randint(0, 255)) for k in range(0, 16)])
self._write_seq_header()
self._bytes_to_prev_sync = 0
def write(self, key, value):
"""Write key-value record to this sequence file
Args:
key: key of this record, should be a str
value: value of this record, should be a str
Returns:
            None (bytes written are tracked internally to decide when to emit a sync marker)
"""
key_len = len(key)
record_len = key_len + len(value)
b_record_len = struct.pack('>I', record_len)
b_key_len = struct.pack('>I', key_len)
self._f.write(b_record_len + b_key_len)
self._f.write(key)
self._f.write(value)
self._bytes_to_prev_sync += record_len
if self._bytes_to_prev_sync >= SequenceFileWriter.SYNC_INTERVAL:
self._write_sync_marker()
self._bytes_to_prev_sync = 0
def _write_seq_header(self):
"""Write sequence file header to the underlying file
"""
self._f.write(SequenceFileWriter.SEQ_HEADER)
self._f.write(self._sync_marker)
def _write_sync_marker(self):
"""Write sync marker to this sequence file
"""
self._f.write("\xff\xff\xff\xff")
self._f.write(self._sync_marker)
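# Minimal usage sketch for SequenceFileWriter (file name and record contents are
# illustrative, not from the original module):
#
#   with open('demo.seq', 'wb') as f:
#       writer = SequenceFileWriter(f)
#       writer.write('key-0', 'value-0')   # key and value are plain byte strings
#
# The writer emits the uncompressed v6 header once on construction, then
# length-prefixed key/value records, inserting a sync marker after roughly
# SYNC_INTERVAL bytes of record data.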
def get_reader(f=None, cache=None):
""" get a kv reader for a stream 'f'
and the type can be 'kvfile' or 'seqfile'
"""
return SequenceFileReader(f, cache=cache)
def test_reader(file_path):
""" test reader of sequencefile
"""
seqfile = file_path
f = open(seqfile, 'rb')
reader = get_reader(f)
# reader = get_reader(sys.stdin, filetype)
ct = 0
for r in reader:
ct += 1
k, v = r
if ct % 2 == 0:
print struct.unpack('Q', k)[0], v
print "read a record with klen:%d,vlen:%d with count:%d" \
% (len(k), len(v), ct)
if __name__ == "__main__":
""" read sequence file to kv, need a param sequence file addr
"""
if len(sys.argv) != 2:
print "error, usage:python kvtool.py seqfile_path"
else:
file_path = sys.argv[1]
test_reader(file_path)
|
lookatme/ascii_art.py | alanxoc3/lookatme | 1,179 | 12608499 | """
Misc ASCII art
"""
WARNING = r"""
_mBma
sQf "QL
jW( -$g.
jW' -$m,
.y@' _aa. 4m,
.mD` ]QQWQ. 4Q,
_mP` ]QQQQ ?Q/
_QF )WQQ@ ?Qc
<QF QQQF )Qa
jW( QQQf "QL
jW' ]H8' -Q6.
.y@' _as. -$m.
.m@` ]QQWQ. -4m,
_mP` -?$8! 4Q,
mE $m
?$gyygggggggggwywgyygggggggygggggD(
"""
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_show_column_types.py | ahmedengu/h2o-3 | 6,098 | 12608501 | df12.types
# {u'A': u'enum', u'C': u'real', u'B': u'enum', u'D': u'real'} |
examples/core/idapythonrc.py | fengjixuchui/src | 1,160 | 12608510 | """
summary: code to be run right after IDAPython initialization
description:
The `idapythonrc.py` file:
* %APPDATA%\Hex-Rays\IDA Pro\idapythonrc.py (on Windows)
* ~/.idapro/idapythonrc.py (on Linux & Mac)
can contain any IDAPython code that will be run as soon as
IDAPython is done successfully initializing.
"""
# Add your favourite script to ScriptBox for easy access
# scriptbox.addscript("/here/is/my/favourite/script.py")
# Uncomment if you want to set Python as default interpreter in IDA
# import ida_idaapi
# ida_idaapi.enable_extlang_python(True)
# Disable the Python from interactive command-line
# import ida_idaapi
# ida_idaapi.enable_python_cli(False)
# Set the timeout for the script execution cancel dialog
# import ida_idaapi
# ida_idaapi.set_script_timeout(10)
|
smartsheet/models/home.py | bromic007/smartsheet-python-sdk | 106 | 12608512 | # pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json
from .folder import Folder
from .report import Report
from .sheet import Sheet
from .template import Template
from .sight import Sight
from .workspace import Workspace
from ..types import TypedList
from ..util import serialize
from ..util import deserialize
class Home(object):
"""Smartsheet Home data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the Home model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._folders = TypedList(Folder)
self._reports = TypedList(Report)
self._sheets = TypedList(Sheet)
self._sights = TypedList(Sight)
self._templates = TypedList(Template)
self._workspaces = TypedList(Workspace)
if props:
deserialize(self, props)
# requests package Response object
self.request_response = None
@property
def folders(self):
return self._folders
@folders.setter
def folders(self, value):
self._folders.load(value)
@property
def reports(self):
return self._reports
@reports.setter
def reports(self, value):
self._reports.load(value)
@property
def sheets(self):
return self._sheets
@sheets.setter
def sheets(self, value):
self._sheets.load(value)
@property
def sights(self):
return self._sights
@sights.setter
def sights(self, value):
self._sights.load(value)
@property
def templates(self):
return self._templates
@templates.setter
def templates(self, value):
self._templates.load(value)
@property
def workspaces(self):
return self._workspaces
@workspaces.setter
def workspaces(self, value):
self._workspaces.load(value)
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
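# Usage sketch (illustrative values; Home objects are normally produced by the
# SDK's Home resources rather than built by hand):
#
#   home = Home({'folders': [], 'sheets': []})
#   print(home.to_json())   # JSON rendering of the populated fields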
|
poet/modules/template.py | packetgeek/poet | 189 | 12608521 | <reponame>packetgeek/poet
# template, example module
#
# required
#
import module
#
# your imports
#
# import blah
#
# global vars
#
MODNAME = 'template'
USAGE = """Brief description of module.
usage: template [-h] args go here etc
\noptions:
-h\t\tshow help"""
#
# handlers
#
@module.server_handler(MODNAME)
def server(server, argv):
"""Server handler for module.
Args:
server: instance of PoetServer class
argv: list of arguments entered at server shell
"""
# your code goes here
print 'template module, does nothing'
pass
@module.client_handler(MODNAME)
def client(client, inp):
"""Client handler for module.
Args:
client: instance of PoetClient class
inp: command string sent from server
"""
# your code goes here
pass
|
utils.py | SeitaroShinagawa/chainer-partial_convolution_image_inpainting | 116 | 12608534 | <reponame>SeitaroShinagawa/chainer-partial_convolution_image_inpainting<gh_stars>100-1000
import numpy as np
def batch_postprocess_images(img, batch_w, batch_h):
b, ch, w, h = img.shape
img = img.reshape((batch_w, batch_h, ch, w, h))
img = img.transpose(0,1,3,4,2)
img = (img + 1) *127.5
img = np.clip(img, 0, 255)
img = img.astype(np.uint8)
img = img.reshape((batch_w, batch_h, w, h, ch)).transpose(0,2,1,3,4).reshape((w*batch_w, h*batch_h, ch))
return img
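# Usage sketch (shapes are illustrative): tile a batch of 6 CHW images scaled to
# [-1, 1] into a 2x3 uint8 grid.
#
#   import numpy as np
#   batch = np.random.uniform(-1, 1, (6, 3, 64, 64)).astype(np.float32)
#   grid = batch_postprocess_images(batch, 2, 3)   # -> shape (128, 192, 3)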
|
scripts/31_Model_Parse_T5/12_Generate.py | plandes/amrlib | 103 | 12608552 | <filename>scripts/31_Model_Parse_T5/12_Generate.py
#!/usr/bin/python3
import setup_run_dir # this import tricks script to run from 2 levels up
import warnings
warnings.simplefilter('ignore')
import os
from amrlib.utils.logging import silence_penman, setup_logging, WARN, ERROR
from amrlib.models.parse_t5.inference import Inference
from amrlib.models.parse_t5.penman_serializer import load_and_serialize
# Note tdata_gsii was created with 30_Model_Parse_GSII/10_Annotate_Corpus.py and 12_RemoveWikiData.py
# This can be changed. The corpus doesn't need to be annotated (you can skip running 10_x) but
# wikidata should be removed since the model doesn't produce those tags and these graphs will be
# copied as the reference data to be scored in the next step.
if __name__ == '__main__':
setup_logging(logfname='logs/parse_t5_generate.log', level=ERROR)
silence_penman()
device = 'cuda:0'
corpus_dir = 'amrlib/data/tdata_gsii/'
ref_in_fn = 'test.txt.features.nowiki' # 1898 amr entries
model_dir = 'amrlib/data/model_parse_t5'
ref_out_fn = 'test.txt.reference'
gen_out_fn = 'test.txt.generated'
# Works using GTX TitanX (12GB)
# Note that the more beams, the better chance of getting a correctly deserialized graph
# greedy (num_beams=1, batch_size=32) run-time = 12m
# (num_beams=4, batch_size=12) run-time = 50m
# (num_beams=8, batch_size=6) run-time = 1h20
# (num_beams=16, batch_size=3) run-time = 2h30m
num_beams = 4
batch_size = 12
max_entries = None # max test data to generate (use None for everything)
fpath = os.path.join(corpus_dir, ref_in_fn)
print('Loading test data', fpath)
entries = load_and_serialize(fpath)
ref_graphs = entries['graphs'][:max_entries]
ref_serials = entries['serials'][:max_entries]
ref_sents = entries['sents'][:max_entries]
print('Loading model, tokenizer and data')
inference = Inference(model_dir, batch_size=batch_size, num_beams=num_beams, device=device)
print('Generating')
gen_graphs = inference.parse_sents(ref_sents, disable_progress=False)
assert len(gen_graphs) == len(ref_serials)
# Save the reference and generated graphs, omitting any that are None
ref_fname = os.path.join(model_dir, ref_out_fn)
gen_fname = os.path.join(model_dir, gen_out_fn)
f_ref = open(ref_fname, 'w')
f_gen = open(gen_fname, 'w')
print('Saving %s and %s' % (ref_fname, gen_fname))
skipped = 0
for ref_graph, gen_graph in zip(ref_graphs, gen_graphs):
if gen_graph is None:
skipped += 1
continue
f_ref.write(ref_graph + '\n\n')
f_gen.write(gen_graph + '\n\n')
f_ref.close()
f_gen.close()
print('Out of %d graphs, skipped %d that did not deserialize properly.' % (len(ref_graphs), skipped))
print()
|
firefly/views/post.py | matrixorz/firefly | 247 | 12608576 | # coding=utf-8
from __future__ import absolute_import
from flask.views import MethodView
from flask.blueprints import Blueprint
from flask_mongoengine.wtf import model_form
from firefly.models.topic import Post, Comment
from firefly.libs.template import render_template
from firefly.libs.markdown import Markdown
from firefly.views.utils import short_timesince
bp = Blueprint("post", __name__, url_prefix="/post")
def gen_author(p):
class c(object):
id = 100001
name = 'test1'
avatar = lambda x: 'https://meta-discourse.global.ssl.fastly.net/user_avatar/meta.discourse.org/codinghorror/90/5297.png' # noqa
cn = 'Test'
def url(self):
return '/user/1000001'
author = p.author if p.author else c()
return author
def gen_author_name(p):
author = gen_author(p)
return author.username if hasattr(author, 'username') and \
author.username else author.cn
class DetailView(MethodView):
form = model_form(Comment, exclude=['created_at', 'author', 'id'])
def get(self, id):
post = Post.objects.get_or_404(id=id)
Post.objects(id=id).update_one(inc__views=1)
return render_template('posts/detail.html', post=post,
Markdown=Markdown, gen_author=gen_author,
gen_author_name=gen_author_name,
short_timesince=short_timesince)
bp.add_url_rule('/<int:id>/', view_func=DetailView.as_view('detail'))
|
stores/apps/store_admin/urls.py | diassor/CollectorCity-Market-Place | 135 | 12608609 | <gh_stars>100-1000
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'store_admin.views.home_admin', name='home_admin'),
url(r'^change_qty/$', 'store_admin.views.ajax_change_qty', name='ajax_change_qty'),
url(r'^change_price/$', 'store_admin.views.ajax_change_price', name='ajax_change_price'),
url(r'^back/$', 'store_admin.views.back_to_site', name='back_to_site'),
url(r'^customers/$', 'store_admin.views.customers_overview', name='customers'),
url(r'^customers/overview/$', 'store_admin.views.customers_overview', name='customers_overview'),
url(r'^customers/profiles/$', 'store_admin.views.customers_profiles', name='customers_profiles'),
url(r'^customers/sold_items/$', 'store_admin.views.customers_sold_items', name='customers_sold_items'),
url(r'^customers/payments/$', 'store_admin.views.customers_payments', name='customers_payments'),
url(r'^customers/shipments/$', 'store_admin.views.customers_shipments', name='customers_shipments'),
url(r'^customers/wish_lists/$', 'store_admin.views.customers_wish_lists', name='customers_wish_lists'),
url(r'^customers/wish_lists/send_notification/([\d]+)/$', 'store_admin.views.customers_send_notification', name='customers_send_notification'),
url(r'^customers/mailing_list/$', 'store_admin.views.customers_mailing_list', name='customers_mailing_list'),
url(r'^customers/mailing_list/export/$', 'store_admin.views.customers_export_mailinglist', name='customers_export_mailinglist'),
url(r'^web_store/$', 'store_admin.views.web_store_overview', name='web_store'),
url(r'^web_store/overview/$', 'store_admin.views.web_store_overview', name='web_store_overview'),
url(r'^web_store/marketing/$', 'preferences.views.marketing', name='web_store_marketing'),
url(r'^web_store/shows/$', 'preferences.views.shows', name='web_store_shows'),
url(r'^web_store/shows/add/$', 'preferences.views.add_show', name='web_store_add_show'),
url(r'^web_store/shows/edit/([\d]+)/$', 'preferences.views.edit_show', name='web_store_edit_show'),
url(r'^web_store/show_go/([\d]+)/$', 'preferences.views.show_go', name='web_store_show_go'),
url(r'^web_store/show_not_go/([\d]+)/$', 'preferences.views.show_not_go', name='web_store_show_not_go'),
url(r'^web_store/theme/$', 'themes.views.theme', name='web_store_theme'),
url(r'^web_store/theme/([\d]+)/$', 'themes.views.theme', name='web_store_theme'),
url(r'^web_store/pages/$', 'blog_pages.views.page_create', name='web_store_pages'),
url(r'^web_store/blogs/$', 'blog_pages.views.post_add', name='web_store_blogs'),
url(r'^web_store/navigation/$', 'blog_pages.views.navigation', name='web_store_navigation'),
url(r'^inventory/$', 'store_admin.views.inventory_overview', name='inventory'),
url(r'^inventory/overview/$', 'store_admin.views.inventory_overview', name='inventory_overview'),
url(r'^inventory/items/$', 'store_admin.views.inventory_items', name='inventory_items'),
url(r'^inventory/items/import/$', 'store_admin.views.inventory_items_import', name='inventory_items_import'),
url(r'^inventory/lots/$', 'store_admin.views.inventory_lots', name='inventory_lots'),
url(r'^inventory/auctions/$', 'store_admin.views.inventory_auctions', name='inventory_auctions'),
url(r'^inventory/carts/$', 'store_admin.views.inventory_carts', name='inventory_carts'),
url(r'^inventory/carts/delete/$', 'store_admin.views.delete_cart_items', name='delete_cart_items'),
url(r'^inventory/carts/notify/([\d]+)/$', 'store_admin.views.notify_to_buyer', name='notify_to_buyer'),
url(r'^inventory/carts/buyer/([\d]+)/$', 'store_admin.views.buyer_info', name='buyer_info'),
url(r'^inventory/category/$', 'store_admin.views.inventory_categorize', name='inventory_categorize'),
url(r'^account/$', 'preferences.views.change_profile', name='account'),
url(r'^account_profile/$', 'preferences.views.change_profile', name='account_profile'),
url(r'^account_password/$', 'preferences.views.change_password', name='account_password'),
url(r'^account/add_photo/$', 'store_admin.views.add_profile_photo', name='add_profile_photo'),
url(r'^preferences/$', 'store_admin.views.preferences_overview', name='preferences'),
url(r'^support/$', 'store_admin.views.support_overview', name='support'),
url(r'^support/overview/$', 'store_admin.views.support_overview', name='support_overview'),
url(r'^support/email/$', 'store_admin.views.support_email', name='support_email'),
url(r'^support/phone/$', 'store_admin.views.support_phone', name='support_phone'),
url(r'^support/community/$', 'store_admin.views.support_community', name='support_community'),
url(r'^enable_feature/(?P<feature>[\w]+)/$', 'store_admin.views.enable_feature', name='enable_feature'),
url(r'^enable_feature/(?P<feature>[\w]+)/(?P<hook>[\w]+)/$', 'store_admin.views.enable_feature', name='enable_feature'),
url(r'^charge/(?P<feature>[\w]+)/$', 'store_admin.views.ajax_do_charge', name='ajax_do_charge'),
) |
recipes/Python/499336_Summarizing_XHTML/recipe-499336.py | tdiprima/code | 2,023 | 12608700 | <reponame>tdiprima/code<filename>recipes/Python/499336_Summarizing_XHTML/recipe-499336.py
# -*- encoding: utf-8 -*-
import re
_tagopenre = re.compile(r'(?P<starws>\s?)<(?P<tagname>[^/][^> /]*)(?P<tagcontents>[^>]*)>(?P<endws>\s?)',re.MULTILINE)
_tagclosere = re.compile(r'(?P<startws>\s?)</(?P<tagname>[^>]+)>(?P<endws>\s?)')
_tagselfre = re.compile(r'(?P<startws>\s?)<(?P<tag>[^\d/>][^/>]*)/>(?P<endws>\s?)',re.MULTILINE)
_tagre = re.compile(r'<([^>]+)>', re.MULTILINE)
_selfclosere = re.compile('/\s*>\s*$')
_wsre = re.compile('\s+', re.MULTILINE)
_wsgtre = re.compile('\s+|>', re.MULTILINE)
def _find_ws(s, start=0, end=None):
'''Find whitespace. Interface similar to str.find.'''
if end is not None:
s = s[start:end]
else:
s = s[start:]
x = _wsre.search(s)
if x:
return x.start()+start
else:
return -1
def _find_ws_or_gt(s, start=0, end=None):
'''Find whitespace or greater than ('>') sign. Interface similar to str.find.'''
if end is not None:
s = s[start:end]
else:
s = s[start:]
x = _wsgtre.search(s)
if x:
return x.start()+start
else:
return -1
def summarize_html(html, maxwords = 25):
if html is None:
return ''
tagopen = _tagopenre
tagclose = _tagclosere
tagre = _tagre
tagself = _tagselfre
tags = [0]
taglist = []
def tagopen_sub(match):
tag = match.string[match.start():match.end()]
taglist.append(tag)
if _selfclosere.search(tag):
r = '<%d/>'%tags[0]
else:
r = '<%d>'%tags[0]
tags[0] += 1
return r
def tagclose_sub(match):
r = '</%d>'%tags[0]
taglist.append(match.string[match.start():match.end()])
tags[0] += 1
return r
def tagself_sub(match):
r = '<%d/>'%tags[0]
taglist.append(match.string[match.start():match.end()])
tags[0] += 1
return r
# preprocess text, fill taglist
tagged = html.replace(' ', ' ')
tagged = tagopen.sub(tagopen_sub, tagged)
tagged = tagclose.sub(tagclose_sub, tagged)
tagged = tagself.sub(tagself_sub, tagged)
tagged = tagre.sub(r' <\1> ', tagged)
# setup for processing
splittags = tagged.split()
alist = [None]*len(splittags)
words = 0
tagstack = []
addspace = False
do_not_add_dots = False
is_table_row = 0 # misnamed: used to count nested table rows
was_table_row = False
for i,elem in enumerate(splittags):
# modifying the list you're iterating is a crime, so we make ourselves
# another one and fill it when needed
alist[i] = elem
# end condition
if words >= maxwords and not is_table_row:
# special case: tables
if was_table_row:
do_not_add_dots = True
break
# an usual word
if not elem.startswith('<'):
words += 1
if addspace:
alist[i] = ' '+elem
addspace = True
# a opening tag
elif not elem.startswith('</') and not elem.endswith('/>'):
tag = taglist[int(elem[1:-1])]
tested = tag.strip()
if tested[:3] == '<tr' and _find_ws(tested[:4]) in (-1, 3):
is_table_row += 1
# comment tags need not be closed
if not tested.startswith('<!'):
tagstack.append(tag)
alist[i] = tag
addspace = False
# a closing tag
elif not elem.endswith('/>'):
addspace = False
try:
top = tagstack[-1]
except IndexError:
raise ValueError('tag not opened: %s'%elem)
# extract the tagname from top of the stack
top = top[top.find('<')+1:top.find('>')]
cut = _find_ws(top)
if cut > 0:
top = top[:cut]
# extract the tagname from the tag list
fromlist = taglist[int(elem[2:-1])]
tag = fromlist[fromlist.find('/')+1:fromlist.find('>')]
if top != tag:
raise ValueError('tag not closed properly: %s, got %s'%(top, tag))
# special case: tables
# some other tags could use special-casing, like dt/dd
if top == 'tr':
is_table_row -= 1
was_table_row = True
if is_table_row < 0:
is_table_row = 0
else:
was_table_row = False
# close the tag
tagstack.pop()
alist[i] = fromlist
# a self-closing tag
else:
tag = taglist[int(elem[1:-2])]
alist[i] = tag
addspace = False
else:
do_not_add_dots = True
i += 1
if words < maxwords:
# normalize whitespace (actually not needed... makes the tests pass, though)
return _wsre.sub(' ', html)
    # take care so no monstrosities like '......' appear at the end of output
if alist[i-1][-1] in ('.', ',', ':', '?', '!'):
alist[i-1] = re.sub(r'[\.,:?!]+$', '', alist[i-1])
# close remaining open tags
tagstack.reverse()
if not do_not_add_dots:
result = alist[:i] + ['...'] + ['</%s>'%x[x.find('<')+1:_find_ws_or_gt(x,1)] for x in tagstack]
elif was_table_row:
result = alist[:i] + ['</%s>'%x[x.find('<')+1:_find_ws_or_gt(x,1)] for x in tagstack]
else:
result = alist[:i] + ['</%s>'%x[x.find('<')+1:_find_ws_or_gt(x,1)] for x in tagstack]
# normalize whitespace...
r = _wsre.sub(' ', ''.join(result))
return r
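# Usage sketch (illustrative input; the exact output depends on tag handling):
#
#   html = '<p>' + ' '.join('w%d' % n for n in range(50)) + '</p>'
#   print(summarize_html(html, maxwords=10))
#   # roughly: the first 10 words wrapped in the original <p> tag, ending in '...'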
|
notebook/opencv_face_detection.py | vhn0912/python-snippets | 174 | 12608736 | <filename>notebook/opencv_face_detection.py
import cv2
img = cv2.imread('data/src/lena_square.png')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(type(img), img.shape)
print(type(img_gray), img_gray.shape)
# <class 'numpy.ndarray'> (512, 512, 3)
# <class 'numpy.ndarray'> (512, 512)
cascade_path = '/usr/local/Cellar/opencv3/3.2.0/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml'
cascade = cv2.CascadeClassifier(cascade_path)
faces = cascade.detectMultiScale(img_gray)
print(faces)
# [[215 201 175 175]]
for x, y, w, h in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.imwrite('data/dst/lena_square_face.jpg', img)
# True
|
tkinter/combobox-update-another-combobox/main.py | whitmans-max/python-examples | 140 | 12608793 | <filename>tkinter/combobox-update-another-combobox/main.py
# date: 2019.04.23
from tkinter import *
from tkinter.ttk import Combobox
data = {
"EMEA": [1105,1106],
"ASIA": [3565,2445, 126464, 1145454],
"AMERICA": [56464 ,5679, 55346],
}
def on_select(event):
print(event)
selecte_value = event.widget.get()
if selecte_value in data:
cb2['values'] = data[selecte_value]
else:
cb2['values'] = all_values
country = list(data.keys())
all_values = sorted(sum(data.values(), []))
window = Tk()
cb1 = Combobox(window, values=country)
cb1.bind('<<ComboboxSelected>>', on_select)
cb1.bind('<Return>', on_select) # pressed ENTER
cb1.pack()
cb2 = Combobox(window, values=all_values)
cb2.pack()
window.mainloop()
|
src/salt/base/ext/_utils/lsm6dsl_conn.py | autopi-io/autopi-core | 133 | 12608798 | import ctypes
import logging
import RPi.GPIO as gpio
import time
import yaml
from common_util import dict_key_by_value
from i2c_conn import I2CConn
log = logging.getLogger(__name__)
DEBUG = log.isEnabledFor(logging.DEBUG)
### Registers ###
FIFO_CTRL1 = 0x06 # FIFO configuration registers
FIFO_CTRL2 = 0x07
FIFO_CTRL3 = 0x08
FIFO_CTRL4 = 0x09
FIFO_CTRL5 = 0x0A
INT1_CTRL = 0x0D # INT1 pin control
INT2_CTRL = 0x0E # INT2 pin control
WHO_AM_I = 0x0F # Who I Am ID
CTRL1_XL = 0x10 # Accelerometer and gyroscope control registers
CTRL2_G = 0x11
CTRL3_C = 0x12
CTRL4_C = 0x13
CTRL5_C = 0x14
CTRL6_C = 0x15
CTRL7_G = 0x16
CTRL8_XL = 0x17
CTRL9_XL = 0x18
CTRL10_C = 0x19
OUTX_L_G = 0x22 # Gyroscope outpur registers for user interface
OUTX_H_G = 0x23
OUTY_L_G = 0x24
OUTY_H_G = 0x25
OUTZ_L_G = 0x26
OUTZ_H_G = 0x27
OUTX_L_XL = 0x28 # Accelerometer output registers
OUTX_H_XL = 0x29
OUTY_L_XL = 0x2A
OUTY_H_XL = 0x2B
OUTZ_L_XL = 0x2C
OUTZ_H_XL = 0x2D
FIFO_STATUS1 = 0x3A # FIFO status registers
FIFO_STATUS2 = 0x3B
FIFO_STATUS3 = 0x3C
FIFO_STATUS4 = 0x3D
FIFO_DATA_OUT_L = 0x3E # FIFO data output registers
FIFO_DATA_OUT_H = 0x3F
### Masks ###
FIFO_CTRL2_FTH_MASK = 0x07
FIFO_CTRL3_DEC_FIFO_GYRO_MASK = 0x38
FIFO_CTRL3_DEC_FIFO_XL_MASK = 0x07
FIFO_CTRL5_ODR_MASK = 0x78
FIFO_CTRL5_MODE_MASK = 0x07
CTRL1_XL_ODR_MASK = 0xF0
CTRL1_XL_FS_MASK = 0x0C
CTRL3_C_BOOT_MASK = (0x01 << 7)
CTRL3_C_BDU_MASK = (0x01 << 6)
CTRL3_C_H_LACTIVE_MASK = (0x01 << 5)
CTRL3_C_PP_OD_MASK = (0x01 << 4)
CTRL3_C_SIM_MASK = (0x01 << 3)
CTRL3_C_IF_INC_MASK = (0x01 << 2)
CTRL3_C_BLE_MASK = (0x01 << 1)
CTRL3_C_SW_RESET_MASK = 0x01
FIFO_STATUS2_WATERM_MASK = 0x80
FIFO_STATUS2_OVER_RUN_MASK = 0x40
FIFO_STATUS2_FIFO_FULL_SMART_MASK = 0x20
FIFO_STATUS2_FIFO_EMPTY = 0x10
FIFO_STATUS2_DIFF_FIFO_MASK = 0x07
### Maps ###
# Accelerometer Output Data Rate (ODR)
XL_ODR_OFF = (0x0 << 4)
XL_ODR_12_5HZ = (0x1 << 4)
XL_ODR_26HZ = (0x2 << 4)
XL_ODR_52HZ = (0x3 << 4)
XL_ODR_104HZ = (0x4 << 4)
XL_ODR_208HZ = (0x5 << 4)
XL_ODR_416HZ = (0x6 << 4)
XL_ODR_833HZ = (0x7 << 4)
XL_ODR_1_66KHZ = (0x8 << 4)
XL_ODR_3_33KHZ = (0x9 << 4)
XL_ODR_6_66KHZ = (0xA << 4)
XL_ODR_DEFAULT = XL_ODR_OFF
# Accelerometer ODR map
CTRL1_XL_ODR_MAP = {
XL_ODR_OFF: 0,
XL_ODR_12_5HZ: 12.5,
XL_ODR_26HZ: 26,
XL_ODR_52HZ: 52,
XL_ODR_104HZ: 104,
XL_ODR_208HZ: 208,
XL_ODR_416HZ: 416,
XL_ODR_833HZ: 833,
XL_ODR_1_66KHZ: 1660,
XL_ODR_3_33KHZ: 3330,
XL_ODR_6_66KHZ: 6660,
}
# Accelerometer Full Scale
XL_FS_2G = (0x0 << 2)
XL_FS_4G = (0x2 << 2)
XL_FS_8G = (0x3 << 2)
XL_FS_16G = (0x1 << 2) # odd, I know. Look at the DS
XL_FS_DEFAULT = XL_FS_2G
CTRL1_XL_FS_MAP = {
XL_FS_2G: 2,
XL_FS_4G: 4,
XL_FS_8G: 8,
XL_FS_16G: 16,
}
XL_FS_SCALE_MAP = {
2: 0.061,
4: 0.122,
8: 0.244,
16: 0.488,
}
# FIFO gyro decimations
DEC_FIFO_GYRO_NOT_IN_FIFO = (0x0 << 3)
DEC_FIFO_GYRO_NO_DEC = (0x1 << 3)
DEC_FIFO_GYRO_FAC_2 = (0x2 << 3)
DEC_FIFO_GYRO_FAC_3 = (0x3 << 3)
DEC_FIFO_GYRO_FAC_4 = (0x4 << 3)
DEC_FIFO_GYRO_FAC_8 = (0x5 << 3)
DEC_FIFO_GYRO_FAC_16 = (0x6 << 3)
DEC_FIFO_GYRO_FAC_32 = (0x7 << 3)
# FIFO gyro decimation map
FIFO_CTRL3_DEC_FIFO_GYRO_MAP = {
DEC_FIFO_GYRO_NOT_IN_FIFO: -1,
DEC_FIFO_GYRO_NO_DEC: 0,
DEC_FIFO_GYRO_FAC_2: 2,
DEC_FIFO_GYRO_FAC_3: 3,
DEC_FIFO_GYRO_FAC_4: 4,
DEC_FIFO_GYRO_FAC_8: 8,
DEC_FIFO_GYRO_FAC_16: 16,
DEC_FIFO_GYRO_FAC_32: 32,
}
# FIFO acc decimations
DEC_FIFO_XL_NOT_IN_FIFO = 0x0
DEC_FIFO_XL_NO_DEC = 0x1
DEC_FIFO_XL_FAC_2 = 0x2
DEC_FIFO_XL_FAC_3 = 0x3
DEC_FIFO_XL_FAC_4 = 0x4
DEC_FIFO_XL_FAC_8 = 0x5
DEC_FIFO_XL_FAC_16 = 0x6
DEC_FIFO_XL_FAC_32 = 0x7
# FIFO acc decimation map
FIFO_CTRL3_DEC_FIFO_XL_MAP = {
DEC_FIFO_XL_NOT_IN_FIFO: -1,
DEC_FIFO_XL_NO_DEC: 0,
DEC_FIFO_XL_FAC_2: 2,
DEC_FIFO_XL_FAC_3: 3,
DEC_FIFO_XL_FAC_4: 4,
DEC_FIFO_XL_FAC_8: 8,
DEC_FIFO_XL_FAC_16: 16,
DEC_FIFO_XL_FAC_32: 32,
}
# FIFO Output Data Rate (ODR) values
FIFO_ODR_DISABLED = (0x0 << 3)
FIFO_ODR_12_5HZ = (0x1 << 3)
FIFO_ODR_26HZ = (0x2 << 3)
FIFO_ODR_52HZ = (0x3 << 3)
FIFO_ODR_104HZ = (0x4 << 3)
FIFO_ODR_208HZ = (0x5 << 3)
FIFO_ODR_416HZ = (0x6 << 3)
FIFO_ODR_833HZ = (0x7 << 3)
FIFO_ODR_1_66KHZ = (0x8 << 3)
FIFO_ODR_3_33KHZ = (0x9 << 3)
FIFO_ODR_6_66KHZ = (0xA << 3)
# FIFO ODR map
FIFO_CTRL5_ODR_MAP = {
FIFO_ODR_DISABLED: 0,
FIFO_ODR_12_5HZ: 12.5,
FIFO_ODR_26HZ: 26,
FIFO_ODR_52HZ: 52,
FIFO_ODR_104HZ: 104,
FIFO_ODR_208HZ: 208,
FIFO_ODR_416HZ: 416,
FIFO_ODR_833HZ: 833,
FIFO_ODR_1_66KHZ: 1660,
FIFO_ODR_3_33KHZ: 3330,
FIFO_ODR_6_66KHZ: 6660,
}
# FIFO Modes
FIFO_MODE_BYPASS = 0x0 # aka disabled mode
FIFO_MODE_FIFO = 0x1
FIFO_MODE_CONTINUOUS = 0x6
# there's more, but I have no clue how they work or how to describe them
# FIFO Mode map
FIFO_CTRL5_MODE_MAP = {
FIFO_MODE_BYPASS: 'bypass', # aka disabled mode
FIFO_MODE_FIFO: 'fifo',
FIFO_MODE_CONTINUOUS: 'continuous',
}
### Interrupts ###
INT1_DRDY_XL = (1 << 0)
INT1_DRDY_G = (1 << 1)
INT1_BOOT = (1 << 2)
INT1_FTH = (1 << 3)
INT1_FIFO_OVR = (1 << 4)
INT1_FULL_FLAG = (1 << 5)
INT1_SIGN_MOT = (1 << 6)
INT1_STEP_DETECTOR = (1 << 7)
INT1_SOURCES = {
INT1_DRDY_XL: "acc_data",
INT1_DRDY_G: "gyro_data",
INT1_BOOT: "boot",
INT1_FTH: "fifo_threshold",
INT1_FIFO_OVR: "fifo_overrun",
INT1_FULL_FLAG: "fifo_full_flag",
INT1_SIGN_MOT: "motion",
INT1_STEP_DETECTOR: "step",
}
INT2_DRDY_XL = (1 << 0)
INT2_DRDY_G = (1 << 1)
INT2_DRDY_TEMP = (1 << 2)
INT2_FTH = (1 << 3)
INT2_FIFO_OVR = (1 << 4)
INT2_FULL_FLAG = (1 << 5)
INT2_STEP_COUNT_OV = (1 << 6)
INT2_STEP_DELTA = (1 << 7)
INT2_SOURCES = {
INT2_DRDY_XL: "acc_data",
INT2_DRDY_G: "gyro_data",
INT2_DRDY_TEMP: "temp_data",
INT2_FTH: "fifo_threshold",
INT2_FIFO_OVR: "fifo_overrun",
INT2_FULL_FLAG: "fifo_full_flag",
INT2_STEP_COUNT_OV: "step_counter_overflow",
INT2_STEP_DELTA: "step_delta",
}
class LSM6DSLConn(I2CConn):
def __init__(self, settings):
super(LSM6DSLConn, self).__init__()
self._settings = settings
self._xl_fs = None
self._xl_scale = None
self._xl_odr = None
self.init(settings)
@property
def settings(self):
return self._settings
@property
def range(self):
"""
Added for backward compatibility.
"""
return self._xl_fs or self._settings.get("range", self._settings.get("acc_full_scale", CTRL1_XL_FS_MAP[XL_FS_DEFAULT]))
@property
def rate(self):
"""
Added for backward compatibility.
"""
return self._xl_odr or self._settings.get("rate", self._settings.get("acc_output_data_rate", CTRL1_XL_ODR_MAP[XL_ODR_DEFAULT]))
def open(self):
super(LSM6DSLConn, self).open()
try:
# Reset if requested
if self._settings.get("reset", False):
self.reset(confirm=True)
# Give time to recover after reset
time.sleep(.1)
self.configure()
except:
log.exception("Failed to configure after opening connection")
# Ensure connection is closed again
self.close()
raise
def configure(self, **settings):
"""
Applies specific settings.
"""
self._settings.update(settings)
if "block_data_update" in self._settings:
self.block_data_update(value=self._settings["block_data_update"])
if "intr_activation_level" in self._settings:
self.intr_activation_level(value=self._settings["intr_activation_level"])
# Setup interrupts if available
if self._settings.get("interrupts", None):
for pin, enabled_sources in self._settings["interrupts"].items():
for source in enabled_sources:
self.intr(source, pin, value=True)
self.acc_full_scale(value=self._settings.get("range", self._settings.get("acc_full_scale", CTRL1_XL_FS_MAP[XL_FS_DEFAULT])))
self.acc_output_data_rate(value=self._settings.get("rate", self._settings.get("acc_output_data_rate", CTRL1_XL_ODR_MAP[XL_ODR_DEFAULT])))
def reset(self, confirm=False):
if not confirm:
raise Exception("This action will reset the LSM6DSL chip. This loses all settings applied. Add 'confirm=true' to force the operation")
log.info("Performing software reset of device")
self.read_write(CTRL3_C, CTRL3_C_SW_RESET_MASK, 1)
def block_data_update(self, value=None):
"""
Get or set block data update setting.
value = False: Continuous update
value = True: Output registers not updated until MSB and LSB have been read
"""
if value == None:
res = self.read(CTRL3_C) & CTRL3_C_BDU_MASK
else:
val = CTRL3_C_BDU_MASK if bool(value) else 0
res = self.read_write(CTRL3_C, CTRL3_C_BDU_MASK, val) & CTRL3_C_BDU_MASK
return bool(res)
def intr_activation_level(self, value=None):
"""
Get or set interrupt activation level
value = False: Interrupt output pads active high
value = True: Interrupt output pads active low
"""
if value == None:
res = self.read(CTRL3_C) & CTRL3_C_H_LACTIVE_MASK
else:
val = CTRL3_C_H_LACTIVE_MASK if bool(value) else 0
res = self.read_write(CTRL3_C, CTRL3_C_H_LACTIVE_MASK, val) & CTRL3_C_H_LACTIVE_MASK
return bool(res)
def intr(self, source, pin, value=None):
"""
Get or set interrupt registers.
Parameters:
- source (string): the name of the interrupt source. Take a look at INT1_SOURCES and
INT2_SOURCES
- pin (string): The name of the pin. One of 'int1' or 'int2'.
- value (bool): Default None. Whether the interrupt source should be active. If no value
provided, the current value is queried and returned.
"""
if pin == 'int1':
mask = dict_key_by_value(INT1_SOURCES, source)
register = INT1_CTRL
elif pin == 'int2':
mask = dict_key_by_value(INT2_SOURCES, source)
register = INT2_CTRL
else:
raise ValueError('Unrecognized pin {}.'.format(pin))
if value == None:
res = self.read(register) & mask
else:
val = mask if bool(value) else 0
res = self.read_write(register, mask, val) & mask
return bool(res)
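    # Usage sketch (source/pin choices are illustrative, not defaults):
    #   conn.intr("fifo_threshold", "int1", value=True)   # route FIFO watermark to INT1
    #   enabled = conn.intr("fifo_threshold", "int1")     # query current state (bool)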
def xyz(self, **kwargs):
return self.acc_xyz(**kwargs)
def acc_xyz(self, decimals=4):
ret = {}
buf = self.read(OUTX_L_XL, length=6)
x = self._signed_int((buf[0] | (buf[1] << 8)), bits=16) * self._xl_scale / 1000.0
y = self._signed_int((buf[2] | (buf[3] << 8)), bits=16) * self._xl_scale / 1000.0
z = self._signed_int((buf[4] | (buf[5] << 8)), bits=16) * self._xl_scale / 1000.0
ret["x"] = round(x, decimals)
ret["y"] = round(y, decimals)
ret["z"] = round(z, decimals)
return ret
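    # Usage sketch (settings shown are assumptions, not required defaults):
    #   conn = LSM6DSLConn({"range": 2, "rate": 104})
    #   conn.open()
    #   print(conn.acc_xyz())   # e.g. {"x": 0.0123, "y": -0.9821, "z": 0.0456}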
def acc_output_data_rate(self, value=None):
"""
Gets or sets the output data rate (ODR) for the accelerometer. If value is not provided, the
current ODR will be read. Otherwise, the value will be set as the new ODR. Returns the
ODR value.
Parameters:
- value: None or a number. View possible values in `CTRL1_XL_ODR_MAP`
"""
if value == None:
# read current value
res = self.read(CTRL1_XL) & CTRL1_XL_ODR_MASK
else:
# set new value
val = dict_key_by_value(CTRL1_XL_ODR_MAP, value)
res = self.read_write(CTRL1_XL, CTRL1_XL_ODR_MASK, val) & CTRL1_XL_ODR_MASK
self._xl_odr = CTRL1_XL_ODR_MAP[res]
return self._xl_odr
def acc_full_scale(self, value=None):
"""
Gets or sets the full scale (FS) for the accelerometer. If value is not provided, the current
FS will be read.
Parameters:
- value: None or a number. View possible values in `CTRL1_XL_FS_MAP`
"""
if value == None:
# read current value
res = self.read(CTRL1_XL) & CTRL1_XL_FS_MASK
else:
# set new value
val = dict_key_by_value(CTRL1_XL_FS_MAP, value)
res = self.read_write(CTRL1_XL, CTRL1_XL_FS_MASK, val) & CTRL1_XL_FS_MASK
self._xl_fs = CTRL1_XL_FS_MAP[res]
self._xl_scale = XL_FS_SCALE_MAP[self._xl_fs]
return self._xl_fs
def fifo_status(self):
"""
Get current FIFO status. Returns a dictionary with the following keys:
- unread_words (number): The number of unread FIFO entries
- watermark_raised (bool): Whether the FIFO threshold value has been reached.
Check out fifo_threshold function.
- overrun (bool): Is FIFO full?
- empty (bool): Is FIFO empty?
"""
ret = {}
res_status_1 = self.read(FIFO_STATUS1)
res_status_2 = self.read(FIFO_STATUS2)
ret['unread_words'] = res_status_1 | ( (res_status_2 & FIFO_STATUS2_DIFF_FIFO_MASK) << 8 )
ret['watermark_raised'] = bool(res_status_2 & FIFO_STATUS2_WATERM_MASK)
ret['overrun'] = bool(res_status_2 & FIFO_STATUS2_OVER_RUN_MASK)
ret['empty'] = bool(res_status_2 & FIFO_STATUS2_FIFO_EMPTY)
return ret
def fifo_threshold(self, value=None):
"""
Get or set FIFO threshold. Watermark flag rises when the number of bytes written to FIFO
after the next write is greater than or equal to the threshold level.
- value (number): a number between 0 and 2047, the number of entries which will raise the
watermark flag
"""
if value == None:
res_1 = self.read(FIFO_CTRL1)
res_2 = self.read(FIFO_CTRL2) & FIFO_CTRL2_FTH_MASK
res = res_1 | (res_2 << 8)
else:
if type(value) != int or value < 0 or value > 2047:
raise ValueError('Value must be an integer between 0 and 2047, inclusive.')
ctrl_1_val = value & 0x0FF
ctrl_2_val = (value & 0x700) >> 8
res_1 = self.read_write(FIFO_CTRL1, 0xFF, ctrl_1_val) # accept all, we just want the new value back
res_2 = self.read_write(FIFO_CTRL2, FIFO_CTRL2_FTH_MASK, ctrl_2_val) & FIFO_CTRL2_FTH_MASK
res = res_1 | (res_2 << 8)
return res
def fifo_acc_decimation(self, value=None):
"""
Get or set FIFO accelerometer decimation. Look at FIFO_CTRL3_DEC_XL_MAP for available values.
"""
if value == None:
res = self.read(FIFO_CTRL3) & FIFO_CTRL3_DEC_FIFO_XL_MASK
else:
val = dict_key_by_value(FIFO_CTRL3_DEC_FIFO_XL_MAP, value)
res = self.read_write(FIFO_CTRL3, FIFO_CTRL3_DEC_FIFO_XL_MASK, val) & FIFO_CTRL3_DEC_FIFO_XL_MASK
return FIFO_CTRL3_DEC_FIFO_XL_MAP[res]
def fifo_gyro_decimation(self, value=None):
"""
Get or set FIFO gyroscope decimation. Look at FIFO_CTRL3_DEC_GYRO_MAP for available values.
"""
raise NotImplementedError('Not implemented.')
def fifo_odr(self, value=None):
"""
Get or set FIFO ODR setting.
Look at FIFO_CTRL5_ODR_MAP for available values.
"""
if value == None:
res = self.read(FIFO_CTRL5) & FIFO_CTRL5_ODR_MASK
else:
val = dict_key_by_value(FIFO_CTRL5_ODR_MAP, value)
res = self.read_write(FIFO_CTRL5, FIFO_CTRL5_ODR_MASK, val) & FIFO_CTRL5_ODR_MASK
return FIFO_CTRL5_ODR_MAP[res]
def fifo_mode(self, value=None):
"""
Get or set FIFO mode setting.
Look at FIFO_CTRL_5_MODE_MAP for available values.
"""
if value == None:
res = self.read(FIFO_CTRL5) & FIFO_CTRL5_MODE_MASK
else:
val = dict_key_by_value(FIFO_CTRL5_MODE_MAP, value)
res = self.read_write(FIFO_CTRL5, FIFO_CTRL5_MODE_MASK, val) & FIFO_CTRL5_MODE_MASK
return FIFO_CTRL5_MODE_MAP[res]
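    # FIFO configuration sketch (values are illustrative picks from the maps above):
    #   conn.fifo_acc_decimation(0)      # accelerometer data in FIFO, no decimation
    #   conn.fifo_odr(104)               # FIFO output data rate of 104 Hz
    #   conn.fifo_mode('continuous')     # start streaming samples into the FIFO
    #   conn.fifo_threshold(100)         # watermark flag after 100 unread words
    #   print(conn.fifo_status())        # {'unread_words': ..., 'watermark_raised': ..., ...}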
"""
def gyro_xyz(self):
ret = {}
buf = self.read(OUTX_L_G, length=6)
x = self._signed_int((buf[0] | (buf[1] << 8)), bits=16) / 1000.0
y = self._signed_int((buf[2] | (buf[3] << 8)), bits=16) / 1000.0
z = self._signed_int((buf[4] | (buf[5] << 8)), bits=16) / 1000.0
ret['x'] = round(x, 2)
ret['y'] = round(y, 2)
ret['z'] = round(z, 2)
return ret
def gyro_mode(self, active=None):
if active == None:
result = self.read(CTRL2_G)
return result
if active:
### 250 dps
#self.write(CTRL2_G, 0x50)
### 500 dps
#self.write(CTRL2_G, 0x54)
### 1000 dps
#self.write(CTRL2_G, 0x58)
### 2000 dps
self.write(CTRL2_G, 0x5C)
else:
self.write(CTRL2_G, 0)
""" |
backend/comment/notifications.py | restato/bunnybook | 131 | 12608849 | from uuid import UUID
from injector import singleton, inject
from comment.repo import CommentRepo
from notification.manager import NewNotification, NotificationManager
from post.repo import PostRepo
class NewCommentOnPost(NewNotification):
def __init__(self,
comment_author_id: UUID,
comment_author_username: str,
post_author_id: UUID,
post_author_username: str,
post_id: UUID,
comment_content: str):
super().__init__(
event="NEW_COMMENT_ON_POST",
payload={"byId": comment_author_id,
"byUsername": comment_author_username,
"postById": post_author_id,
"postByUsername": post_author_username,
"postId": post_id,
"commentPreview": f"{comment_content[:32]}..."
if len(comment_content) > 32 else comment_content})
@singleton
class CommentNotificationService:
@inject
def __init__(
self,
notification_manager: NotificationManager,
comment_repo: CommentRepo,
post_repo: PostRepo):
self._notification_manager = notification_manager
self._comment_repo = comment_repo
self._post_repo = post_repo
async def create_comment_notification(
self,
post_id: UUID,
comment_author_id: UUID,
comment_author_username: str,
comment_content: str) -> None:
post = (await self._post_repo.find_post_by_id(post_id))
recipients = await self._comment_repo.find_comments_authors_by_post_id(
post_id)
recipients.add(post.profile_id)
recipients.add(post.wall_profile_id)
recipients.remove(comment_author_id)
self._notification_manager.add_notification(NewCommentOnPost(
comment_author_id=comment_author_id,
comment_author_username=comment_author_username,
post_author_id=post.profile_id,
post_author_username=post.username,
post_id=post.id,
comment_content=comment_content), recipients)
|
booknlp/english/name_coref.py | ishine/booknlp | 539 | 12608866 | <gh_stars>100-1000
"""
This code performs name clustering on PROP PER mentions, grouping together different proper names only that refer to the same individual
e.g., Tom, <NAME>, Mr. <NAME>, Mr. Sawyer -> TOM SAYWER
"""
from collections import Counter
import sys
import itertools
import pkg_resources
class NameCoref:
def __init__(self, aliasFile):
self.honorifics={"mr":1, "mr.":1, "mrs":1, "mrs.":1, "miss":1, "uncle":1, "aunt":1, "lady":1, "lord":1, "monsieur":1, "master":1, "mistress":1}
self.aliases={}
with open(aliasFile) as file:
for line in file:
cols=line.rstrip().split("\t")
canonical=cols[0]
nicknames=cols[1:]
for nickname in nicknames:
if nickname.lower() not in self.aliases:
self.aliases[nickname.lower()]={}
self.aliases[nickname.lower()][canonical.lower()]=1
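    # Alias file layout sketch (entries are illustrative; the real list ships as
    # data/aliases.txt): one canonical name per line, TAB-separated from its
    # nicknames, e.g.
    #   Elizabeth<TAB>Liz<TAB>Beth<TAB>Lizzie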
def get_variants(self, parts):
variants={}
for i in range(len(parts)):
if parts[i].lower() not in self.honorifics:
variants[parts[i]]=1
for j in range(i+1, len(parts)):
variants["%s %s" % (parts[i], parts[j])]=1
for k in range(j+1, len(parts)):
variants["%s %s %s" % (parts[i], parts[j], parts[k])]=1
for l in range(k+1, len(parts)):
variants["%s %s %s %s" % (parts[i], parts[j], parts[k], parts[l])]=1
for m in range(l+1, len(parts)):
variants["%s %s %s %s %s" % (parts[i], parts[j], parts[k], parts[l], parts[m])]=1
for n in range(m+1, len(parts)):
variants["%s %s %s %s %s %s" % (parts[i], parts[j], parts[k], parts[l], parts[m], parts[n])]=1
for o in range(n+1, len(parts)):
variants["%s %s %s %s %s %s %s" % (parts[i], parts[j], parts[k], parts[l], parts[m], parts[n], parts[o])]=1
return variants
def get_canonical(self, name_tokens):
"""
Given a alias dictionary that maps aliases (nicknames, alternative names) to a "canonical" version
of the name, return all possible canonical versions of the input
Alias dictionary:
Em -> Emily
Em -> Emma
The Great Bambino -> Babe Ruth
Input -> Output
Em -> [["Emily"], ["Emma"]]
<NAME> -> [["Emily", "Smith"], ["Emma", "Smith"]]
The Great Bambino -> [["Babe", "Ruth"]]
"""
# first, if a name is a complete match for an alias, just return the canonicals for the alias
# Em -> ["Emily", "Emma"]
# The Great Bambino -> ["<NAME>"]
name=' '.join(name_tokens).lower()
if name in self.aliases:
vals=[]
for can in self.aliases[name]:
vals.append(can.split(" "))
return vals
# next, check if any individual part of a name is an alias
# <NAME> -> ["<NAME>", "<NAME>"]
parts=[]
for tok in name_tokens:
if tok.lower() in self.aliases:
parts.append(list(self.aliases[tok.lower()]))
else:
parts.append([tok])
        canonicals=[]
for i in itertools.product(*parts):
canonicals.append(list(i))
return canonicals
def name_cluster(self, entities, is_named, existing_refs):
"""
Get counts of all unique names (to be used for disambiguation later)
"""
uniq=Counter()
for i, val in enumerate(is_named):
if val == 1:
# only consider names with fewer than 10 tokens
# (longer are likely errors and significantly slow down processing)
if len(entities[i]) < 10:
name=' '.join(entities[i]).lower()
if name != "":
uniq[name]+=1
"""
Remove names that are complete subsets of others
e.g., if uniq = ["<NAME>", "Em", "<NAME>", "Tom", "<NAME>"], then remove "Em", "<NAME>" and "Tom".
* "Tom" is a subset of "<NAME>"
* "Em" -> "Emma" and "Emily", and "Emma" is a subset of "<NAME>"
* "<NAME>" -> "<NAME>"
"""
subsets={}
for name1 in uniq:
canonicals1=self.get_canonical(name1.split(" "))
for canonical1 in canonicals1:
name1set=set(canonical1)
for name2 in uniq:
if name1 == name2:
continue
canonicals=self.get_canonical(name2.split(" "))
for canonical in canonicals:
name2set=set(canonical)
if ' '.join(name1set) == ' '.join(name2set):
continue
if name1set.issuperset(name2set):
subsets[name2]=1
name_subpart_index={}
"""
Now map each possible ordered sub-permutation of a canonical name (from length 1 to n) to its canonical version.
e.g. the canonical name "<NAME>" ->
"<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "David", "Foster", "Wallace"
So if we see any of those phrases as names, we know they could refer to the entity "<NAME>"
This excludes honorifics like "Mr." and "Mrs." from being their own (unigram) variant, but they can appear in other permutations
e.g. "Mr. <NAME>" ->
"Mr. <NAME>", "Mr. Tom", "Mr. Sawyer", "<NAME>", "Tom", "Sawyer"
"""
for ni, name in enumerate(uniq):
if name in subsets:
continue
canonicals=self.get_canonical(name.split(" "))
for canonical in canonicals:
variants=self.get_variants(canonical)
for v in variants:
if v not in name_subpart_index:
name_subpart_index[v]={}
name_subpart_index[v][name]=1
"""
Now let's assign each name *mention* to its entity.
Starting from the first entity to last, we'll look up the possible entities that the canonical version of
this mention can be a variant of and assign it to the entity that has the highest score.
The score for an entity is initialized as the mention count of the maximal name
("<NAME>" or "Mr. <NAME>" above), but is primarily driven by recency (with the score
mainly being the location of the last assigned mention of that entity)
"""
charids={}
max_id=1
if len(existing_refs) > 0:
max_id=max(existing_refs)+1
lastSeen={}
refs=[]
for i, val in enumerate(is_named):
if existing_refs[i] != -1:
refs.append(existing_refs[i])
continue
if val == 1:
canonicals=self.get_canonical(entities[i])
name=' '.join(entities[i]).lower()
top=None
max_score=0
for canonical in canonicals:
canonical_name=' '.join(canonical).lower()
if canonical_name in name_subpart_index:
for entity in name_subpart_index[canonical_name]:
score=uniq[entity]
if entity in lastSeen:
score+=lastSeen[entity]
if score > max_score:
max_score=score
top=entity
if top is not None:
lastSeen[top]=i
if top not in charids:
charids[top]=max_id
max_id+=1
refs.append(charids[top])
# this happens if the name is too long (longer than 7 words)
else:
refs.append(-1)
else:
refs.append(-1)
return refs
def calc_overlap(self, small, big):
overlap=0
for name in small:
if name in big:
overlap+=small[name]
return overlap/sum(small.values())
def read_file(self, spanFile):
entities=[]
is_named=[]
with open(spanFile) as file:
for line in file:
cols=line.rstrip().split("\t")
cat=cols[2].split("_")[1]
prop=cols[2].split("_")[0]
if prop != "PROP":
continue
text=cols[0].split(" ")
lemma=cols[1]
entity_pos=cols[3].split(" ")
if cat == "PER":
name_filt_pos=[]
for pidx, pos in enumerate(entity_pos):
if pos == "NOUN" or pos == "PROPN":
name_filt_pos.append(text[pidx])
entities.append(name_filt_pos)
is_named.append(1)
return entities, is_named
    def process(self, spanFile):
        entities, is_named=self.read_file(spanFile)
        # cluster() expects an initial refs list; -1 marks a mention as not yet assigned
        return self.cluster(entities, is_named, [-1] * len(entities))
def cluster_identical_propers(self, entities, refs):
""" Assign all mentions that are identical to the same entity (used in combination with only performing
pronominal coreference resolution) """
max_id=1
if len(refs) > 0:
max_id=max(refs)+1
names={}
for idx, (s, e, full_cat, name) in enumerate(entities):
prop=full_cat.split("_")[0]
cat=full_cat.split("_")[1]
if prop == "PROP" and cat != "PER":
n=name.lower()
key="%s_%s_%s" % (n, prop, cat)
if key not in names:
names[key]=max_id
max_id+=1
refs[idx]=names[key]
return refs
def cluster_noms(self, entities, refs):
""" Assign all nominal mentions that are identical to the same entity (used in combination with only performing
pronominal coreference resolution) """
names={}
mapper={}
for idx, (s, e, cat, name) in enumerate(entities):
prop=cat.split("_")[0]
if prop == "NOM":
n=name.lower()
if n not in names:
names[n]=refs[idx]
else:
mapper[refs[idx]]=names[n]
for idx, ref in enumerate(refs):
if ref in mapper:
refs[idx]=mapper[ref]
return refs
def cluster_narrator(self, entities, in_quotes, tokens):
""" Create an entity for the first-person narrator from all mentiosn of "I", "me", "my" and "myself" outside of quotes """
narrator_pronouns=set(["i", "me", "my", "myself"])
refs=[]
for idx, (s, e, _, name) in enumerate(entities):
if in_quotes[idx] == 0 and name.lower() in narrator_pronouns:
refs.append(0)
# window=25
# start=max(0, s-window)
# end=min(e+25, len(tokens))
# context=[tok.text for tok in tokens[start:end]]
# print("narrator\t\t", name, ' '.join(context))
else:
refs.append(-1)
return refs
def cluster_only_nouns(self, entities, refs, tokens):
hon_mapper={"mister":"mr.", "mr.":"mr.", "mr":"mr.", "mistah":"mr.", "mastah":"mr.", "master":"mr.",
"miss":"miss", "ms.": "miss", "ms":"miss","missus":"miss","mistress":"miss",
"mrs.":"mrs.", "mrs":"mrs."
}
def map_honorifics(term):
term=term.lower()
if term in hon_mapper:
return hon_mapper[term]
return None
is_named=[]
entity_names=[]
for start, end, cat, text in entities:
ner_prop=cat.split("_")[0]
ner_type=cat.split("_")[1]
if ner_prop == "PROP" and ner_type == "PER":
is_named.append(1)
else:
is_named.append(0)
new_text=[]
for i in range(start,end+1):
hon_mapped=map_honorifics(tokens[i].text)
if (hon_mapped is not None or (tokens[i].pos == "NOUN" or tokens[i].pos == "PROPN")) and tokens[i].text.lower()[0] != tokens[i].text[0]:
val=tokens[i].text
if hon_mapped is not None:
val=hon_mapped
new_text.append(val)
if len(new_text) > 0:
entity_names.append(new_text)
else:
entity_names.append(text.split(" "))
return self.cluster(entity_names, is_named, refs)
def cluster(self, entities, is_named, refs):
refs=self.name_cluster(entities, is_named, refs)
clusters={}
for i, val in enumerate(refs):
ref=refs[i]
if ref not in clusters:
clusters[ref]=Counter()
clusters[ref][' '.join(entities[i])]+=1
# if two clusters have significant overlap in mention phrases, merge them into one
for ref in clusters:
for ref2 in clusters:
if ref == ref2 or clusters[ref] is None or clusters[ref2] is None or ref == -1 or ref2 == -1 or ref == 0 or ref2 == 0:
continue
# find which cluster is bigger and should contain the other
sum1=sum(clusters[ref].values())
sum2=sum(clusters[ref2].values())
big=ref
small=ref2
if sum2 > sum1:
big=ref2
small=ref
sim=self.calc_overlap(clusters[small], clusters[big])
if sim > 0.9:
for k,v in clusters[small].most_common():
clusters[big][k]+=v
for idx, r in enumerate(refs):
if r == small:
refs[idx]=big
clusters[small]=None
counts=Counter()
for ref in clusters:
if clusters[ref] is not None:
counts[ref]=sum(clusters[ref].values())
return refs
if __name__ == "__main__":
aliasFile = pkg_resources.resource_filename(__name__, "data/aliases.txt")
resolver=NameCoref(aliasFile)
resolver.process(sys.argv[1])
|
autotest/pyscripts/gdal2tiles/test_add_alpha_band_to_string_vrt.py | FeU-aKlos/gdal | 3,100 | 12608892 | #!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: gdal2tiles.py testing
# Author: <NAME> <<EMAIL>>
#
###############################################################################
# Copyright (c) 2017, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
from unittest import TestCase
from xml.etree import ElementTree
from osgeo_utils import gdal2tiles
class AddAlphaBandToStringVrtTest(TestCase):
def test_adds_the_correct_band_info_3_bands(self):
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped_rgb.vrt"), 'r') as f:
orig_vrt = f.read()
modif_vrt = gdal2tiles.add_alpha_band_to_string_vrt(orig_vrt)
vrt_root = ElementTree.fromstring(modif_vrt)
band4 = vrt_root.findall(".//VRTRasterBand[@band='4']")
self.assertIsNotNone(band4)
self.assertEqual(len(band4), 1)
band4_color = band4[0].find("./ColorInterp")
self.assertEqual(band4_color.text, "Alpha")
def test_band_is_in_the_right_place(self):
"""
This is likely not necessary from a file format/machine standpoint, but is much better for a
human looking at the file
"""
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped_rgb.vrt"), 'r') as f:
orig_vrt = f.read()
modif_vrt = gdal2tiles.add_alpha_band_to_string_vrt(orig_vrt)
vrt_root = ElementTree.fromstring(modif_vrt)
nb_bands = 0
for elem in list(vrt_root):
if elem.tag == "VRTRasterBand":
nb_bands += 1
# Band should be numbered in increasing order, starting at 1
self.assertEqual(elem.get("band"), str(nb_bands))
else:
if nb_bands:
# If we have already seen bands, exits, they should be all grouped
break
self.assertEqual(nb_bands, 4)
def test_adds_the_correct_band_info_1_band(self):
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped_grey.vrt"), 'r') as f:
orig_vrt = f.read()
modif_vrt = gdal2tiles.add_alpha_band_to_string_vrt(orig_vrt)
vrt_root = ElementTree.fromstring(modif_vrt)
band2 = vrt_root.findall(".//VRTRasterBand[@band='2']")
self.assertIsNotNone(band2)
self.assertEqual(len(band2), 1)
band2_color = band2[0].find("./ColorInterp")
self.assertEqual(band2_color.text, "Alpha")
def test_adds_the_alpha_option(self):
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped_rgb.vrt"), 'r') as f:
orig_vrt = f.read()
modif_vrt = gdal2tiles.add_alpha_band_to_string_vrt(orig_vrt)
vrt_root = ElementTree.fromstring(modif_vrt)
alpha_band_option = vrt_root.find(".//GDALWarpOptions/DstAlphaBand")
self.assertIsNotNone(alpha_band_option)
self.assertEqual(alpha_band_option.text, "4")
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped_grey.vrt"), 'r') as f:
orig_vrt = f.read()
modif_vrt = gdal2tiles.add_alpha_band_to_string_vrt(orig_vrt)
vrt_root = ElementTree.fromstring(modif_vrt)
alpha_band_option = vrt_root.find(".//GDALWarpOptions/DstAlphaBand")
self.assertIsNotNone(alpha_band_option)
self.assertEqual(alpha_band_option.text, "2")
def test_adds_the_init_dest_option(self):
with open(os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"data",
"warped_rgb.vrt"), 'r') as f:
orig_vrt = f.read()
modif_vrt = gdal2tiles.add_alpha_band_to_string_vrt(orig_vrt)
vrt_root = ElementTree.fromstring(modif_vrt)
init_dest_option = vrt_root.find(".//GDALWarpOptions/Option[@name='INIT_DEST']")
self.assertIsNotNone(init_dest_option)
self.assertEqual(init_dest_option.text, "0")
|
primitives/tf_encrypted/primitives/__init__.py | wqruan/tf-encrypted | 825 | 12608933 | from . import paillier
from . import sodium
__all__ = [
"paillier",
"sodium",
]
|
tests/components/debugpy/__init__.py | tbarbette/core | 30,023 | 12608934 | """Tests for the Remote Python Debugger integration."""
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_part_detections/migrations/0008_auto_20200907_0933.py | kaka-lin/azure-intelligent-edge-patterns | 176 | 12608956 | # Generated by Django 3.0.8 on 2020-09-07 09:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cameras", "0002_camera_location"),
("azure_part_detections", "0007_auto_20200907_0740"),
]
operations = [
migrations.RemoveField(model_name="partdetection", name="camera"),
migrations.AddField(
model_name="partdetection",
name="cameras",
field=models.ManyToManyField(blank=True, to="cameras.Camera"),
),
]
|
plenum/test/req_handler/test_get_txn_author_agreement_handler.py | andkononykhin/plenum | 148 | 12608981 | <reponame>andkononykhin/plenum
import pytest as pytest
from plenum.common.constants import TXN_TYPE, GET_TXN_AUTHOR_AGREEMENT, \
GET_TXN_AUTHOR_AGREEMENT_VERSION, GET_TXN_AUTHOR_AGREEMENT_DIGEST, GET_TXN_AUTHOR_AGREEMENT_TIMESTAMP
from plenum.common.exceptions import InvalidClientRequest
from plenum.common.request import Request
from plenum.server.database_manager import DatabaseManager
from plenum.server.request_handlers.get_txn_author_agreement_handler import GetTxnAuthorAgreementHandler
@pytest.fixture(scope="function")
def get_txn_author_agreement_handler(tconf):
data_manager = DatabaseManager()
handler = GetTxnAuthorAgreementHandler(data_manager)
return handler
def test_static_validation(get_txn_author_agreement_handler):
request = Request(operation={TXN_TYPE: GET_TXN_AUTHOR_AGREEMENT,
GET_TXN_AUTHOR_AGREEMENT_VERSION: "VERSION"})
get_txn_author_agreement_handler.static_validation(request)
request = Request(operation={TXN_TYPE: GET_TXN_AUTHOR_AGREEMENT,
GET_TXN_AUTHOR_AGREEMENT_DIGEST: "DIGEST"})
get_txn_author_agreement_handler.static_validation(request)
request = Request(operation={TXN_TYPE: GET_TXN_AUTHOR_AGREEMENT,
GET_TXN_AUTHOR_AGREEMENT_TIMESTAMP: 1559299045})
get_txn_author_agreement_handler.static_validation(request)
def test_static_validation_with_redundant_fields(get_txn_author_agreement_handler):
request = Request(operation={TXN_TYPE: GET_TXN_AUTHOR_AGREEMENT,
GET_TXN_AUTHOR_AGREEMENT_VERSION: "VERSION",
GET_TXN_AUTHOR_AGREEMENT_DIGEST: "DIGEST"})
with pytest.raises(InvalidClientRequest,
match="GET_TXN_AUTHOR_AGREEMENT request can have at most one of "
"the following parameters: version, digest, timestamp"):
get_txn_author_agreement_handler.static_validation(request)
|
Anaconda-files/Program_09c.py | arvidl/dynamical-systems-with-applications-using-python | 106 | 12608990 | # Program 09c: Phase portrait and Poincare section of a nonautonomous ODE.
# See Figure 9.11(b).
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
xmin, xmax = -2, 2
ymin, ymax = -2, 2
k = 0.3
omega = 1.25
gamma = 0.5
def dx_dt(x, t):
return [x[1], x[0] - k*x[1] - x[0]**3 + gamma*np.cos(omega*t)]
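# A quick sanity check of the Duffing right-hand side above (a minimal sketch,
# not used elsewhere in the program): at t = 0 with (x, y) = (1, 0) the
# derivative is [0, 1 - k*0 - 1**3 + gamma*cos(0)] = [0, 0.5].
def _dx_dt_check():
    return dx_dt([1.0, 0.0], 0.0)  # expected [0.0, 0.5]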
# Phase portrait
t = np.linspace(0, 500, 10000)
xs = odeint(dx_dt, [1,0], t)
plt.plot(xs[:, 0], xs[:, 1], 'r-', lw=0.5)
plt.xlabel('x', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.tick_params(labelsize=15)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.title('Phase portrait')
# The Poincare section.
fig, ax = plt.subplots(figsize=(6, 6))
t = np.linspace(0, 4000 * (2*np.pi) / omega, 16000000)
xs = odeint(dx_dt, [1, 0], t)
x = [xs[4000*i, 0] for i in range(4000)]
y = [xs[4000*i, 1] for i in range(4000)]
ax.scatter(x, y, color='blue', s=0.1)
plt.xlabel('x', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.tick_params(labelsize=15)
plt.title('The Poincare section')
plt.show()
|
utils/helpers.py | sadjadasghari/EfficientPose | 182 | 12609005 | from tensorflow.keras.applications.imagenet_utils import preprocess_input as efficientnet_preprocess_input
from tensorflow.keras.layers import Activation
from tensorflow.keras.backend import sigmoid, constant
from tensorflow.keras.initializers import Initializer
from torch.nn import ConvTranspose2d, init
from torch import Tensor
import numpy as np
import math
from skimage.transform import rescale
from skimage.util import pad as padding
from scipy.ndimage.filters import gaussian_filter
class Swish(Activation):
"""
Custom Swish activation function for Keras.
"""
def __init__(self, activation, **kwargs):
super(Swish, self).__init__(activation, **kwargs)
self.__name__ = 'Swish'
def swish1(x):
"""
Standard Swish activation.
Args:
x: Keras tensor
Input tensor
Returns:
Output tensor of Swish transformation.
"""
return x * sigmoid(x)
def eswish(x):
"""
E-swish activation with Beta value of 1.25.
Args:
x: Keras tensor
Input tensor
Returns:
Output tensor of E-swish transformation.
"""
beta = 1.25
return beta * x * sigmoid(x)
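# Minimal sketch of the two activations on a small tensor; the values in the
# tf.constant are arbitrary assumptions for illustration and the helper is not
# used by the rest of the module.
def _activation_demo():
    import tensorflow as tf
    x = tf.constant([-1.0, 0.0, 1.0])
    # swish1 applies x * sigmoid(x); eswish scales it by beta = 1.25.
    return swish1(x).numpy(), eswish(x).numpy()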
class keras_BilinearWeights(Initializer):
"""
A Keras implementation of bilinear weights by <NAME> (https://github.com/tensorlayer/tensorlayer/issues/53)
"""
def __init__(self, shape=None, dtype=None):
self.shape = shape
self.dtype = dtype
def __call__(self, shape=None, dtype=None):
# Initialize parameters
if shape:
self.shape = shape
self.dtype = type=np.float32 # Overwrites argument
scale = 2
filter_size = self.shape[0]
num_channels = self.shape[2]
# Create bilinear weights
bilinear_kernel = np.zeros([filter_size, filter_size], dtype=self.dtype)
scale_factor = (filter_size + 1) // 2
if filter_size % 2 == 1:
center = scale_factor - 1
else:
center = scale_factor - 0.5
for x in range(filter_size):
for y in range(filter_size):
bilinear_kernel[x,y] = (1 - abs(x - center) / scale_factor) * \
(1 - abs(y - center) / scale_factor)
# Assign weights
weights = np.zeros((filter_size, filter_size, num_channels, num_channels))
for i in range(num_channels):
weights[:, :, i, i] = bilinear_kernel
return constant(value=weights)
def get_config(self):
return {'shape': self.shape}
class pytorch_BilinearConvTranspose2d(ConvTranspose2d):
"""
A PyTorch implementation of transposed bilinear convolution by mjstevens777 (https://gist.github.com/mjstevens777/9d6771c45f444843f9e3dce6a401b183)
"""
def __init__(self, channels, kernel_size, stride, groups=1):
"""Set up the layer.
Parameters
----------
channels: int
The number of input and output channels
stride: int or tuple
The amount of upsampling to do
groups: int
Set to 1 for a standard convolution. Set equal to channels to
make sure there is no cross-talk between channels.
"""
if isinstance(stride, int):
stride = (stride, stride)
assert groups in (1, channels), "Must use no grouping, " + \
"or one group per channel"
padding = (stride[0] - 1, stride[1] - 1)
super().__init__(
channels, channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups)
def reset_parameters(self):
"""Reset the weight and bias."""
init.constant(self.bias, 0)
init.constant(self.weight, 0)
bilinear_kernel = self.bilinear_kernel(self.kernel_size[0])
for i in range(self.in_channels):
if self.groups == 1:
j = i
else:
j = 0
self.weight.data[i, j] = bilinear_kernel
@staticmethod
def bilinear_kernel(kernel_size):
"""Generate a bilinear upsampling kernel."""
bilinear_kernel = np.zeros([kernel_size, kernel_size])
scale_factor = (kernel_size + 1) // 2
if kernel_size % 2 == 1:
center = scale_factor - 1
else:
center = scale_factor - 0.5
for x in range(kernel_size):
for y in range(kernel_size):
bilinear_kernel[x,y] = (1 - abs(x - center) / scale_factor) * \
(1 - abs(y - center) / scale_factor)
return Tensor(bilinear_kernel)
def resize(source_array, target_height, target_width):
"""
Resizes an image or image-like Numpy array to be no larger than (target_height, target_width) or (target_height, target_width, c).
Args:
source_array: ndarray
Numpy array of shape (h, w) or (h, w, 3)
target_height: int
Desired maximum height
target_width: int
Desired maximum width
Returns:
Resized Numpy array.
"""
# Get height and width of source array
source_height, source_width = source_array.shape[:2]
# Compute correct scale for resizing operation
target_ratio = target_height / target_width
source_ratio = source_height / source_width
if target_ratio > source_ratio:
scale = target_width / source_width
else:
scale = target_height / source_height
# Perform rescaling
resized_array = rescale(source_array, scale, multichannel=True)
return resized_array
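# Minimal usage sketch for resize(): a hypothetical 480x640 RGB frame (random
# data, used only for illustration) is scaled so that neither side exceeds 256.
def _resize_demo():
    frame = np.random.rand(480, 640, 3)
    resized = resize(frame, 256, 256)
    return resized.shape  # roughly (192, 256, 3) for this aspect ratio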
def pad(source_array, target_height, target_width):
"""
Pads an image or image-like Numpy array with zeros to fit the target-size.
Args:
source_array: ndarray
Numpy array of shape (h, w) or (h, w, 3)
target_height: int
Height of padded image
target_width: int
Width of padded image
Returns:
Zero-padded Numpy array of shape (target_height, target_width) or (target_height, target_width, c).
"""
# Get height and width of source array
source_height, source_width = source_array.shape[:2]
# Ensure array is resized properly
if (source_height > target_height) or (source_width > target_width):
source_array = resize(source_array, target_height, target_width)
source_height, source_width = source_array.shape[:2]
# Compute padding variables
pad_left = int((target_width - source_width) / 2)
pad_top = int((target_height - source_height) / 2)
pad_right = int(target_width - source_width - pad_left)
pad_bottom = int(target_height - source_height - pad_top)
paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
has_channels_dim = len(source_array.shape) == 3
if has_channels_dim:
paddings.append([0,0])
# Perform padding
target_array = padding(source_array, paddings, 'constant')
return target_array
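# Minimal usage sketch for pad(): a hypothetical 192x256 frame (random data) is
# zero-padded symmetrically up to a square 256x256 canvas.
def _pad_demo():
    frame = np.random.rand(192, 256, 3)
    padded = pad(frame, 256, 256)
    return padded.shape  # (256, 256, 3)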
def preprocess(batch, resolution, lite=False):
"""
Preprocess Numpy array according to model preferences.
Args:
batch: ndarray
Numpy array of shape (n, h, w, 3)
resolution: int
Input height and width of model to utilize
lite: boolean
Defines if EfficientPose Lite model is used
Returns:
Preprocessed Numpy array of shape (n, resolution, resolution, 3).
"""
# Resize frames according to side
batch = [resize(frame, resolution, resolution) for frame in batch]
# Pad frames in batch to form quadratic input
batch = [pad(frame, resolution, resolution) for frame in batch]
# Convert from normalized pixels to RGB absolute values
batch = [np.uint8(255 * frame) for frame in batch]
# Construct Numpy array from batch
batch = np.asarray(batch)
# Preprocess images in batch
if lite:
batch = efficientnet_preprocess_input(batch, mode='tf')
else:
batch = efficientnet_preprocess_input(batch, mode='torch')
return batch
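# Minimal usage sketch for preprocess(): one hypothetical 480x640 RGB frame is
# resized, padded and normalized into a single batch. The resolution of 256 is
# an arbitrary assumption for illustration.
def _preprocess_demo():
    batch = [np.random.rand(480, 640, 3)]
    return preprocess(batch, 256).shape  # (1, 256, 256, 3)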
def extract_coordinates(frame_output, frame_height, frame_width, real_time=False):
"""
Extract coordinates from supplied confidence maps.
Args:
frame_output: ndarray
Numpy array of shape (h, w, c)
frame_height: int
Height of relevant frame
frame_width: int
Width of relevant frame
        real_time: boolean
Defines if processing is performed in real-time
Returns:
List of predicted coordinates for all c body parts in the frame the outputs are computed from.
"""
# Define body parts
body_parts = ['head_top', 'upper_neck', 'right_shoulder', 'right_elbow', 'right_wrist', 'thorax', 'left_shoulder', 'left_elbow', 'left_wrist', 'pelvis', 'right_hip', 'right_knee', 'right_ankle', 'left_hip', 'left_knee', 'left_ankle']
# Define confidence level
confidence = 0.3
# Fetch output resolution
output_height, output_width = frame_output.shape[0:2]
# Initialize coordinates
frame_coords = []
# Iterate over body parts
for i in range(frame_output.shape[-1]):
# Find peak point
conf = frame_output[...,i]
if not real_time:
conf = gaussian_filter(conf, sigma=1.)
max_index = np.argmax(conf)
peak_y = float(math.floor(max_index / output_width))
peak_x = max_index % output_width
# Verify confidence
if real_time and conf[int(peak_y),int(peak_x)] < confidence:
peak_x = -0.5
peak_y = -0.5
else:
peak_x += 0.5
peak_y += 0.5
# Normalize coordinates
peak_x /= output_width
peak_y /= output_height
# Convert to original aspect ratio
if frame_width > frame_height:
norm_padding = (frame_width - frame_height) / (2 * frame_width)
peak_y = (peak_y - norm_padding) / (1.0 - (2 * norm_padding))
peak_y = -0.5 / output_height if peak_y < 0.0 else peak_y
peak_y = 1.0 if peak_y > 1.0 else peak_y
elif frame_width < frame_height:
norm_padding = (frame_height - frame_width) / (2 * frame_height)
peak_x = (peak_x - norm_padding) / (1.0 - (2 * norm_padding))
peak_x = -0.5 / output_width if peak_x < 0.0 else peak_x
peak_x = 1.0 if peak_x > 1.0 else peak_x
frame_coords.append((body_parts[i], peak_x, peak_y))
return frame_coords
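# Minimal usage sketch for extract_coordinates(): a random stand-in for the
# network output (64x64 confidence maps, one per body part) is decoded into
# normalized (body_part, x, y) tuples for a hypothetical 480x640 frame.
def _extract_coordinates_demo():
    fake_output = np.random.rand(64, 64, 16)
    coords = extract_coordinates(fake_output, frame_height=480, frame_width=640)
    return coords[0]  # e.g. ('head_top', 0.42, 0.57)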
def display_body_parts(image, image_draw, coordinates, image_height=1024, image_width=1024, marker_radius=5):
"""
Draw markers on predicted body part locations.
Args:
image: PIL Image
The loaded image the coordinate predictions are inferred for
image_draw: PIL ImageDraw module
Module for performing drawing operations
coordinates: List
Predicted body part coordinates in image
image_height: int
Height of image
image_width: int
Width of image
marker_radius: int
Radius of marker
Returns:
Instance of PIL image with annotated body part predictions.
"""
# Define body part colors
body_part_colors = ['#fff142', '#fff142', '#576ab1', '#5883c4', '#56bdef', '#f19718', '#d33592', '#d962a6', '#e18abd', '#f19718', '#8ac691', '#a3d091', '#bedb8f', '#7b76b7', '#907ab8', '#a97fb9']
# Draw markers
for i, (body_part, body_part_x, body_part_y) in enumerate(coordinates):
body_part_x *= image_width
body_part_y *= image_height
image_draw.ellipse([(body_part_x - marker_radius, body_part_y - marker_radius), (body_part_x + marker_radius, body_part_y + marker_radius)], fill=body_part_colors[i])
return image
def display_segments(image, image_draw, coordinates, image_height=1024, image_width=1024, segment_width=5):
"""
Draw segments between body parts according to predicted body part locations.
Args:
image: PIL Image
The loaded image the coordinate predictions are inferred for
image_draw: PIL ImageDraw module
Module for performing drawing operations
coordinates: List
Predicted body part coordinates in image
image_height: int
Height of image
image_width: int
Width of image
segment_width: int
Width of association line between markers
Returns:
Instance of PIL image with annotated body part segments.
"""
# Define segments and colors
segments = [(0, 1), (1, 5), (5, 2), (5, 6), (5, 9), (2, 3), (3, 4), (6, 7), (7, 8), (9, 10), (9, 13), (10, 11), (11, 12), (13, 14), (14, 15)]
segment_colors = ['#fff142', '#fff142', '#576ab1', '#5883c4', '#56bdef', '#f19718', '#d33592', '#d962a6', '#e18abd', '#f19718', '#8ac691', '#a3d091', '#bedb8f', '#7b76b7', '#907ab8', '#a97fb9']
# Draw segments
for (body_part_a_index, body_part_b_index) in segments:
_, body_part_a_x, body_part_a_y = coordinates[body_part_a_index]
body_part_a_x *= image_width
body_part_a_y *= image_height
_, body_part_b_x, body_part_b_y = coordinates[body_part_b_index]
body_part_b_x *= image_width
body_part_b_y *= image_height
image_draw.line([(body_part_a_x, body_part_a_y), (body_part_b_x, body_part_b_y)], fill=segment_colors[body_part_b_index], width=segment_width)
return image
def display_camera(cv2, frame, coordinates, frame_height, frame_width):
"""
Display camera frame with annotated body parts and segments according to predicted body part locations.
Args:
cv2: OpenCV
Imported OpenCV instance
frame: ndarray
Numpy array of shape (h, w, 3)
coordinates: List
Predicted body part coordinates in frame
frame_height: int
Height of frame
frame_width: int
Width of frame
"""
# Define body parts and segments
segments = [(0, 1), (1, 5), (5, 2), (5, 6), (5, 9), (2, 3), (3, 4), (6, 7), (7, 8), (9, 10), (9, 13), (10, 11), (11, 12), (13, 14), (14, 15)]
body_part_colors = [(66, 241, 255), (66, 241, 255), (177, 106, 87), (196, 131, 88), (239, 189, 86), (24, 151, 241), (146, 53, 211), (166, 98, 217), (189, 138, 225), (24, 151, 241), (145, 198, 138), (145, 208, 163), (143, 219, 190), (183, 118, 123), (184, 122, 144), (185, 127, 169)]
# Draw lines and markers
remaining = [i for i in range(len(body_part_colors))]
for (a, b) in segments:
a_coordinates = coordinates[a]
a_coordinate_x = int(a_coordinates[1] * frame_width)
a_coordinate_y = int(a_coordinates[2] * frame_height)
b_coordinates = coordinates[b]
b_coordinate_x = int(b_coordinates[1] * frame_width)
b_coordinate_y = int(b_coordinates[2] * frame_height)
if not (a_coordinate_x < 0 or a_coordinate_y < 0 or b_coordinate_x < 0 or b_coordinate_y < 0):
cv2.line(frame, (a_coordinate_x, a_coordinate_y), (b_coordinate_x, b_coordinate_y), color=body_part_colors[a], thickness=2)
if a in remaining:
cv2.circle(frame, (a_coordinate_x, a_coordinate_y), radius=3, color=body_part_colors[a], thickness=2)
remaining.remove(a)
if b in remaining:
cv2.circle(frame, (b_coordinate_x, b_coordinate_y), radius=3, color=body_part_colors[b], thickness=2)
remaining.remove(b)
# Display predictions
frame = cv2.resize(cv2.flip(frame, 1), (1000, 1000))
cv2.imshow('EfficientPose (Groos et al., 2020)', frame) |
tools/fasta_stats/fasta-stats.py | ErasmusMC-Bioinformatics/tools-iuc | 142 | 12609043 | <gh_stars>100-1000
#!/usr/bin/env python
# python version of fasta-stats with some extra features
# written by <EMAIL>
# git: @codemeleon
# date: 10/11/2021
import argparse
import re
from os import path
import numpy as np
from Bio import SeqIO
def calculate_NG50(estimated_genome, total_length, sequence_lengths):
temp = 0
    theoretical_NG50 = estimated_genome / 2.0
NG50 = 0
for seq in sequence_lengths:
temp += seq
        if theoretical_NG50 < temp:
NG50 = seq
break
return NG50
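# Minimal sketch of calculate_NG50() on made-up contig lengths: with an
# estimated genome size of 100, half the estimate is 50, and the cumulative sum
# over [40, 30, 20, 10] first exceeds 50 at the second contig, so NG50 is 30.
def _ng50_demo():
    lengths = [40, 30, 20, 10]
    return calculate_NG50(100, sum(lengths), lengths)  # -> 30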
def run(fasta, stats_output, gaps_output, genome_size):
"""Generates scaffold statistics."""
if not fasta:
exit("Input file not given.")
if not path.isfile(fasta):
exit(f"{fasta} path does not exist.")
seq_len = {}
bases_global = {"A": 0, "N": 0, "T": 0, "C": 0, "G": 0}
bases_seq = {}
seq_id_Ngaprange = {}
nstart = 0
contigs_len = []
gap_count = 0
for seq_record in SeqIO.parse(fasta, "fasta"):
seq = str(seq_record.seq).upper()
# print(len(seq))
seq_len[seq_record.id] = len(seq)
# NOTE: Nucleotide count
bases_seq[seq_record.id] = {
"A": seq.count("A"),
"N": seq.count("N"),
"T": seq.count("T"),
"C": seq.count("C"),
"G": seq.count("G"),
}
bases_global["A"] += bases_seq[seq_record.id]["A"]
bases_global["N"] += bases_seq[seq_record.id]["N"]
bases_global["T"] += bases_seq[seq_record.id]["T"]
bases_global["C"] += bases_seq[seq_record.id]["C"]
bases_global["G"] += bases_seq[seq_record.id]["G"]
# NOTE: Gap count and their range
range_gen = re.finditer("N+", seq)
n_range = [match.span() for match in range_gen]
for n_rng in n_range:
if n_rng[0] == 0 or n_rng[1] == seq_len[seq_record.id]:
continue
else:
gap_count += 1
        # NOTE: Contigs, their lengths from scaffold and their N gap range
seq_id_Ngaprange[seq_record.id] = n_range
n_range_len = len(n_range)
if n_range_len > 0:
n_range = (
[(0, 0)] + n_range + [(seq_len[seq_record.id], seq_len[seq_record.id])]
)
for idx in range(n_range_len + 1):
nstart = n_range[idx][1]
nend = n_range[idx + 1][0]
con_len = nend - nstart
if con_len:
contigs_len.append(con_len)
else:
contigs_len.append(len(seq))
# NOTE: Scaffold statistics
SEQ_LEN_LIST = sorted(seq_len.values(), reverse=True)
scaffold_lens = np.array(SEQ_LEN_LIST)
scaffold_lens_sum = np.cumsum(scaffold_lens)
N50_len = scaffold_lens_sum[-1] * 0.5
N50_idx = np.where(scaffold_lens_sum > N50_len)[0][0]
N90_len = scaffold_lens_sum[-1] * 0.9
N90_idx = np.where(scaffold_lens_sum > N90_len)[0][0]
NG50 = calculate_NG50(genome_size, scaffold_lens_sum[-1], scaffold_lens)
# NOTE: Contig statistics
seq_len_list = sorted(contigs_len, reverse=True)
contigs_len = np.array(seq_len_list)
contigs_len_sum = np.cumsum(contigs_len)
n50_len = contigs_len_sum[-1] * 0.5
n50_idx = np.where(contigs_len_sum > n50_len)[0][0]
n90_len = contigs_len_sum[-1] * 0.9
n90_idx = np.where(contigs_len_sum > n90_len)[0][0]
ng50 = calculate_NG50(genome_size, contigs_len_sum[-1], contigs_len)
with open(stats_output, "w") as soutput:
soutput.write("{}\t{}\n".format("Scaffold L50", N50_idx + 1))
soutput.write("{}\t{}\n".format("Scaffold N50", SEQ_LEN_LIST[N50_idx]))
soutput.write("{}\t{}\n".format("Scaffold L90", N90_idx + 1))
soutput.write("{}\t{}\n".format("Scaffold N90", SEQ_LEN_LIST[N90_idx]))
if genome_size != 0:
soutput.write("{}\t{}\n".format("Scaffold NG50", NG50))
soutput.write("{}\t{}\n".format("Scaffold len_max", SEQ_LEN_LIST[0]))
soutput.write("{}\t{}\n".format("Scaffold len_min", SEQ_LEN_LIST[-1]))
soutput.write(
"{}\t{}\n".format("Scaffold len_mean", int(np.mean(SEQ_LEN_LIST)))
)
soutput.write(
"{}\t{}\n".format("Scaffold len_median", int(np.median(SEQ_LEN_LIST)))
)
soutput.write("{}\t{}\n".format("Scaffold len_std", int(np.std(SEQ_LEN_LIST))))
soutput.write("{}\t{}\n".format("Scaffold num_A", bases_global["A"]))
soutput.write("{}\t{}\n".format("Scaffold num_T", bases_global["T"]))
soutput.write("{}\t{}\n".format("Scaffold num_C", bases_global["C"]))
soutput.write("{}\t{}\n".format("Scaffold num_G", bases_global["G"]))
soutput.write("{}\t{}\n".format("Scaffold num_N", bases_global["N"]))
soutput.write("{}\t{}\n".format("Scaffold num_bp", scaffold_lens_sum[-1]))
soutput.write(
"{}\t{}\n".format(
"Scaffold num_bp_not_N", scaffold_lens_sum[-1] - bases_global["N"]
)
)
soutput.write("{}\t{}\n".format("Scaffold num_seq", len(SEQ_LEN_LIST)))
soutput.write(
"{}\t{:.2f}\n".format(
"Scaffold GC content overall",
(
(bases_global["G"] + bases_global["C"])
* 100.0
/ scaffold_lens_sum[-1]
),
)
)
soutput.write("{}\t{}\n".format("Contig L50", n50_idx + 1))
soutput.write("{}\t{}\n".format("Contig N50", seq_len_list[n50_idx]))
soutput.write("{}\t{}\n".format("Contig L90", n90_idx + 1))
soutput.write("{}\t{}\n".format("Contig N90", seq_len_list[n90_idx]))
if genome_size != 0:
soutput.write("{}\t{}\n".format("Contig NG50", ng50))
soutput.write("{}\t{}\n".format("Contig len_max", seq_len_list[0]))
soutput.write("{}\t{}\n".format("Contig len_min", seq_len_list[-1]))
soutput.write("{}\t{}\n".format("Contig len_mean", int(np.mean(seq_len_list))))
soutput.write(
"{}\t{}\n".format("Contig len_median", int(np.median(seq_len_list)))
)
soutput.write("{}\t{}\n".format("Contig len_std", int(np.std(seq_len_list))))
soutput.write("{}\t{}\n".format("Contig num_bp", contigs_len_sum[-1]))
soutput.write("{}\t{}\n".format("Contig num_seq", len(contigs_len_sum)))
soutput.write("{}\t{}\n".format("Number of gaps", gap_count))
if gaps_output is not None:
# NOTE: generate gaps statistics file
with open(gaps_output, "w") as goutput:
for key in seq_id_Ngaprange:
for rng in seq_id_Ngaprange[key]:
goutput.write("{}\t{}\t{}\n".format(key, rng[0], rng[1]))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--fasta", required=True, help="FASTA file")
parser.add_argument(
"-z",
"--genome_size",
required=False,
type=int,
help="If provided, the NG50 statistic will be computed",
default=0,
)
parser.add_argument(
"-s",
"--stats_output",
required=True,
help="File to store the general statistics",
)
parser.add_argument(
"-r",
"--gaps_output",
required=False,
help="File to store the gaps statistics",
default=None,
)
args = parser.parse_args()
run(
args.fasta,
args.stats_output,
args.gaps_output,
args.genome_size,
)
|
tests/bytecode/mp-tests/tuple3.py | LabAixBidouille/micropython | 303 | 12609054 | def f(x):
return x, x + 1
for a in b, c:
f(a)
|
doc/sphinxext/__init__.py | nno/PyMVPA | 227 | 12609069 | <filename>doc/sphinxext/__init__.py
"""Dummy module groupping our collection of 3rd party extensions"""
|
tests/test_input_laplace_1d.py | clazaro/sfepy | 510 | 12609149 | <reponame>clazaro/sfepy<gh_stars>100-1000
from __future__ import absolute_import
input_name = '../examples/diffusion/laplace_1d.py'
output_name = 'test_laplace_1d.vtk'
from tests_basic import TestInput
class Test(TestInput):
pass
|
tests/profiling/test_recorder.py | p7g/dd-trace-py | 308 | 12609152 | # -*- encoding: utf-8 -*-
import os
import pytest
from ddtrace.profiling import event
from ddtrace.profiling import recorder
from ddtrace.profiling.collector import stack_event
from tests.utils import call_program
def test_defaultdictkey():
d = recorder._defaultdictkey(lambda k: [str(k) + "k"])
assert isinstance(d["abc"], list)
assert 2 not in d
d[1].append("hello")
assert d[1] == ["1k", "hello"]
d[1].append("bar")
assert d[1] == ["1k", "hello", "bar"]
def test_defaultdictkey_no_factory():
d = recorder._defaultdictkey()
with pytest.raises(KeyError):
d[1]
def test_reset():
r = recorder.Recorder()
r.push_event(event.Event())
assert len(r.events[event.Event]) == 1
assert len(r.reset()[event.Event]) == 1
assert len(r.events[event.Event]) == 0
assert len(r.reset()[event.Event]) == 0
r.push_event(event.Event())
assert len(r.events[event.Event]) == 1
assert len(r.reset()[event.Event]) == 1
def test_push_events_empty():
r = recorder.Recorder()
r.push_events([])
assert len(r.events[event.Event]) == 0
def test_limit():
r = recorder.Recorder(
default_max_events=12,
max_events={
stack_event.StackSampleEvent: 24,
},
)
assert r.events[stack_event.StackExceptionSampleEvent].maxlen == 12
assert r.events[stack_event.StackSampleEvent].maxlen == 24
def test_fork():
stdout, stderr, exitcode, pid = call_program("python", os.path.join(os.path.dirname(__file__), "recorder_fork.py"))
assert exitcode == 0, (stdout, stderr)
|
ignite/metrics/ssim.py | Eunjnnn/ignite | 4,119 | 12609178 | <filename>ignite/metrics/ssim.py
from typing import Callable, Sequence, Union
import torch
import torch.nn.functional as F
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["SSIM"]
class SSIM(Metric):
"""
    Computes Structural Similarity Index Measure
Args:
data_range: Range of the image. Typically, ``1.0`` or ``255``.
kernel_size: Size of the kernel. Default: (11, 11)
sigma: Standard deviation of the gaussian kernel.
Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
k1: Parameter of SSIM. Default: 0.01
k2: Parameter of SSIM. Default: 0.03
gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
output_transform: A callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need
to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.
.. testcode::
metric = SSIM(data_range=1.0)
metric.attach(default_evaluator, 'ssim')
preds = torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['ssim'])
.. testoutput::
0.9218971...
.. versionadded:: 0.4.2
"""
def __init__(
self,
data_range: Union[int, float],
kernel_size: Union[int, Sequence[int]] = (11, 11),
sigma: Union[float, Sequence[float]] = (1.5, 1.5),
k1: float = 0.01,
k2: float = 0.03,
gaussian: bool = True,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if isinstance(kernel_size, int):
self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]
elif isinstance(kernel_size, Sequence):
self.kernel_size = kernel_size
else:
raise ValueError("Argument kernel_size should be either int or a sequence of int.")
if isinstance(sigma, float):
self.sigma = [sigma, sigma] # type: Sequence[float]
elif isinstance(sigma, Sequence):
self.sigma = sigma
else:
raise ValueError("Argument sigma should be either float or a sequence of float.")
if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
if any(y <= 0 for y in self.sigma):
raise ValueError(f"Expected sigma to have positive number. Got {sigma}.")
super(SSIM, self).__init__(output_transform=output_transform, device=device)
self.gaussian = gaussian
self.c1 = (k1 * data_range) ** 2
self.c2 = (k2 * data_range) ** 2
self.pad_h = (self.kernel_size[0] - 1) // 2
self.pad_w = (self.kernel_size[1] - 1) // 2
self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
@reinit__is_reduced
def reset(self) -> None:
# Not a tensor because batch size is not known in advance.
self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]
self._num_examples = 0
self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
def _uniform(self, kernel_size: int) -> torch.Tensor:
max, min = 2.5, -2.5
ksize_half = (kernel_size - 1) * 0.5
kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
for i, j in enumerate(kernel):
if min <= j <= max:
kernel[i] = 1 / (max - min)
else:
kernel[i] = 0
return kernel.unsqueeze(dim=0) # (1, kernel_size)
def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
ksize_half = (kernel_size - 1) * 0.5
kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
if self.gaussian:
kernel_x = self._gaussian(kernel_size[0], sigma[0])
kernel_y = self._gaussian(kernel_size[1], sigma[1])
else:
kernel_x = self._uniform(kernel_size[0])
kernel_y = self._uniform(kernel_size[1])
return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.dtype != y.dtype:
raise TypeError(
f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
)
if y_pred.shape != y.shape:
raise ValueError(
f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
if len(y_pred.shape) != 4 or len(y.shape) != 4:
raise ValueError(
f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
channel = y_pred.size(1)
if len(self._kernel.shape) < 4:
self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)
y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])
outputs = F.conv2d(input_list, self._kernel, groups=channel)
output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]
mu_pred_sq = output_list[0].pow(2)
mu_target_sq = output_list[1].pow(2)
mu_pred_target = output_list[0] * output_list[1]
sigma_pred_sq = output_list[2] - mu_pred_sq
sigma_target_sq = output_list[3] - mu_target_sq
sigma_pred_target = output_list[4] - mu_pred_target
a1 = 2 * mu_pred_target + self.c1
a2 = 2 * sigma_pred_target + self.c2
b1 = mu_pred_sq + mu_target_sq + self.c1
b2 = sigma_pred_sq + sigma_target_sq + self.c2
ssim_idx = (a1 * a2) / (b1 * b2)
self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("SSIM must have at least one example before it can be computed.")
return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]
|
app/pylibs/osx64/Cryptodome/Protocol/KDF.py | skylex77/PokeMapGT | 2,557 | 12609194 | <gh_stars>1000+
#
# KDF.py : a collection of Key Derivation Functions
#
# Part of the Python Cryptography Toolkit
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""This file contains a collection of standard key derivation functions.
A key derivation function derives one or more secondary secret keys from
one primary secret (a master key or a pass phrase).
This is typically done to insulate the secondary keys from each other,
to avoid that leakage of a secondary key compromises the security of the
master key, or to thwart attacks on pass phrases (e.g. via rainbow tables).
"""
import struct
from struct import unpack
from Cryptodome.Util.py3compat import *
from Cryptodome.Hash import SHA1, SHA256, HMAC, CMAC
from Cryptodome.Util.strxor import strxor
from Cryptodome.Util.number import size as bit_size, long_to_bytes, bytes_to_long
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
create_string_buffer,
get_raw_buffer)
_raw_salsa20_lib = load_pycryptodome_raw_lib("Cryptodome.Cipher._Salsa20",
"""
int Salsa20_8_core(const uint8_t *x, const uint8_t *y,
uint8_t *out);
uint32_t load_le_uint32(const uint8_t *in);
""")
def PBKDF1(password, salt, dkLen, count=1000, hashAlgo=None):
"""Derive one key from a password (or passphrase).
    This function performs key derivation according to an old version of
the PKCS#5 standard (v1.5).
This algorithm is called ``PBKDF1``. Even though it is still described
in the latest version of the PKCS#5 standard (version 2, or RFC2898),
newer applications should use the more secure and versatile `PBKDF2` instead.
:Parameters:
password : string
The secret password or pass phrase to generate the key from.
salt : byte string
An 8 byte string to use for better protection from dictionary attacks.
This value does not need to be kept secret, but it should be randomly
chosen for each derivation.
dkLen : integer
The length of the desired key. Default is 16 bytes, suitable for instance for `Cryptodome.Cipher.AES`.
count : integer
The number of iterations to carry out. It's recommended to use at least 1000.
hashAlgo : module
The hash algorithm to use, as a module or an object from the `Cryptodome.Hash` package.
The digest length must be no shorter than ``dkLen``.
The default algorithm is `SHA1`.
:Return: A byte string of length `dkLen` that can be used as key.
"""
if not hashAlgo:
hashAlgo = SHA1
    password = tobytes(password)
pHash = hashAlgo.new(password+salt)
digest = pHash.digest_size
if dkLen>digest:
raise TypeError("Selected hash algorithm has a too short digest (%d bytes)." % digest)
if len(salt) != 8:
raise ValueError("Salt is not 8 bytes long (%d bytes instead)." % len(salt))
for i in xrange(count-1):
pHash = pHash.new(pHash.digest())
return pHash.digest()[:dkLen]
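# Minimal usage sketch for PBKDF1 (placeholder passphrase and salt, chosen only
# for illustration): derives a legacy 16-byte key with the default SHA1 hash.
# The salt must be exactly 8 bytes; prefer PBKDF2 for new designs.
def _pbkdf1_demo():
    return PBKDF1(b'my passphrase', b'8bytesal', dkLen=16, count=1000)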
def PBKDF2(password, salt, dkLen=16, count=1000, prf=None):
"""Derive one or more keys from a password (or passphrase).
This function performs key derivation according to
the PKCS#5 standard (v2.0), by means of the ``PBKDF2`` algorithm.
:Parameters:
password : string
The secret password or pass phrase to generate the key from.
salt : string
A string to use for better protection from dictionary attacks.
This value does not need to be kept secret, but it should be randomly
chosen for each derivation. It is recommended to be at least 8 bytes long.
dkLen : integer
The cumulative length of the desired keys. Default is 16 bytes, suitable for instance for `Cryptodome.Cipher.AES`.
count : integer
The number of iterations to carry out. It's recommended to use at least 1000.
prf : callable
A pseudorandom function. It must be a function that returns a pseudorandom string
from two parameters: a secret and a salt. If not specified, HMAC-SHA1 is used.
:Return: A byte string of length `dkLen` that can be used as key material.
If you wanted multiple keys, just break up this string into segments of the desired length.
"""
    password = tobytes(password)
if prf is None:
prf = lambda p,s: HMAC.new(p,s,SHA1).digest()
def link(s):
s[0], s[1] = s[1], prf(password, s[1])
return s[0]
key = b('')
i = 1
while len(key)<dkLen:
s = [ prf(password, salt + struct.pack(">I", i)) ] * 2
key += reduce(strxor, (link(s) for j in range(count)) )
i += 1
return key[:dkLen]
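# Minimal usage sketch for PBKDF2 (placeholder passphrase and salt, chosen only
# for illustration): derives a single 32-byte key with the default HMAC-SHA1
# pseudorandom function; in real use the salt should be random per derivation.
def _pbkdf2_demo():
    return PBKDF2(b'my passphrase', b'random salt', dkLen=32, count=1000)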
class _S2V(object):
"""String-to-vector PRF as defined in `RFC5297`_.
This class implements a pseudorandom function family
based on CMAC that takes as input a vector of strings.
.. _RFC5297: http://tools.ietf.org/html/rfc5297
"""
def __init__(self, key, ciphermod, cipher_params=None):
"""Initialize the S2V PRF.
:Parameters:
key : byte string
A secret that can be used as key for CMACs
based on ciphers from ``ciphermod``.
ciphermod : module
A block cipher module from `Cryptodome.Cipher`.
cipher_params : dictionary
A set of extra parameters to use to create a cipher instance.
"""
self._key = key
self._ciphermod = ciphermod
self._last_string = self._cache = bchr(0)*ciphermod.block_size
self._n_updates = ciphermod.block_size*8-1
if cipher_params is None:
self._cipher_params = {}
else:
self._cipher_params = dict(cipher_params)
@staticmethod
def new(key, ciphermod):
"""Create a new S2V PRF.
:Parameters:
key : byte string
A secret that can be used as key for CMACs
based on ciphers from ``ciphermod``.
ciphermod : module
A block cipher module from `Cryptodome.Cipher`.
"""
return _S2V(key, ciphermod)
def _double(self, bs):
doubled = bytes_to_long(bs)<<1
if bord(bs[0]) & 0x80:
doubled ^= 0x87
return long_to_bytes(doubled, len(bs))[-len(bs):]
def update(self, item):
"""Pass the next component of the vector.
The maximum number of components you can pass is equal to the block
length of the cipher (in bits) minus 1.
:Parameters:
item : byte string
The next component of the vector.
:Raise TypeError: when the limit on the number of components has been reached.
:Raise ValueError: when the component is empty
"""
if not item:
raise ValueError("A component cannot be empty")
if self._n_updates==0:
raise TypeError("Too many components passed to S2V")
self._n_updates -= 1
mac = CMAC.new(self._key,
msg=self._last_string,
ciphermod=self._ciphermod,
cipher_params=self._cipher_params)
self._cache = strxor(self._double(self._cache), mac.digest())
self._last_string = item
def derive(self):
""""Derive a secret from the vector of components.
:Return: a byte string, as long as the block length of the cipher.
"""
if len(self._last_string)>=16:
final = self._last_string[:-16] + strxor(self._last_string[-16:], self._cache)
else:
padded = (self._last_string + bchr(0x80)+ bchr(0)*15)[:16]
final = strxor(padded, self._double(self._cache))
mac = CMAC.new(self._key,
msg=final,
ciphermod=self._ciphermod,
cipher_params=self._cipher_params)
return mac.digest()
def HKDF(master, key_len, salt, hashmod, num_keys=1, context=None):
"""Derive one or more keys from a master secret using
the HMAC-based KDF defined in RFC5869_.
This KDF is not suitable for deriving keys from a password or for key
stretching. Use `PBKDF2` instead.
HKDF is a key derivation method approved by NIST in `SP 800 56C`__.
:Parameters:
master : byte string
The unguessable value used by the KDF to generate the other keys.
It must be a high-entropy secret, though not necessarily uniform.
It must not be a password.
salt : byte string
A non-secret, reusable value that strengthens the randomness
extraction step.
Ideally, it is as long as the digest size of the chosen hash.
If empty, a string of zeroes in used.
key_len : integer
The length in bytes of every derived key.
hashmod : module
A cryptographic hash algorithm from `Cryptodome.Hash`.
`Cryptodome.Hash.SHA512` is a good choice.
num_keys : integer
The number of keys to derive. Every key is ``key_len`` bytes long.
The maximum cumulative length of all keys is
255 times the digest size.
context : byte string
Optional identifier describing what the keys are used for.
:Return: A byte string or a tuple of byte strings.
.. _RFC5869: http://tools.ietf.org/html/rfc5869
.. __: http://csrc.nist.gov/publications/nistpubs/800-56C/SP-800-56C.pdf
"""
output_len = key_len * num_keys
if output_len > (255 * hashmod.digest_size):
raise ValueError("Too much secret data to derive")
if not salt:
salt = bchr(0) * hashmod.digest_size
if context is None:
context = b("")
# Step 1: extract
hmac = HMAC.new(salt, master, digestmod=hashmod)
prk = hmac.digest()
# Step 2: expand
t = [b("")]
n = 1
tlen = 0
while tlen < output_len:
hmac = HMAC.new(prk, t[-1] + context + bchr(n), digestmod=hashmod)
t.append(hmac.digest())
tlen += hashmod.digest_size
n += 1
derived_output = b("").join(t)
if num_keys == 1:
return derived_output[:key_len]
kol = [derived_output[idx:idx + key_len]
for idx in xrange(0, output_len, key_len)]
return list(kol[:num_keys])
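# Minimal usage sketch for HKDF: splits one master secret into two 16-byte keys.
# The all-zero master and all-one salt are placeholders for illustration; in
# practice the master comes from a key agreement or a random generator.
def _hkdf_demo():
    from Cryptodome.Hash import SHA512
    master = b'\x00' * 32
    salt = b'\x01' * 64
    enc_key, mac_key = HKDF(master, 16, salt, SHA512, num_keys=2)
    return enc_key, mac_key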
def _scryptBlockMix(blocks, len_blocks):
"""Hash function for ROMix."""
x = blocks[-1]
core = _raw_salsa20_lib.Salsa20_8_core
result = [ create_string_buffer(64) for _ in range(len(blocks)) ]
for i in xrange(len(blocks)):
core(x, blocks[i], result[i])
x = result[i]
return [result[i + j] for j in xrange(2)
for i in xrange(0, len_blocks, 2)]
def _scryptROMix(blocks, n):
"""Sequential memory-hard function for scrypt."""
x = [blocks[i:i + 64] for i in xrange(0, len(blocks), 64)]
len_x = len(x)
v = [None]*n
load_le_uint32 = _raw_salsa20_lib.load_le_uint32
for i in xrange(n):
v[i] = x
x = _scryptBlockMix(x, len_x)
for i in xrange(n):
j = load_le_uint32(x[-1]) & (n - 1)
t = [strxor(x[idx], v[j][idx]) for idx in xrange(len_x)]
x = _scryptBlockMix(t, len_x)
return b("").join([get_raw_buffer(y) for y in x])
def scrypt(password, salt, key_len, N, r, p, num_keys=1):
"""Derive one or more keys from a passphrase.
This function performs key derivation according to
the `scrypt`_ algorithm, introduced in Percival's paper
`"Stronger key derivation via sequential memory-hard functions"`__.
This implementation is based on the `RFC draft`__.
:Parameters:
password : string
The secret pass phrase to generate the keys from.
salt : string
A string to use for better protection from dictionary attacks.
This value does not need to be kept secret,
but it should be randomly chosen for each derivation.
It is recommended to be at least 8 bytes long.
key_len : integer
The length in bytes of every derived key.
N : integer
CPU/Memory cost parameter. It must be a power of 2 and less
than ``2**32``.
r : integer
Block size parameter.
p : integer
Parallelization parameter.
It must be no greater than ``(2**32-1)/(4r)``.
num_keys : integer
The number of keys to derive. Every key is ``key_len`` bytes long.
By default, only 1 key is generated.
The maximum cumulative length of all keys is ``(2**32-1)*32``
(that is, 128TB).
A good choice of parameters *(N, r , p)* was suggested
by <NAME> in his `presentation in 2009`__:
- *(16384, 8, 1)* for interactive logins (<=100ms)
- *(1048576, 8, 1)* for file encryption (<=5s)
:Return: A byte string or a tuple of byte strings.
.. _scrypt: http://www.tarsnap.com/scrypt.html
.. __: http://www.tarsnap.com/scrypt/scrypt.pdf
.. __: http://tools.ietf.org/html/draft-josefsson-scrypt-kdf-03
.. __: http://www.tarsnap.com/scrypt/scrypt-slides.pdf
"""
if 2 ** (bit_size(N) - 1) != N:
raise ValueError("N must be a power of 2")
if N >= 2 ** 32:
raise ValueError("N is too big")
if p > ((2 ** 32 - 1) * 32) // (128 * r):
raise ValueError("p or r are too big")
prf_hmac_sha256 = lambda p, s: HMAC.new(p, s, SHA256).digest()
blocks = PBKDF2(password, salt, p * 128 * r, 1, prf=prf_hmac_sha256)
blocks = b("").join([_scryptROMix(blocks[x:x + 128 * r], N)
for x in xrange(0, len(blocks), 128 * r)])
dk = PBKDF2(password, blocks, key_len * num_keys, 1,
prf=prf_hmac_sha256)
if num_keys == 1:
return dk
kol = [dk[idx:idx + key_len]
for idx in xrange(0, key_len * num_keys, key_len)]
return kol
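# Minimal usage sketch for scrypt with the interactive-login parameters quoted
# in the docstring above (N=16384, r=8, p=1). The passphrase and salt are
# placeholders for illustration; the salt should be random in real use.
def _scrypt_demo():
    return scrypt(b'my passphrase', b'random salt', key_len=32, N=16384, r=8, p=1)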
|
torchbearer/callbacks/checkpointers.py | NunoEdgarGFlowHub/torchbearer | 358 | 12609220 | import torchbearer
import torch
from torchbearer.callbacks.callbacks import Callback
import os
import warnings
from torchbearer.bases import get_metric
class _Checkpointer(Callback):
def __init__(self, fileformat, save_model_params_only=False, pickle_module=torch.serialization.pickle, pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
super(_Checkpointer, self).__init__()
self.fileformat = fileformat
self.pickle_module = pickle_module
self.pickle_protocol = pickle_protocol
self.save_model_params_only = save_model_params_only
self.most_recent = None
if fileformat.__contains__(os.sep) and not os.path.exists(os.path.dirname(fileformat)):
os.makedirs(os.path.dirname(fileformat))
def save_checkpoint(self, model_state, overwrite_most_recent=False):
state = {}
state.update(model_state)
state.update(model_state[torchbearer.METRICS])
string_state = {str(key): state[key] for key in state.keys()}
filepath = self.fileformat.format(**string_state)
if self.most_recent is not None and overwrite_most_recent:
try:
os.remove(self.most_recent)
except OSError:
warnings.warn('Failed to delete old file. Are you running two checkpointers with the same filename?')
if self.save_model_params_only:
torch.save(model_state[torchbearer.MODEL].state_dict(), filepath, pickle_module=self.pickle_module,
pickle_protocol=self.pickle_protocol)
else:
torch.save(model_state[torchbearer.SELF].state_dict(), filepath, pickle_module=self.pickle_module,
pickle_protocol=self.pickle_protocol)
self.most_recent = filepath
def ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False,
monitor='val_loss', save_best_only=False, mode='auto', period=1, min_delta=0):
"""Save the model after every epoch. `filepath` can contain named formatting options, which will be filled any
values from state. For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints
will be saved with the epoch number and the validation loss in the filename. The torch :class:`.Trial` will be
saved to filename.
Example: ::
>>> from torchbearer.callbacks import ModelCheckpoint
>>> from torchbearer import Trial
>>> import torch
# Example Trial (without optimiser or loss criterion) which uses this checkpointer
>>> model = torch.nn.Linear(1,1)
>>> checkpoint = ModelCheckpoint('my_path.pt', monitor='val_acc', mode='max')
>>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])
Args:
filepath (str): Path to save the model file
save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
should be used only if the results will be loaded into a Torchbearer Trial object later.
monitor (str): Quantity to monitor
save_best_only (bool): If `save_best_only=True`, the latest best model according to the quantity
monitored will not be overwritten
mode (str): One of {auto, min, max}. If `save_best_only=True`, the decision to overwrite the current
save file is made based on either the maximization or the minimization of the monitored quantity. For
`val_acc`, this should be `max`, for `val_loss` this should be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
period (int): Interval (number of epochs) between checkpoints
min_delta (float): If `save_best_only=True`, this is the minimum improvement required to trigger a save
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
- :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist
- :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
"""
if save_best_only:
check = Best(filepath, save_model_params_only, monitor, mode, period, min_delta)
else:
check = Interval(filepath, save_model_params_only, period)
return check
class MostRecent(_Checkpointer):
"""Model checkpointer which saves the most recent model to a given filepath. `filepath` can contain named
    formatting options, which will be filled with any values from state. For example: if `filepath` is
`weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints will be saved with the epoch number and the
validation loss in the filename.
Example: ::
>>> from torchbearer.callbacks import MostRecent
>>> from torchbearer import Trial
>>> import torch
# Example Trial (without optimiser or loss criterion) which uses this checkpointer
>>> model = torch.nn.Linear(1,1)
>>> checkpoint = MostRecent('my_path.pt')
>>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])
Args:
filepath (str): Path to save the model file
save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
should be used only if the results will be loaded into a Torchbearer Trial object later.
pickle_module (module): The pickle module to use, default is 'torch.serialization.pickle'
pickle_protocol (int): The pickle protocol to use, default is 'torch.serialization.DEFAULT_PROTOCOL'
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
- :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist
- :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
"""
def __init__(self, filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False,
pickle_module=torch.serialization.pickle, pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
super(MostRecent, self).__init__(filepath, save_model_params_only=save_model_params_only,
pickle_module=pickle_module, pickle_protocol=pickle_protocol)
self.filepath = filepath
def on_checkpoint(self, state):
super(MostRecent, self).on_end_epoch(state)
self.save_checkpoint(state, overwrite_most_recent=True)
class Best(_Checkpointer):
"""Model checkpointer which saves the best model according to the given configurations. `filepath` can contain
    named formatting options, which will be filled with any values from state. For example: if `filepath` is
`weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints will be saved with the epoch number and the
validation loss in the filename.
Example: ::
>>> from torchbearer.callbacks import Best
>>> from torchbearer import Trial
>>> import torch
# Example Trial (without optimiser or loss criterion) which uses this checkpointer
>>> model = torch.nn.Linear(1,1)
>>> checkpoint = Best('my_path.pt', monitor='val_acc', mode='max')
>>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])
Args:
filepath (str): Path to save the model file
save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
should be used only if the results will be loaded into a Torchbearer Trial object later.
monitor (str): Quantity to monitor
mode (str): One of {auto, min, max}. If `save_best_only=True`, the decision to overwrite the current save file
is made based on either the maximization or the minimization of the monitored quantity. For `val_acc`, this
should be `max`, for `val_loss` this should be `min`, etc. In `auto` mode, the direction is automatically
inferred from the name of the monitored quantity.
period (int): Interval (number of epochs) between checkpoints
min_delta (float): If `save_best_only=True`, this is the minimum improvement required to trigger a save
pickle_module (module): The pickle module to use, default is 'torch.serialization.pickle'
pickle_protocol (int): The pickle protocol to use, default is 'torch.serialization.DEFAULT_PROTOCOL'
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
- :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist, with the `monitor` key populated
- :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
"""
def __init__(self, filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False, monitor='val_loss',
mode='auto', period=1, min_delta=0, pickle_module=torch.serialization.pickle,
pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
super(Best, self).__init__(filepath, save_model_params_only=save_model_params_only,
pickle_module=pickle_module, pickle_protocol=pickle_protocol)
self.min_delta = min_delta
self.mode = mode
self.monitor = monitor
self.period = period
self.epochs_since_last_save = 0
if self.mode not in ['min', 'max']:
if 'acc' in self.monitor:
self.mode = 'max'
else:
self.mode = 'min'
if self.mode == 'min':
self.min_delta *= -1
self.monitor_op = lambda x1, x2: (x1-self.min_delta) < x2
elif self.mode == 'max':
self.min_delta *= 1
self.monitor_op = lambda x1, x2: (x1-self.min_delta) > x2
self.best = None
def state_dict(self):
state_dict = super(Best, self).state_dict()
state_dict['epochs'] = self.epochs_since_last_save
state_dict['best'] = self.best
return state_dict
def load_state_dict(self, state_dict):
super(Best, self).load_state_dict(state_dict)
self.epochs_since_last_save = state_dict['epochs']
self.best = state_dict['best']
return self
def on_start(self, state):
if self.best is None:
self.best = float('inf') if self.mode == 'min' else -float('inf')
def on_checkpoint(self, state):
super(Best, self).on_end_epoch(state)
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
current = get_metric('Best Checkpoint', state, self.monitor)
if current is None:
return
if self.monitor_op(current, self.best):
self.best = current
self.save_checkpoint(state, overwrite_most_recent=True)
class Interval(_Checkpointer):
"""Model checkpointer which which saves the model every 'period' epochs to the given filepath. `filepath` can
contain named formatting options, which will be filled any values from state. For example: if `filepath` is
`weights.{epoch:02d}-{val_loss:.2f}`, then the model checkpoints will be saved with the epoch number and the
validation loss in the filename.
Example: ::
>>> from torchbearer.callbacks import Interval
>>> from torchbearer import Trial
>>> import torch
# Example Trial (without optimiser or loss criterion) which uses this checkpointer
>>> model = torch.nn.Linear(1,1)
>>> checkpoint = Interval('my_path.pt', period=100, on_batch=True)
>>> trial = Trial(model, callbacks=[checkpoint], metrics=['acc'])
Args:
filepath (str): Path to save the model file
save_model_params_only (bool): If `save_model_params_only=True`, only model parameters will be saved so that
the results can be loaded into a PyTorch nn.Module. The other option, `save_model_params_only=False`,
should be used only if the results will be loaded into a Torchbearer Trial object later.
        period (int): Interval (number of epochs, or training steps if `on_batch=True`) between checkpoints
        on_batch (bool): If True, checkpoint every `period` training steps (batches); if False, every `period` epochs
pickle_module (module): The pickle module to use, default is 'torch.serialization.pickle'
pickle_protocol (int): The pickle protocol to use, default is 'torch.serialization.DEFAULT_PROTOCOL'
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `state_dict` method
- :attr:`torchbearer.state.METRICS`: Metrics dictionary should exist
- :attr:`torchbearer.state.SELF`: Self should be the :attr:`torchbearer.Trial` which is running this callback
"""
def __init__(self, filepath='model.{epoch:02d}-{val_loss:.2f}.pt', save_model_params_only=False, period=1, on_batch=False, pickle_module=torch.serialization.pickle, pickle_protocol=torch.serialization.DEFAULT_PROTOCOL):
super(Interval, self).__init__(filepath, save_model_params_only=save_model_params_only,
pickle_module=pickle_module, pickle_protocol=pickle_protocol)
self.period = period
self.epochs_since_last_save = 0
if on_batch:
self.on_step_training = self.on_checkpoint
self.on_checkpoint = lambda _: None
def state_dict(self):
state_dict = super(Interval, self).state_dict()
state_dict['epochs'] = self.epochs_since_last_save
return state_dict
def load_state_dict(self, state_dict):
super(Interval, self).load_state_dict(state_dict)
self.epochs_since_last_save = state_dict['epochs']
return self
def on_checkpoint(self, state):
super(Interval, self).on_end_epoch(state)
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
self.save_checkpoint(state)
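# --- Illustrative usage sketch (editor addition, not part of the upstream torchbearer module) ---
# A minimal, hedged example of wiring the checkpointers above into a Trial, mirroring the doctest
# examples in the class docstrings. The file names are placeholders, and the Trial would still need
# an optimiser, a loss criterion and data (e.g. via `with_train_data`) before calling `run`.
if __name__ == '__main__':
    from torchbearer import Trial

    model = torch.nn.Linear(1, 1)
    # Keep the best model according to validation loss, and also snapshot every 10 training batches.
    best = Best('best.{epoch:02d}-{val_loss:.2f}.pt', monitor='val_loss', mode='min')
    interval = Interval('snapshot.{epoch:02d}.pt', period=10, on_batch=True)
    trial = Trial(model, callbacks=[best, interval], metrics=['acc'])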
|
09_deploy/common/sagemaker_rl/orchestrator/utils/cloudwatch_logger.py | ichen20/oreilly_book | 2,327 | 12609226 | import time
import json
class CloudWatchLogger:
def __init__(self, cw_client, region_name):
self.region_name = region_name
self.cw_client = cw_client
def get_cloudwatch_dashboard_details(self, experiment_id):
        # NOTE: this console URL format assumes a commercial AWS partition; update it for non-commercial regions (e.g. GovCloud or China partitions).
cw_dashboard_url = f"https://{self.region_name}.console.aws.amazon.com/cloudwatch/home?region={self.region_name}#dashboards:name={experiment_id};start=PT1H"
text = f"You can monitor your Training/Hosting evaluation metrics on this [CloudWatch Dashboard]({cw_dashboard_url})"
text += (
"\n\n(Note: This would need Trained/Hosted Models to be evaluated in order to publish Evaluation Scores)"
)
return text
def publish_latest_hosting_information(self, experiment_id, latest_hosted_model_id, latest_hosted_model_score):
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "latest_hosted_model_id_continuous",
"Timestamp": time.time(),
"Value": int(latest_hosted_model_id.split("-")[-1]),
}
],
)
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "latest_hosted_model_score_continuous",
"Timestamp": time.time(),
"Value": float(latest_hosted_model_score),
}
],
)
def publish_latest_training_information(self, experiment_id, latest_trained_model_id, latest_trained_model_score):
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "latest_trained_model_id_continuous",
"Timestamp": time.time(),
"Value": int(latest_trained_model_id.split("-")[-1]),
}
],
)
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "latest_trained_model_score_continuous",
"Timestamp": time.time(),
"Value": float(latest_trained_model_score),
}
],
)
def publish_newly_trained_model_eval_information(
self, experiment_id, new_trained_model_id, new_trained_model_score
):
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "newly_trained_model_id",
"Timestamp": time.time(),
"Value": int(new_trained_model_id.split("-")[-1]),
}
],
)
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "newly_trained_model_score",
"Timestamp": time.time(),
"Value": float(new_trained_model_score),
}
],
)
def publish_rewards_for_simulation(self, experiment_id, reported_rewards_sum):
self.cw_client.put_metric_data(
Namespace=experiment_id,
MetricData=[
{
"MetricName": "reported_rewards_score",
"Timestamp": time.time(),
"Value": float(reported_rewards_sum),
}
],
)
def create_cloudwatch_dashboard_from_experiment_id(self, experiment_id):
cw_json = self.get_cloudwatch_dashboard_json_for_experiment_id(experiment_id, self.region_name)
self.cw_client.put_dashboard(DashboardName=experiment_id, DashboardBody=cw_json)
def get_cloudwatch_dashboard_json_for_experiment_id(self, experiment_id, region_name):
dashboard_json = {
"widgets": [
{
"type": "metric",
"x": 0,
"y": 0,
"width": 9,
"height": 3,
"properties": {
"metrics": [
[
experiment_id,
"latest_hosted_model_id_continuous",
{"label": "(ModelId suffix part only)"},
]
],
"view": "singleValue",
"region": region_name,
"title": "Currently Hosted Model Id",
"period": 60,
"stat": "Maximum",
},
},
{
"type": "metric",
"x": 9,
"y": 0,
"width": 9,
"height": 3,
"properties": {
"metrics": [[experiment_id, "latest_hosted_model_score_continuous", {"label": "EvalScore"}]],
"view": "singleValue",
"region": region_name,
"title": "Currently Hosted Model Eval Score (On latest data)",
"period": 60,
"stat": "Minimum",
},
},
{
"type": "metric",
"x": 0,
"y": 3,
"width": 9,
"height": 3,
"properties": {
"metrics": [
[experiment_id, "latest_trained_model_id_continuous", {"label": "(ModelId suffix only)"}]
],
"view": "singleValue",
"region": region_name,
"title": "Latest Trained Model Id",
"stat": "Maximum",
"period": 60,
"setPeriodToTimeRange": False,
"stacked": True,
},
},
{
"type": "metric",
"x": 9,
"y": 3,
"width": 9,
"height": 3,
"properties": {
"metrics": [[experiment_id, "latest_trained_model_score_continuous", {"label": "EvalScore"}]],
"view": "singleValue",
"region": region_name,
"title": "Latest Trained Model Eval Score",
"period": 60,
"stat": "Maximum",
},
},
{
"type": "metric",
"x": 9,
"y": 6,
"width": 9,
"height": 9,
"properties": {
"metrics": [[experiment_id, "newly_trained_model_score", {"label": "EvalScore"}]],
"view": "timeSeries",
"stacked": False,
"region": region_name,
"stat": "Maximum",
"period": 60,
"title": "New Model Eval Score Over Time",
"yAxis": {"left": {"min": 0, "max": 1}},
},
},
{
"type": "metric",
"x": 0,
"y": 6,
"width": 9,
"height": 9,
"properties": {
"metrics": [[experiment_id, "reported_rewards_score", {"label": "Rewards"}]],
"view": "timeSeries",
"stacked": False,
"region": region_name,
"stat": "Maximum",
"period": 60,
"title": "Experiment's Reported Rewards",
"yAxis": {"left": {"min": 0, "max": 1}},
"liveData": True,
"legend": {"position": "bottom"},
},
},
]
}
return json.dumps(dashboard_json)
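# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A hedged example of constructing the logger with a boto3 CloudWatch client and publishing a few
# data points. It assumes valid AWS credentials and uses placeholder experiment/model identifiers;
# the "<name>-<number>" model id format matches what the publish_* methods above expect to parse.
if __name__ == "__main__":
    import boto3

    region = "us-west-2"
    logger = CloudWatchLogger(boto3.client("cloudwatch", region_name=region), region)
    logger.create_cloudwatch_dashboard_from_experiment_id("my-experiment")
    logger.publish_latest_training_information("my-experiment", "model-42", 0.87)
    logger.publish_rewards_for_simulation("my-experiment", 0.5)
    print(logger.get_cloudwatch_dashboard_details("my-experiment"))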
|
crabageprediction/venv/Lib/site-packages/pandas/tests/series/methods/test_update.py | 13rianlucero/CrabAgePrediction | 28,899 | 12609227 | <filename>crabageprediction/venv/Lib/site-packages/pandas/tests/series/methods/test_update.py
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
CategoricalDtype,
DataFrame,
NaT,
Series,
Timestamp,
)
import pandas._testing as tm
class TestUpdate:
def test_update(self):
s = Series([1.5, np.nan, 3.0, 4.0, np.nan])
s2 = Series([np.nan, 3.5, np.nan, 5.0])
s.update(s2)
expected = Series([1.5, 3.5, 3.0, 5.0, np.nan])
tm.assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
df["c"].update(Series(["foo"], index=[0]))
expected = DataFrame(
[[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"other, dtype, expected",
[
# other is int
([61, 63], "int32", Series([10, 61, 12], dtype="int32")),
([61, 63], "int64", Series([10, 61, 12])),
([61, 63], float, Series([10.0, 61.0, 12.0])),
([61, 63], object, Series([10, 61, 12], dtype=object)),
# other is float, but can be cast to int
([61.0, 63.0], "int32", Series([10, 61, 12], dtype="int32")),
([61.0, 63.0], "int64", Series([10, 61, 12])),
([61.0, 63.0], float, Series([10.0, 61.0, 12.0])),
([61.0, 63.0], object, Series([10, 61.0, 12], dtype=object)),
            # other is float, cannot be cast to int
([61.1, 63.1], "int32", Series([10.0, 61.1, 12.0])),
([61.1, 63.1], "int64", Series([10.0, 61.1, 12.0])),
([61.1, 63.1], float, Series([10.0, 61.1, 12.0])),
([61.1, 63.1], object, Series([10, 61.1, 12], dtype=object)),
# other is object, cannot be cast
([(61,), (63,)], "int32", Series([10, (61,), 12])),
([(61,), (63,)], "int64", Series([10, (61,), 12])),
([(61,), (63,)], float, Series([10.0, (61,), 12.0])),
([(61,), (63,)], object, Series([10, (61,), 12])),
],
)
def test_update_dtypes(self, other, dtype, expected):
ser = Series([10, 11, 12], dtype=dtype)
other = Series(other, index=[1, 3])
ser.update(other)
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
"series, other, expected",
[
# update by key
(
Series({"a": 1, "b": 2, "c": 3, "d": 4}),
{"b": 5, "c": np.nan},
Series({"a": 1, "b": 5, "c": 3, "d": 4}),
),
# update by position
(Series([1, 2, 3, 4]), [np.nan, 5, 1], Series([1, 5, 1, 4])),
],
)
def test_update_from_non_series(self, series, other, expected):
# GH 33215
series.update(other)
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize(
"data, other, expected, dtype",
[
(["a", None], [None, "b"], ["a", "b"], "string[python]"),
pytest.param(
["a", None],
[None, "b"],
["a", "b"],
"string[pyarrow]",
marks=td.skip_if_no("pyarrow", min_version="1.0.0"),
),
([1, None], [None, 2], [1, 2], "Int64"),
([True, None], [None, False], [True, False], "boolean"),
(
["a", None],
[None, "b"],
["a", "b"],
CategoricalDtype(categories=["a", "b"]),
),
(
[Timestamp(year=2020, month=1, day=1, tz="Europe/London"), NaT],
[NaT, Timestamp(year=2020, month=1, day=1, tz="Europe/London")],
[Timestamp(year=2020, month=1, day=1, tz="Europe/London")] * 2,
"datetime64[ns, Europe/London]",
),
],
)
def test_update_extension_array_series(self, data, other, expected, dtype):
result = Series(data, dtype=dtype)
other = Series(other, dtype=dtype)
expected = Series(expected, dtype=dtype)
result.update(other)
tm.assert_series_equal(result, expected)
def test_update_with_categorical_type(self):
# GH 25744
dtype = CategoricalDtype(["a", "b", "c", "d"])
s1 = Series(["a", "b", "c"], index=[1, 2, 3], dtype=dtype)
s2 = Series(["b", "a"], index=[1, 2], dtype=dtype)
s1.update(s2)
result = s1
expected = Series(["b", "a", "c"], index=[1, 2, 3], dtype=dtype)
tm.assert_series_equal(result, expected)
|
app/iclass/models/editor.py | edisonlz/fastor | 285 | 12609256 | # coding=utf-8
from django.db import models
from wi_cache import function_cache
from django.conf import settings
from app.iclass.utils.short_id import ShortID
class Editor(models.Model):
"""
    Editor content converted to a (short) URL.
"""
STATUS_NORMAL = 0
STATUS_CLOSE = 1
title = models.CharField(max_length=255, verbose_name=u'标题', default='')
content = models.TextField(verbose_name=u'内容')
status = models.IntegerField(verbose_name=u'状态', default=1, db_index=True)
created_at = models.DateTimeField(auto_now_add=True, verbose_name='创建时间', db_index=True)
updated_at = models.DateTimeField(auto_now=True, verbose_name='更新时间', db_index=True)
class Meta:
app_label = "iclass"
def to_json(self):
r = {
"id": self.id,
"title": self.title,
"content": self.content,
"created_at": self.created_at.strftime("%Y-%m-%d %H:%M:%S"),
}
return r
@property
def status_ch(self):
if self.status == self.STATUS_NORMAL:
return '开启'
else:
return '关闭'
@classmethod
def get_editor(cls, _id):
return Editor.objects.filter(id=_id).first()
@property
def short_id(self):
return ShortID().toHex(self.id)
@property
def short_url(self):
return "%s/e/%s" % (settings.HOST, self.short_id)
|
PhysicsTools/PatAlgos/python/patTestJEC_cfi.py | ckamtsikis/cmssw | 852 | 12609268 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
# Be sure to change the "V5" to whatever is in your payloads.
from CondCore.DBCommon.CondDBSetup_cfi import *
jec = cms.ESSource("PoolDBESSource",CondDBSetup,
connect = cms.string("frontier://FrontierPrep/CMS_COND_PHYSICSTOOLS"),
toGet = cms.VPSet(
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_AK4Calo"),
label=cms.untracked.string("AK4Calo")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_AK4PF"),
label=cms.untracked.string("AK4PF")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_AK4JPT"),
label=cms.untracked.string("AK4JPT")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_AK4TRK"),
label=cms.untracked.string("AK4TRK")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_AK7Calo"),
label=cms.untracked.string("AK7Calo")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_AK7PF"),
label=cms.untracked.string("AK7PF")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_IC5Calo"),
label=cms.untracked.string("IC5Calo")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_IC5PF"),
label=cms.untracked.string("IC5PF")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_KT4Calo"),
label=cms.untracked.string("KT4Calo")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_KT4PF"),
label=cms.untracked.string("KT4PF")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_KT6Calo"),
label=cms.untracked.string("KT6PF")),
cms.PSet(record = cms.string("JetCorrectionsRecord"),
tag = cms.string("JetCorrectorParametersCollection_Jec10V1_KT6PF"),
label=cms.untracked.string("KT6PF")),
)
)
es_prefer_jec = cms.ESPrefer("PoolDBESSource","jec")
|
testcases/common.py | daxlab/pyalgotrade | 1,000 | 12609330 | <filename>testcases/common.py
# PyAlgoTrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import csv
import os
import shutil
import subprocess
import tempfile
import unittest
# Force matplotlib to not use any Xwindows backend.
import matplotlib
matplotlib.use('Agg')
from pyalgotrade import dataseries
class RunResults(object):
def __init__(self, retcode, output):
self.__retcode = retcode
self.__output = output
def exit_ok(self):
return self.__retcode == 0
def get_output(self):
return self.__output
def get_output_lines(self, skip_last_line=False):
ret = self.__output.split("\n")
# Skip the last, empty line.
if skip_last_line:
ret = ret[:-1]
return ret
def run_cmd(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output, unused_err = process.communicate()
retcode = process.poll()
return RunResults(retcode, output)
def run_python_code(code):
cmd = ["python"]
cmd.append("-u")
cmd.append("-c")
cmd.append(code)
return run_cmd(cmd)
def run_sample_script(script, params=[]):
cmd = ["python"]
cmd.append("-u")
cmd.append(os.path.join("samples", script))
cmd.extend(params)
return run_cmd(cmd)
def get_file_lines(fileName):
rawLines = open(fileName, "r").readlines()
return [rawLine.strip() for rawLine in rawLines]
def compare_head(fileName, lines, path="samples"):
assert(len(lines) > 0)
fileLines = get_file_lines(os.path.join(path, fileName))
return fileLines[0:len(lines)] == lines
def compare_tail(fileName, lines, path="samples"):
assert(len(lines) > 0)
fileLines = get_file_lines(os.path.join(path, fileName))
return fileLines[len(lines)*-1:] == lines
def tail_file(fileName, line_count, path="samples"):
lines = get_file_lines(os.path.join(path, fileName))
return lines[line_count*-1:]
def load_test_csv(path):
inputSeq = []
expectedSeq = []
csvFile = open(path, "r")
reader = csv.DictReader(csvFile)
for row in reader:
inputSeq.append(float(row["Input"]))
expected = row["Expected"]
if not expected:
expected = None
else:
expected = float(expected)
expectedSeq.append(expected)
return inputSeq, expectedSeq
def get_data_file_path(fileName):
return os.path.join(os.path.split(__file__)[0], "data", fileName)
def test_from_csv(testcase, filename, filterClassBuilder, roundDecimals=2, maxLen=dataseries.DEFAULT_MAX_LEN):
inputValues, expectedValues = load_test_csv(get_data_file_path(filename))
inputDS = dataseries.SequenceDataSeries(maxLen=maxLen)
filterDS = filterClassBuilder(inputDS)
for i in xrange(len(inputValues)):
inputDS.append(inputValues[i])
value = safe_round(filterDS[i], roundDecimals)
expectedValue = safe_round(expectedValues[i], roundDecimals)
testcase.assertEqual(value, expectedValue)
def init_temp_path():
storage = get_temp_path()
if not os.path.exists(storage):
os.mkdir(storage)
def get_temp_path():
return "data"
def safe_round(number, ndigits):
ret = None
if number is not None:
ret = round(number, ndigits)
return ret
class CopyFiles:
def __init__(self, files, dst):
self.__files = files
self.__dst = dst
self.__toRemove = []
def __enter__(self):
for src in self.__files:
shutil.copy2(src, self.__dst)
if os.path.isdir(self.__dst):
self.__toRemove.append(os.path.join(self.__dst, os.path.basename(src)))
else:
self.__toRemove.append(self.__dst)
def __exit__(self, exc_type, exc_val, exc_tb):
for src in self.__toRemove:
os.remove(src)
class TmpDir(object):
def __init__(self):
self.__tmpdir = None
def __enter__(self):
self.__tmpdir = tempfile.mkdtemp()
return self.__tmpdir
def __exit__(self, exc_type, exc_val, exc_tb):
if self.__tmpdir is not None:
shutil.rmtree(self.__tmpdir)
class TestCase(unittest.TestCase):
pass
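# --- Illustrative usage sketch (editor addition, not part of the original helpers) ---
# A hedged example of a test exercising a couple of the helpers above. It only assumes that a
# "python" interpreter is available on the PATH, which the run_* helpers already require.
class ExampleUsageTestCase(TestCase):
    def testRunPythonCodeAndTmpDir(self):
        res = run_python_code("print('ok')")
        self.assertTrue(res.exit_ok())
        self.assertEqual(res.get_output_lines(skip_last_line=True)[-1], "ok")
        with TmpDir() as tmpPath:
            self.assertTrue(os.path.isdir(tmpPath))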
|
feed/utils.py | nonomal/oh-my-rss | 270 | 12609349 | # -*- coding: utf-8 -*-
import os
from ohmyrss.settings import CRAWL_FLAG_DIR
import time
import urllib
import hashlib
def mkdir(directory):
return os.makedirs(directory, exist_ok=True)
def get_hash_name(s):
return hashlib.md5(s.encode('utf8')).hexdigest()
def is_crawled_url(url):
url_hash = get_hash_name(url)
flag_dir = os.path.join(CRAWL_FLAG_DIR, url_hash[0], url_hash[-1])
flag_file = os.path.join(flag_dir, url_hash)
return os.path.exists(flag_file)
def mark_crawled_url(*urls):
    # Mark the URL both before and after any redirect; URLs with Chinese (percent-encoded) paths also need their unquoted form recorded.
for url in urls:
write_crawl_flag_file(url)
if url != urllib.parse.unquote(url):
write_crawl_flag_file(urllib.parse.unquote(url))
return True
def write_crawl_flag_file(url):
url_hash = get_hash_name(url)
flag_dir = os.path.join(CRAWL_FLAG_DIR, url_hash[0], url_hash[-1])
mkdir(flag_dir)
flag_file = os.path.join(flag_dir, url_hash)
if os.path.exists(flag_file):
return True
try:
open(flag_file, 'w').close()
except:
return False
return True
def current_ts():
return int(time.time() * 1000)
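# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A hedged example of the crawl-flag helpers above. It assumes Django settings (CRAWL_FLAG_DIR)
# are already configured, which is why the snippet is left commented; the URL is a placeholder.
#
# url = 'https://example.com/post/%E6%96%87%E7%AB%A0'
# if not is_crawled_url(url):
#     mark_crawled_url(url)   # records both the quoted and the unquoted form
# assert is_crawled_url(url)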
|
menpo/feature/features.py | apapaion/menpo | 311 | 12609350 | import itertools
import warnings
import numpy as np
from .base import imgfeature, ndfeature
@ndfeature
def gradient(pixels):
r"""
Calculates the gradient of an input image. The image is assumed to have
channel information on the first axis. In the case of multiple channels,
it returns the gradient over each axis over each channel as the first axis.
The gradient is computed using second order accurate central differences in
the interior and first order accurate one-side (forward or backwards)
differences at the boundaries.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array where the first dimension
is interpreted as channels. This means an N-dimensional image is
represented by an N+1 dimensional array.
If the image is 2-dimensional the pixels should be of type
float/double (int is not supported).
Returns
-------
gradient : `ndarray`
The gradient over each axis over each channel. Therefore, the
first axis of the gradient of a 2D, single channel image, will have
length `2`. The first axis of the gradient of a 2D, 3-channel image,
will have length `6`, the ordering being
``I[:, 0, 0] = [R0_y, G0_y, B0_y, R0_x, G0_x, B0_x]``. To be clear,
all the ``y``-gradients are returned over each channel, then all
the ``x``-gradients.
"""
if pixels.dtype == np.uint8:
raise TypeError("Attempting to take the gradient on a uint8 image.")
n_dims = pixels.ndim - 1
grad_per_dim_per_channel = [np.gradient(g, edge_order=1) for g in pixels]
# Flatten out the separate dims
grad_per_channel = list(itertools.chain.from_iterable(grad_per_dim_per_channel))
# Add a channel axis for broadcasting
grad_per_channel = [g[None, ...] for g in grad_per_channel]
# Permute the list so it is first axis, second axis, etc
grad_per_channel = [grad_per_channel[i::n_dims] for i in range(n_dims)]
grad_per_channel = list(itertools.chain.from_iterable(grad_per_channel))
# Concatenate gradient list into an array (the new_image)
return np.concatenate(grad_per_channel, axis=0)
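# --- Illustrative sketch (editor addition, not part of the original module) ---
# A small, hedged demonstration of the channel ordering documented above: for a channels-first
# 2D image all the y-gradients come first, then all the x-gradients. The shape is a placeholder.
def _gradient_ordering_sketch():
    img = np.random.rand(3, 4, 5)   # 3-channel, 4x5 image (channels first)
    g = gradient(img)
    assert g.shape == (6, 4, 5)     # [R_y, G_y, B_y, R_x, G_x, B_x]
    return g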
@ndfeature
def gaussian_filter(pixels, sigma):
r"""
Calculates the convolution of the input image with a multidimensional
Gaussian filter.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
sigma : `float` or `list` of `float`
The standard deviation for Gaussian kernel. The standard deviations of
the Gaussian filter are given for each axis as a `list`, or as a single
`float`, in which case it is equal for all axes.
Returns
-------
output_image : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The filtered image has the same type and size as the input ``pixels``.
"""
from scipy.ndimage import gaussian_filter as scipy_gaussian_filter # expensive
output = np.empty(pixels.shape, dtype=pixels.dtype)
for dim in range(pixels.shape[0]):
scipy_gaussian_filter(pixels[dim], sigma, output=output[dim])
return output
@ndfeature
def igo(pixels, double_angles=False, verbose=False):
r"""
Extracts Image Gradient Orientation (IGO) features from the input image.
The output image has ``N * C`` number of channels, where ``N`` is the
number of channels of the original image and ``C = 2`` or ``C = 4``
depending on whether double angles are used.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
double_angles : `bool`, optional
Assume that ``phi`` represents the gradient orientations.
If this flag is ``False``, the features image is the concatenation of
``cos(phi)`` and ``sin(phi)``, thus 2 channels.
If ``True``, the features image is the concatenation of
``cos(phi)``, ``sin(phi)``, ``cos(2 * phi)``, ``sin(2 * phi)``, thus 4
channels.
verbose : `bool`, optional
Flag to print IGO related information.
Returns
-------
igo : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The IGO features image. It has the same type and shape as the input
``pixels``. The output number of channels depends on the
``double_angles`` flag.
Raises
------
ValueError
Image has to be 2D in order to extract IGOs.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Subspace learning
from image gradient orientations", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 34, num. 12, p. 2454--2466, 2012.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError(
"IGOs only work on 2D images. Expects image data "
"to be 3D, channels + shape."
)
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_chnls = 2
if double_angles:
feat_chnls = 4
# compute gradients
grad = gradient(pixels)
# compute angles
grad_orient = np.angle(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute igo image
igo_pixels = np.empty(
(n_img_chnls * feat_chnls, pixels.shape[1], pixels.shape[2]), dtype=pixels.dtype
)
if double_angles:
dbl_grad_orient = 2 * grad_orient
# y angles
igo_pixels[:n_img_chnls] = np.sin(grad_orient)
igo_pixels[n_img_chnls : n_img_chnls * 2] = np.sin(dbl_grad_orient)
# x angles
igo_pixels[n_img_chnls * 2 : n_img_chnls * 3] = np.cos(grad_orient)
igo_pixels[n_img_chnls * 3 :] = np.cos(dbl_grad_orient)
else:
igo_pixels[:n_img_chnls] = np.sin(grad_orient) # y
igo_pixels[n_img_chnls:] = np.cos(grad_orient) # x
# print information
if verbose:
info_str = "IGO Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls
)
info_str = "{} - Double angles are {}.\n".format(
info_str, "enabled" if double_angles else "disabled"
)
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, igo_pixels.shape[2], igo_pixels.shape[1], n_img_chnls
)
print(info_str)
return igo_pixels
@ndfeature
def es(pixels, verbose=False):
r"""
Extracts Edge Structure (ES) features from the input image. The output image
has ``N * C`` number of channels, where ``N`` is the number of channels of
the original image and ``C = 2``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either an image object itself or an array where the first axis
represents the number of channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
verbose : `bool`, optional
Flag to print ES related information.
Returns
-------
es : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The ES features image. It has the same type and shape as the input
``pixels``. The output number of channels is ``C = 2``.
Raises
------
ValueError
Image has to be 2D in order to extract ES features.
References
----------
.. [1] <NAME>, <NAME>, "On representing edge structure for model
matching", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2001.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError(
"ES features only work on 2D images. Expects "
"image data to be 3D, channels + shape."
)
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_channels = 2
# compute gradients
grad = gradient(pixels)
# compute magnitude
grad_abs = np.abs(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute es image
grad_abs = grad_abs + np.median(grad_abs)
es_pixels = np.empty(
(pixels.shape[0] * feat_channels, pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype,
)
es_pixels[:n_img_chnls] = grad[:n_img_chnls] / grad_abs
es_pixels[n_img_chnls:] = grad[n_img_chnls:] / grad_abs
# print information
if verbose:
info_str = "ES Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls
)
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, es_pixels.shape[2], es_pixels.shape[1], n_img_chnls
)
print(info_str)
return es_pixels
@ndfeature
def daisy(
pixels,
step=1,
radius=15,
rings=2,
histograms=2,
orientations=8,
normalization="l1",
sigmas=None,
ring_radii=None,
verbose=False,
):
r"""
Extracts Daisy features from the input image. The output image has ``N * C``
number of channels, where ``N`` is the number of channels of the original
image and ``C`` is the feature channels determined by the input options.
Specifically, ``C = (rings * histograms + 1) * orientations``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
step : `int`, optional
The sampling step that defines the density of the output image.
radius : `int`, optional
The radius (in pixels) of the outermost ring.
rings : `int`, optional
The number of rings to be used.
histograms : `int`, optional
The number of histograms sampled per ring.
orientations : `int`, optional
The number of orientations (bins) per histogram.
normalization : [ 'l1', 'l2', 'daisy', None ], optional
It defines how to normalize the descriptors
If 'l1' then L1-normalization is applied at each descriptor.
If 'l2' then L2-normalization is applied at each descriptor.
If 'daisy' then L2-normalization is applied at individual histograms.
If None then no normalization is employed.
sigmas : `list` of `float` or ``None``, optional
Standard deviation of spatial Gaussian smoothing for the centre
histogram and for each ring of histograms. The `list` of sigmas should
be sorted from the centre and out. I.e. the first sigma value defines
the spatial smoothing of the centre histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the `rings` parameter by setting ``rings = len(sigmas) - 1``.
ring_radii : `list` of `float` or ``None``, optional
Radius (in pixels) for each ring. Specifying `ring_radii` overrides the
`rings` and `radius` parameters by setting ``rings = len(ring_radii)``
and ``radius = ring_radii[-1]``.
If both sigmas and ring_radii are given, they must satisfy ::
len(ring_radii) == len(sigmas) + 1
since no radius is needed for the centre histogram.
verbose : `bool`
Flag to print Daisy related information.
Returns
-------
daisy : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The ES features image. It has the same type and shape as the input
``pixels``. The output number of channels is
``C = (rings * histograms + 1) * orientations``.
Raises
------
ValueError
len(sigmas)-1 != len(ring_radii)
ValueError
Invalid normalization method.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Daisy: An efficient dense descriptor
applied to wide-baseline stereo", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 32, num. 5, p. 815-830, 2010.
"""
from menpo.external.skimage._daisy import _daisy
# Parse options
if (
sigmas is not None
and ring_radii is not None
and len(sigmas) - 1 != len(ring_radii)
):
raise ValueError("`len(sigmas)-1 != len(ring_radii)`")
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization is None:
normalization = "off"
if normalization not in ["l1", "l2", "daisy", "off"]:
raise ValueError("Invalid normalization method.")
# Compute daisy features
daisy_descriptor = _daisy(
pixels,
step=step,
radius=radius,
rings=rings,
histograms=histograms,
orientations=orientations,
normalization=normalization,
sigmas=sigmas,
ring_radii=ring_radii,
)
# print information
if verbose:
info_str = "Daisy Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], pixels.shape[0]
)
info_str = "{} - Sampling step is {}.\n".format(info_str, step)
info_str = (
"{} - Radius of {} pixels, {} rings and {} histograms "
"with {} orientations.\n".format(
info_str, radius, rings, histograms, orientations
)
)
if not normalization == "off":
info_str = "{} - Using {} normalization.\n".format(info_str, normalization)
else:
info_str = "{} - No normalization emplyed.\n".format(info_str)
info_str = "{}Output image size {}W x {}H x {}.".format(
info_str,
daisy_descriptor.shape[2],
daisy_descriptor.shape[1],
daisy_descriptor.shape[0],
)
print(info_str)
return daisy_descriptor
@imgfeature
def normalize(img, scale_func=None, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixel values via mean centering and an optional scaling. By
default the scaling will be ``1.0``. The ``mode`` parameter selects
whether the normalisation is computed across all pixels in the image or
per-channel.
Parameters
----------
img : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
scale_func : `callable`, optional
Compute the scaling factor. Expects a single parameter and an optional
`axis` keyword argument and will be passed the entire pixel array.
Should return a 1D numpy array of one or more values.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
if scale_func is None:
def scale_func(_, axis=None):
return np.array([1.0])
pixels = img.as_vector(keep_channels=True)
if mode == "all":
centered_pixels = pixels - np.mean(pixels)
scale_factor = scale_func(centered_pixels)
elif mode == "per_channel":
centered_pixels = pixels - np.mean(pixels, axis=1, keepdims=True)
scale_factor = scale_func(centered_pixels, axis=1).reshape([-1, 1])
else:
raise ValueError(
"Supported modes are {{'all', 'per_channel'}} - '{}' "
"is not known".format(mode)
)
zero_denom = (scale_factor == 0).ravel()
    any_zero_denom = np.any(zero_denom)
    if error_on_divide_by_zero and any_zero_denom:
        raise ValueError("Computed scale factor cannot be 0.0")
    elif any_zero_denom:
        warnings.warn(
            "One or more of the scale factors are 0.0 and thus these "
            "entries will be skipped during normalization."
)
non_zero_denom = ~zero_denom
centered_pixels[non_zero_denom] = (
centered_pixels[non_zero_denom] / scale_factor[non_zero_denom]
)
return img.from_vector(centered_pixels)
else:
return img.from_vector(centered_pixels / scale_factor)
@ndfeature
def normalize_norm(pixels, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit norm. The ``mode``
parameter selects whether the normalisation is computed across all pixels in
the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_norm(x, axis=None):
return np.linalg.norm(x, axis=axis)
return normalize(
pixels,
scale_func=unit_norm,
mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero,
)
@ndfeature
def normalize_std(pixels, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit standard deviation.
The ``mode`` parameter selects whether the normalisation is computed across
all pixels in the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_std(x, axis=None):
return np.std(x, axis=axis)
return normalize(
pixels,
scale_func=unit_std,
mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero,
)
@ndfeature
def normalize_var(pixels, mode="all", error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and normalize according
to the variance.
The ``mode`` parameter selects whether the normalisation is computed across
all pixels in the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_var(x, axis=None):
return np.var(x, axis=axis)
return normalize(
pixels,
scale_func=unit_var,
mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero,
)
@ndfeature
def no_op(pixels):
r"""
A no operation feature - does nothing but return a copy of the pixels
passed in.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A copy of the image that was passed in.
"""
return pixels.copy()
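# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A hedged end-to-end example of a few of the features above on a random channels-first image;
# the shape is a placeholder. igo yields 2 channels per input channel (4 with double angles) and
# es yields 2 channels per input channel, as documented in their docstrings.
def _features_usage_sketch():
    img = np.random.rand(1, 32, 32)
    assert igo(img).shape == (2, 32, 32)
    assert igo(img, double_angles=True).shape == (4, 32, 32)
    assert es(img).shape == (2, 32, 32)
    return no_op(img)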
|
app-testing/src/resources/__init__.py | anuwrag/opentrons | 235 | 12609358 | """Definitions of resources needed in tests."""
|
keras/examples/reuters_mlp.py | molingbo/crcn | 167 | 12609380 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
'''
Train and evaluate a simple MLP on the Reuters newswire topic classification task.
GPU run command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python examples/reuters_mlp.py
CPU run command:
python examples/reuters_mlp.py
'''
max_words = 10000
batch_size = 16
print("Loading data...")
(X_train, y_train), (X_test, y_test) = reuters.load_data(nb_words=max_words, test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
nb_classes = np.max(y_train)+1
print(nb_classes, 'classes')
print("Vectorizing sequence data...")
tokenizer = Tokenizer(nb_words=max_words)
X_train = tokenizer.sequences_to_matrix(X_train, mode="binary")
X_test = tokenizer.sequences_to_matrix(X_test, mode="binary")
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print("Convert class vector to binary class matrix (for use with categorical_crossentropy)")
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
print("Building model...")
model = Sequential()
model.add(Dense(max_words, 256, init='normal'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256, nb_classes, init='normal'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(X_train, Y_train, nb_epoch=4, batch_size=batch_size, verbose=1, show_accuracy=True, validation_split=0.1)
score = model.evaluate(X_test, Y_test, batch_size=batch_size, verbose=1, show_accuracy=True)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
test/feature/test_affine_shape_estimator.py | saurabhya/kornia | 418 | 12609405 | import pytest
import torch
from torch.autograd import gradcheck
import kornia.testing as utils # test utils
from kornia.feature.affine_shape import LAFAffineShapeEstimator, LAFAffNetShapeEstimator, PatchAffineShapeEstimator
from kornia.testing import assert_close
class TestPatchAffineShapeEstimator:
def test_shape(self, device):
inp = torch.rand(1, 1, 32, 32, device=device)
ori = PatchAffineShapeEstimator(32).to(device)
ang = ori(inp)
assert ang.shape == torch.Size([1, 1, 3])
def test_shape_batch(self, device):
inp = torch.rand(2, 1, 32, 32, device=device)
ori = PatchAffineShapeEstimator(32).to(device)
ang = ori(inp)
assert ang.shape == torch.Size([2, 1, 3])
def test_print(self, device):
sift = PatchAffineShapeEstimator(32)
sift.__repr__()
def test_toy(self, device):
aff = PatchAffineShapeEstimator(19).to(device)
inp = torch.zeros(1, 1, 19, 19, device=device)
inp[:, :, 5:-5, 1:-1] = 1
abc = aff(inp)
expected = torch.tensor([[[0.4146, 0.0000, 1.0000]]], device=device)
assert_close(abc, expected, atol=1e-4, rtol=1e-4)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 1, 13, 13
ori = PatchAffineShapeEstimator(width).to(device)
patches = torch.rand(batch_size, channels, height, width, device=device)
patches = utils.tensor_to_gradcheck_var(patches) # to var
assert gradcheck(ori, (patches,), raise_exception=True, nondet_tol=1e-4)
def test_jit(self, device, dtype):
B, C, H, W = 2, 1, 13, 13
patches = torch.ones(B, C, H, W, device=device, dtype=dtype)
tfeat = PatchAffineShapeEstimator(W).to(patches.device, patches.dtype).eval()
tfeat_jit = torch.jit.script(PatchAffineShapeEstimator(W).to(patches.device, patches.dtype).eval())
assert_close(tfeat_jit(patches), tfeat(patches))
class TestLAFAffineShapeEstimator:
def test_shape(self, device):
inp = torch.rand(1, 1, 32, 32, device=device)
laf = torch.rand(1, 1, 2, 3, device=device)
ori = LAFAffineShapeEstimator().to(device)
out = ori(laf, inp)
assert out.shape == laf.shape
def test_shape_batch(self, device):
inp = torch.rand(2, 1, 32, 32, device=device)
laf = torch.rand(2, 34, 2, 3, device=device)
ori = LAFAffineShapeEstimator().to(device)
out = ori(laf, inp)
assert out.shape == laf.shape
def test_print(self, device):
sift = LAFAffineShapeEstimator()
sift.__repr__()
def test_toy(self, device, dtype):
aff = LAFAffineShapeEstimator(32, preserve_orientation=False).to(device, dtype)
inp = torch.zeros(1, 1, 32, 32, device=device, dtype=dtype)
inp[:, :, 15:-15, 9:-9] = 1
laf = torch.tensor([[[[20.0, 0.0, 16.0], [0.0, 20.0, 16.0]]]], device=device, dtype=dtype)
new_laf = aff(laf, inp)
expected = torch.tensor([[[[36.643, 0.0, 16.0], [0.0, 10.916, 16.0]]]], device=device, dtype=dtype)
assert_close(new_laf, expected, atol=1e-4, rtol=1e-4)
def test_toy_preserve(self, device, dtype):
aff = LAFAffineShapeEstimator(32, preserve_orientation=True).to(device, dtype)
inp = torch.zeros(1, 1, 32, 32, device=device, dtype=dtype)
inp[:, :, 15:-15, 9:-9] = 1
laf = torch.tensor([[[[0.0, 20.0, 16.0], [-20.0, 0.0, 16.0]]]], device=device, dtype=dtype)
new_laf = aff(laf, inp)
expected = torch.tensor([[[[0.0, 36.643, 16.0], [-10.916, 0, 16.0]]]], device=device, dtype=dtype)
assert_close(new_laf, expected, atol=1e-4, rtol=1e-4)
def test_toy_not_preserve(self, device):
aff = LAFAffineShapeEstimator(32, preserve_orientation=False).to(device)
inp = torch.zeros(1, 1, 32, 32, device=device)
inp[:, :, 15:-15, 9:-9] = 1
laf = torch.tensor([[[[0.0, 20.0, 16.0], [-20.0, 0.0, 16.0]]]], device=device)
new_laf = aff(laf, inp)
expected = torch.tensor([[[[36.643, 0, 16.0], [0, 10.916, 16.0]]]], device=device)
assert_close(new_laf, expected, atol=1e-4, rtol=1e-4)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 1, 40, 40
patches = torch.rand(batch_size, channels, height, width, device=device)
patches = utils.tensor_to_gradcheck_var(patches) # to var
laf = torch.tensor([[[[5.0, 0.0, 26.0], [0.0, 5.0, 26.0]]]], device=device)
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(
LAFAffineShapeEstimator(11).to(device),
(laf, patches),
raise_exception=True,
rtol=1e-3,
atol=1e-3,
nondet_tol=1e-4,
)
@pytest.mark.jit
@pytest.mark.skip("Failing because of extract patches")
def test_jit(self, device, dtype):
B, C, H, W = 1, 1, 13, 13
inp = torch.zeros(B, C, H, W, device=device)
inp[:, :, 15:-15, 9:-9] = 1
laf = torch.tensor([[[[20.0, 0.0, 16.0], [0.0, 20.0, 16.0]]]], device=device)
tfeat = LAFAffineShapeEstimator(W).to(inp.device, inp.dtype).eval()
tfeat_jit = torch.jit.script(LAFAffineShapeEstimator(W).to(inp.device, inp.dtype).eval())
assert_close(tfeat_jit(laf, inp), tfeat(laf, inp))
class TestLAFAffNetShapeEstimator:
def test_shape(self, device):
inp = torch.rand(1, 1, 32, 32, device=device)
laf = torch.rand(1, 1, 2, 3, device=device)
ori = LAFAffNetShapeEstimator(False).to(device).eval()
out = ori(laf, inp)
assert out.shape == laf.shape
def test_pretrained(self, device):
inp = torch.rand(1, 1, 32, 32, device=device)
laf = torch.rand(1, 1, 2, 3, device=device)
ori = LAFAffNetShapeEstimator(True).to(device).eval()
out = ori(laf, inp)
assert out.shape == laf.shape
def test_shape_batch(self, device):
inp = torch.rand(2, 1, 32, 32, device=device)
laf = torch.rand(2, 5, 2, 3, device=device)
ori = LAFAffNetShapeEstimator().to(device).eval()
out = ori(laf, inp)
assert out.shape == laf.shape
def test_print(self, device):
sift = LAFAffNetShapeEstimator()
sift.__repr__()
def test_toy(self, device):
aff = LAFAffNetShapeEstimator(True).to(device).eval()
inp = torch.zeros(1, 1, 32, 32, device=device)
inp[:, :, 15:-15, 9:-9] = 1
laf = torch.tensor([[[[20.0, 0.0, 16.0], [0.0, 20.0, 16.0]]]], device=device)
new_laf = aff(laf, inp)
expected = torch.tensor([[[[40.8758, 0.0, 16.0], [-0.3824, 9.7857, 16.0]]]], device=device)
assert_close(new_laf, expected, atol=1e-4, rtol=1e-4)
@pytest.mark.skip("jacobian not well computed")
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 1, 35, 35
patches = torch.rand(batch_size, channels, height, width, device=device)
patches = utils.tensor_to_gradcheck_var(patches) # to var
laf = torch.tensor([[[[8.0, 0.0, 16.0], [0.0, 8.0, 16.0]]]], device=device)
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(
LAFAffNetShapeEstimator(True).to(device, dtype=patches.dtype),
(laf, patches),
raise_exception=True,
rtol=1e-3,
atol=1e-3,
nondet_tol=1e-4,
)
@pytest.mark.jit
@pytest.mark.skip("Laf type is not a torch.Tensor????")
def test_jit(self, device, dtype):
B, C, H, W = 1, 1, 32, 32
patches = torch.rand(B, C, H, W, device=device, dtype=dtype)
laf = torch.tensor([[[[8.0, 0.0, 16.0], [0.0, 8.0, 16.0]]]], device=device)
laf_estimator = LAFAffNetShapeEstimator(True).to(device, dtype=patches.dtype).eval()
laf_estimator_jit = torch.jit.script(LAFAffNetShapeEstimator(True).to(device, dtype=patches.dtype).eval())
assert_close(laf_estimator(laf, patches), laf_estimator_jit(laf, patches))
|
tests/test_codecs.py | matchup-ir/whooshy | 270 | 12609410 | from __future__ import with_statement
import random
from array import array
import pytest
from whoosh import analysis, fields, formats, query
from whoosh.compat import u, b, text_type
from whoosh.compat import array_tobytes, xrange
from whoosh.codec import default_codec
from whoosh.filedb.filestore import RamStorage
from whoosh.util.testing import TempStorage
def _make_codec(**kwargs):
st = RamStorage()
codec = default_codec(**kwargs)
seg = codec.new_segment(st, "test")
return st, codec, seg
class FakeLengths(object):
def __init__(self, **lens):
self.lens = lens
def doc_field_length(self, docnum, fieldname):
if fieldname in self.lens:
if docnum < len(self.lens[fieldname]):
return self.lens[fieldname][docnum]
return 1
def test_termkey():
st, codec, seg = _make_codec()
tw = codec.field_writer(st, seg)
fieldobj = fields.TEXT()
tw.start_field("alfa", fieldobj)
tw.start_term(b("bravo"))
tw.add(0, 1.0, b(""), 3)
tw.finish_term()
tw.start_term(b('\xc3\xa6\xc3\xaf\xc5\xc3\xba'))
tw.add(0, 4.0, b(""), 3)
tw.finish_term()
tw.finish_field()
tw.start_field("text", fieldobj)
tw.start_term(b('\xe6\xa5\xe6\xac\xe8\xaa'))
tw.add(0, 7.0, b(""), 9)
tw.finish_term()
tw.finish_field()
tw.close()
tr = codec.terms_reader(st, seg)
assert ("alfa", b("bravo")) in tr
assert ("alfa", b('\xc3\xa6\xc3\xaf\xc5\xc3\xba')) in tr
assert ("text", b('\xe6\xa5\xe6\xac\xe8\xaa')) in tr
tr.close()
def test_random_termkeys():
def random_fieldname():
return "".join(chr(random.randint(65, 90)) for _ in xrange(1, 20))
def random_btext():
a = array("H", (random.randint(0, 0xd7ff) for _ in xrange(1, 20)))
return array_tobytes(a).decode("utf-16")
domain = sorted(set([(random_fieldname(), random_btext().encode("utf-8"))
for _ in xrange(1000)]))
st, codec, seg = _make_codec()
fieldobj = fields.TEXT()
tw = codec.field_writer(st, seg)
# Stupid ultra-low-level hand-adding of postings just to check handling of
# random fieldnames and term texts
lastfield = None
for fieldname, text in domain:
if lastfield and fieldname != lastfield:
tw.finish_field()
lastfield = None
if lastfield is None:
tw.start_field(fieldname, fieldobj)
lastfield = fieldname
tw.start_term(text)
tw.add(0, 1.0, b(""), 1)
tw.finish_term()
if lastfield:
tw.finish_field()
tw.close()
tr = codec.terms_reader(st, seg)
for term in domain:
assert term in tr
def test_stored_fields():
codec = default_codec()
fieldobj = fields.TEXT(stored=True)
with TempStorage("storedfields") as st:
seg = codec.new_segment(st, "test")
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
dw.add_field("a", fieldobj, "hello", 1)
dw.add_field("b", fieldobj, "there", 1)
dw.finish_doc()
dw.start_doc(1)
dw.add_field("a", fieldobj, "one", 1)
dw.add_field("b", fieldobj, "two", 1)
dw.add_field("c", fieldobj, "three", 1)
dw.finish_doc()
dw.start_doc(2)
dw.finish_doc()
dw.start_doc(3)
dw.add_field("a", fieldobj, "alfa", 1)
dw.add_field("b", fieldobj, "bravo", 1)
dw.finish_doc()
dw.close()
seg.set_doc_count(4)
pdr = codec.per_document_reader(st, seg)
assert pdr.doc_count_all() == 4
assert pdr.stored_fields(0) == {"a": "hello", "b": "there"}
# Note: access out of order
assert pdr.stored_fields(3), {"a": "alfa", "b": "bravo"}
assert pdr.stored_fields(1) == {"a": "one", "b": "two", "c": "three"}
sfs = list(pdr.all_stored_fields())
assert len(sfs) == 4
assert sfs == [{"a": "hello", "b": "there"},
{"a": "one", "b": "two", "c": "three"},
{},
{"a": "alfa", "b": "bravo"},
]
pdr.close()
def test_termindex():
terms = [("a", "alfa"), ("a", "bravo"), ("a", "charlie"), ("a", "delta"),
("b", "able"), ("b", "baker"), ("b", "dog"), ("b", "easy")]
st, codec, seg = _make_codec()
schema = fields.Schema(a=fields.TEXT, b=fields.TEXT)
tw = codec.field_writer(st, seg)
postings = ((fname, b(text), 0, i, b("")) for (i, (fname, text))
in enumerate(terms))
tw.add_postings(schema, FakeLengths(), postings)
tw.close()
tr = codec.terms_reader(st, seg)
for i, (fieldname, text) in enumerate(terms):
assert (fieldname, b(text)) in tr
ti = tr.term_info(fieldname, b(text))
assert ti.weight() == i
assert ti.doc_frequency() == 1
def test_docwriter_one():
field = fields.TEXT(stored=True)
st, codec, seg = _make_codec()
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
dw.add_field("text", field, "Testing one two three", 4)
dw.finish_doc()
dw.close()
seg.set_doc_count(1)
pdr = codec.per_document_reader(st, seg)
assert pdr.doc_field_length(0, "text") == 4
assert pdr.stored_fields(0) == {"text": "Testing one two three"}
def test_docwriter_two():
field = fields.TEXT(stored=True)
st, codec, seg = _make_codec()
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
dw.add_field("title", field, ("a", "b"), 2)
dw.add_field("text", field, "Testing one two three", 4)
dw.finish_doc()
dw.start_doc(1)
dw.add_field("title", field, "The second document", 3)
dw.add_field("text", field, 500, 1)
dw.finish_doc()
dw.close()
seg.set_doc_count(2)
pdr = codec.per_document_reader(st, seg)
assert pdr.doc_field_length(0, "title") == 2
assert pdr.doc_field_length(0, "text") == 4
assert pdr.doc_field_length(1, "title") == 3
assert pdr.doc_field_length(1, "text") == 1
assert (pdr.stored_fields(0)
== {"title": ("a", "b"), "text": "Testing one two three"})
assert (pdr.stored_fields(1)
== {"title": "The second document", "text": 500})
def test_vector():
field = fields.TEXT(vector=True)
st, codec, seg = _make_codec()
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
dw.add_field("title", field, None, 1)
dw.add_vector_items("title", field, [(u("alfa"), 1.0, b("t1")),
(u("bravo"), 2.0, b("t2"))])
dw.finish_doc()
dw.close()
seg.set_doc_count(1)
pdr = codec.per_document_reader(st, seg)
assert pdr.stored_fields(0) == {}
m = pdr.vector(0, "title", field.vector)
assert m.is_active()
ps = []
while m.is_active():
ps.append((m.id(), m.weight(), m.value()))
m.next()
assert ps == [(u("alfa"), 1.0, b("t1")), (u("bravo"), 2.0, b("t2"))]
def test_vector_values():
field = fields.TEXT(vector=formats.Frequency())
st, codec, seg = _make_codec()
content = u("alfa bravo charlie alfa")
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
vals = ((t, w, v) for t, _, w, v
in sorted(field.vector.word_values(content, field.analyzer)))
dw.add_vector_items("f1", field, vals)
dw.finish_doc()
dw.close()
vr = codec.per_document_reader(st, seg)
m = vr.vector(0, "f1", field.vector)
assert (list(m.items_as("frequency"))
== [("alfa", 2), ("bravo", 1), ("charlie", 1)])
def test_no_lengths():
f1 = fields.ID()
st, codec, seg = _make_codec()
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
dw.add_field("name", f1, None, None)
dw.finish_doc()
dw.start_doc(1)
dw.add_field("name", f1, None, None)
dw.finish_doc()
dw.start_doc(2)
dw.add_field("name", f1, None, None)
dw.finish_doc()
dw.close()
seg.set_doc_count(3)
pdr = codec.per_document_reader(st, seg)
assert pdr.doc_field_length(0, "name") == 0
assert pdr.doc_field_length(1, "name") == 0
assert pdr.doc_field_length(2, "name") == 0
def test_store_zero():
f1 = fields.ID(stored=True)
st, codec, seg = _make_codec()
dw = codec.per_document_writer(st, seg)
dw.start_doc(0)
dw.add_field("name", f1, 0, None)
dw.finish_doc()
dw.close()
seg.set_doc_count(1)
sr = codec.per_document_reader(st, seg)
assert sr.stored_fields(0) == {"name": 0}
def test_fieldwriter_single_term():
field = fields.TEXT()
st, codec, seg = _make_codec()
fw = codec.field_writer(st, seg)
fw.start_field("text", field)
fw.start_term(b("alfa"))
fw.add(0, 1.5, b("test"), 1)
fw.finish_term()
fw.finish_field()
fw.close()
tr = codec.terms_reader(st, seg)
assert ("text", b("alfa")) in tr
ti = tr.term_info("text", b("alfa"))
assert ti.weight() == 1.5
assert ti.doc_frequency() == 1
assert ti.min_length() == 1
assert ti.max_length() == 1
assert ti.max_weight() == 1.5
assert ti.min_id() == 0
assert ti.max_id() == 0
def test_fieldwriter_two_terms():
field = fields.TEXT()
st, codec, seg = _make_codec()
fw = codec.field_writer(st, seg)
fw.start_field("text", field)
fw.start_term(b("alfa"))
fw.add(0, 2.0, b("test1"), 2)
fw.add(1, 1.0, b("test2"), 1)
fw.finish_term()
fw.start_term(b("bravo"))
fw.add(0, 3.0, b("test3"), 3)
fw.add(2, 2.0, b("test4"), 2)
fw.finish_term()
fw.finish_field()
fw.close()
tr = codec.terms_reader(st, seg)
assert ("text", b("alfa")) in tr
ti = tr.term_info("text", b("alfa"))
assert ti.weight() == 3.0
assert ti.doc_frequency() == 2
assert ti.min_length() == 1
assert ti.max_length() == 2
assert ti.max_weight() == 2.0
assert ti.min_id() == 0
assert ti.max_id() == 1
assert ("text", b("bravo")) in tr
ti = tr.term_info("text", b("bravo"))
assert ti.weight() == 5.0
assert ti.doc_frequency() == 2
assert ti.min_length() == 2
assert ti.max_length() == 3
assert ti.max_weight() == 3.0
assert ti.min_id() == 0
assert ti.max_id() == 2
m = tr.matcher("text", b("bravo"), field.format)
assert list(m.all_ids()) == [0, 2]
def test_fieldwriter_multiblock():
field = fields.TEXT()
st, codec, seg = _make_codec(blocklimit=2)
fw = codec.field_writer(st, seg)
fw.start_field("text", field)
fw.start_term(b("alfa"))
fw.add(0, 2.0, b("test1"), 2)
fw.add(1, 5.0, b("test2"), 5)
fw.add(2, 3.0, b("test3"), 3)
fw.add(3, 4.0, b("test4"), 4)
fw.add(4, 1.0, b("test5"), 1)
fw.finish_term()
fw.finish_field()
fw.close()
tr = codec.terms_reader(st, seg)
ti = tr.term_info("text", b("alfa"))
assert ti.weight() == 15.0
assert ti.doc_frequency() == 5
assert ti.min_length() == 1
assert ti.max_length() == 5
assert ti.max_weight() == 5.0
assert ti.min_id() == 0
assert ti.max_id() == 4
ps = []
m = tr.matcher("text", b("alfa"), field.format)
while m.is_active():
ps.append((m.id(), m.weight(), m.value()))
m.next()
assert ps == [(0, 2.0, b("test1")), (1, 5.0, b("test2")),
(2, 3.0, b("test3")), (3, 4.0, b("test4")),
(4, 1.0, b("test5"))]
def test_term_values():
field = fields.TEXT(phrase=False)
st, codec, seg = _make_codec()
content = u("alfa bravo charlie alfa")
fw = codec.field_writer(st, seg)
fw.start_field("f1", field)
for text, freq, weight, val in sorted(field.index(content)):
fw.start_term(text)
fw.add(0, weight, val, freq)
fw.finish_term()
fw.finish_field()
fw.close()
tr = codec.terms_reader(st, seg)
ps = [(term, ti.weight(), ti.doc_frequency()) for term, ti in tr.items()]
assert ps == [(("f1", b("alfa")), 2.0, 1), (("f1", b("bravo")), 1.0, 1),
(("f1", b("charlie")), 1.0, 1)]
def test_skip():
_docnums = [1, 3, 12, 34, 43, 67, 68, 102, 145, 212, 283, 291, 412, 900,
905, 1024, 1800, 2048, 15000]
st, codec, seg = _make_codec()
fieldobj = fields.TEXT()
fw = codec.field_writer(st, seg)
fw.start_field("f1", fieldobj)
fw.start_term(b("test"))
for n in _docnums:
fw.add(n, 1.0, b(''), None)
fw.finish_term()
fw.finish_field()
fw.close()
tr = codec.terms_reader(st, seg)
m = tr.matcher("f1", b("test"), fieldobj.format)
assert m.id() == 1
m.skip_to(220)
assert m.id() == 283
m.skip_to(1)
assert m.id() == 283
m.skip_to(1000)
assert m.id() == 1024
m.skip_to(1800)
assert m.id() == 1800
# def test_spelled_field():
# field = fields.TEXT(spelling=True)
# st, codec, seg = _make_codec()
#
# fw = codec.field_writer(st, seg)
# fw.start_field("text", field)
# fw.start_term(b("special"))
# fw.add(0, 1.0, b("test1"), 1)
# fw.finish_term()
# fw.start_term(b("specific"))
# fw.add(1, 1.0, b("test2"), 1)
# fw.finish_term()
# fw.finish_field()
# fw.close()
#
# gr = codec.graph_reader(st, seg)
# assert gr.has_root("text")
# cur = gr.cursor("text")
# strings = list(cur.flatten_strings())
# assert type(strings[0]) == text_type
# assert strings == ["special", "specific"]
#
#
# def test_special_spelled_field():
# from whoosh.analysis import StemmingAnalyzer
#
# field = fields.TEXT(analyzer=StemmingAnalyzer(), spelling=True)
# st, codec, seg = _make_codec()
#
# fw = codec.field_writer(st, seg)
# fw.start_field("text", field)
# fw.start_term(b("special"))
# fw.add(0, 1.0, b("test1"), 1)
# fw.finish_term()
# fw.start_term(b("specific"))
# fw.add(1, 1.0, b("test2"), 1)
# fw.finish_term()
# fw.add_spell_word("text", u("specials"))
# fw.add_spell_word("text", u("specifically"))
# fw.finish_field()
# fw.close()
#
# tr = codec.terms_reader(st, seg)
# assert list(tr.terms()) == [("text", b("special")), ("text", b("specific"))]
#
# cur = codec.graph_reader(st, seg).cursor("text")
# assert list(cur.flatten_strings()) == ["specials", "specifically"]
def test_plaintext_codec():
pytest.importorskip("ast")
from whoosh.codec.plaintext import PlainTextCodec
from whoosh.codec.whoosh3 import W3Codec
ana = analysis.StemmingAnalyzer()
schema = fields.Schema(a=fields.TEXT(vector=True, sortable=True),
b=fields.STORED,
c=fields.NUMERIC(stored=True, sortable=True),
d=fields.TEXT(analyzer=ana, spelling=True))
st = RamStorage()
ix = st.create_index(schema)
with ix.writer(codec=W3Codec()) as w:
w.add_document(a=u("alfa bravo charlie"), b="hello", c=100,
d=u("quelling whining echoing"))
w.add_document(a=u("bravo charlie delta"), b=1000, c=200,
d=u("rolling timing yelling"))
w.add_document(a=u("charlie delta echo"), b=5.5, c=300,
d=u("using opening pulling"))
w.add_document(a=u("delta echo foxtrot"), b=True, c=-100,
d=u("aching selling dipping"))
w.add_document(a=u("echo foxtrot india"), b=None, c=-200,
d=u("filling going hopping"))
with ix.reader() as r:
assert r.has_column("a")
c = r.column_reader("a")
assert c[2] == u("charlie delta echo")
w = ix.writer(codec=PlainTextCodec())
w.commit(optimize=True)
with ix.searcher() as s:
reader = s.reader()
assert isinstance(reader.codec(), PlainTextCodec)
r = s.search(query.Term("a", "delta"))
assert len(r) == 3
assert [hit["b"] for hit in r] == [1000, 5.5, True]
assert (" ".join(s.field_terms("a"))
== "alfa bravo charlie delta echo foxtrot india")
storage = ix.storage
for fname in storage.list():
if fname.endswith(".dcs"):
f = storage.open_file(fname)
# print(f.read().decode("utf8"))
assert reader.doc_field_length(0, "a") == 3
assert reader.doc_field_length(2, "a") == 3
cfield = schema["c"]
        assert type(cfield) == fields.NUMERIC
sortables = list(cfield.sortable_terms(reader, "c"))
assert sortables
assert ([cfield.from_bytes(t) for t in sortables]
== [-200, -100, 100, 200, 300])
assert reader.has_column("a")
c = reader.column_reader("a")
assert c[2] == u("charlie delta echo")
assert reader.has_column("c")
c = reader.column_reader("c")
assert list(c) == [100, 200, 300, -100, -200]
assert s.has_vector(2, "a")
v = s.vector(2, "a")
assert " ".join(v.all_ids()) == "charlie delta echo"
def test_memory_codec():
from whoosh.codec import memory
from whoosh.searching import Searcher
ana = analysis.StemmingAnalyzer()
schema = fields.Schema(a=fields.TEXT(vector=True),
b=fields.STORED,
c=fields.NUMERIC(stored=True, sortable=True),
d=fields.TEXT(analyzer=ana, spelling=True))
codec = memory.MemoryCodec()
with codec.writer(schema) as w:
w.add_document(a=u("<NAME>"), b="hello", c=100,
d=u("quelling whining echoing"))
w.add_document(a=u("bravo charlie delta"), b=1000, c=200,
d=u("rolling timing yelling"))
w.add_document(a=u("charlie delta echo"), b=5.5, c=300,
d=u("using opening pulling"))
w.add_document(a=u("delta echo foxtrot"), b=True, c=-100,
d=u("aching selling dipping"))
w.add_document(a=u("echo foxtrot india"), b=None, c=-200,
d=u("filling going hopping"))
reader = codec.reader(schema)
s = Searcher(reader)
assert ("a", "delta") in reader
q = query.Term("a", "delta")
r = s.search(q)
assert len(r) == 3
assert [hit["b"] for hit in r] == [1000, 5.5, True]
assert (" ".join(s.field_terms("a"))
== "alfa bravo charlie delta echo foxtrot india")
cfield = schema["c"]
c_sortables = cfield.sortable_terms(reader, "c")
c_values = [cfield.from_bytes(t) for t in c_sortables]
    assert c_values == [-200, -100, 100, 200, 300]
assert reader.has_column("c")
c_values = list(reader.column_reader("c"))
assert c_values == [100, 200, 300, -100, -200]
assert s.has_vector(2, "a")
v = s.vector(2, "a")
assert " ".join(v.all_ids()) == "charlie delta echo"
def test_memory_multiwrite():
from whoosh.codec import memory
domain = ["alfa bravo charlie delta",
"bravo charlie delta echo",
"charlie delta echo foxtrot",
"delta echo foxtrot india",
"echo foxtrot india juliet"]
schema = fields.Schema(line=fields.TEXT(stored=True))
codec = memory.MemoryCodec()
for line in domain:
with codec.writer(schema) as w:
w.add_document(line=u(line))
reader = codec.reader(schema)
assert [sf["line"] for sf in reader.all_stored_fields()] == domain
assert (" ".join(reader.field_terms("line"))
== "alfa bravo charlie delta echo foxtrot india juliet")
|
QuickPotato/database/schemas.py | afparsons/QuickPotato | 130 | 12609412 | from sqlalchemy import MetaData, Table, Column, Integer, Float, String, Boolean
class RawStatisticsSchemas(object):
@staticmethod
def performance_statistics_schema():
meta = MetaData()
table = Table(
"performance_statistics", meta,
Column('id', Integer, primary_key=True),
Column('test_id', String(99)),
Column("test_case_name", String(999)),
Column('sample_id', String(99)),
Column("name_of_method_under_test", String(999)),
Column("epoch_timestamp", Integer),
Column("human_timestamp", String(99)),
Column("child_path", String(999)),
Column("child_line_number", Integer),
Column("child_function_name", String(999)),
Column("parent_path", String(999)),
Column("parent_line_number", Integer),
Column("parent_function_name", String(999)),
Column("number_of_calls", String(99)),
Column("total_time", Float),
Column("cumulative_time", Float),
Column("total_response_time", Float),
)
return table
class UnitPerformanceTestResultSchemas(object):
@staticmethod
def test_report_schema():
meta = MetaData()
table = Table(
"test_report", meta,
Column('id', Integer, primary_key=True),
Column('test_id', String(99)),
Column("test_case_name", String(999)),
Column("epoch_timestamp", Integer),
Column("human_timestamp", String(99)),
Column("status", Boolean),
Column("boundaries_breached", Boolean),
Column("regression_found", Boolean),
)
return table
@staticmethod
def boundaries_test_evidence_schema():
meta = MetaData()
table = Table(
"boundaries_test_evidence", meta,
Column('id', Integer, primary_key=True),
Column('test_id', String(99)),
Column("test_case_name", String(999)),
Column("epoch_timestamp", Integer),
Column("human_timestamp", String(99)),
Column("verification_name", String(999)),
Column("status", Boolean),
Column("value", Float),
Column("boundary", Float)
)
return table
@staticmethod
def regression_test_evidence_schema():
meta = MetaData()
table = Table(
"regression_test_evidence", meta,
Column('id', Integer, primary_key=True),
Column('test_id', String(99)),
Column("test_case_name", String(999)),
Column("epoch_timestamp", Integer),
Column("human_timestamp", String(99)),
Column("verification_name", String(999)),
Column("status", Boolean),
Column("value", Float),
Column("critical_value", Float)
)
return table
|
ipyleaflet/_version.py | vishalbelsare/ipyleaflet | 903 | 12609446 | <filename>ipyleaflet/_version.py
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
#
version_info = (0, 15, 0)
__version__ = '%s.%s.%s' % (version_info[0], version_info[1], version_info[2])
EXTENSION_VERSION = '^0.15.0'
|
samples/test/lightweight_python_functions_v2_with_outputs_test.py | rahulsmehta/pipelines | 2,860 | 12609454 | <reponame>rahulsmehta/pipelines
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pprint
import unittest
import kfp
import kfp_server_api
import os
from minio import Minio
from .lightweight_python_functions_v2_with_outputs import pipeline
from kfp.samples.test.utils import KfpMlmdClient, run_pipeline_func, TestCase
def verify(run: kfp_server_api.ApiRun, mlmd_connection_config, **kwargs):
t = unittest.TestCase()
t.maxDiff = None # we always want to see full diff
t.assertEqual(run.status, 'Succeeded')
client = KfpMlmdClient(mlmd_connection_config=mlmd_connection_config)
tasks = client.get_tasks(run_id=run.id)
pprint(tasks)
output_artifact = tasks['output-artifact']
output = [
a for a in output_artifact.outputs.artifacts if a.name == 'Output'
][0]
pprint(output)
host = os.environ['MINIO_SERVICE_SERVICE_HOST']
port = os.environ['MINIO_SERVICE_SERVICE_PORT']
minio = Minio(
f'{host}:{port}',
access_key='minio',
secret_key='minio123',
secure=False)
bucket, key = output.uri[len('minio://'):].split('/', 1)
print(f'bucket={bucket} key={key}')
response = minio.get_object(bucket, key)
data = response.read().decode('UTF-8')
t.assertEqual(data, 'firstsecond\nfirstsecond\nfirstsecond')
run_pipeline_func([
TestCase(
pipeline_func=pipeline,
mode=kfp.dsl.PipelineExecutionMode.V2_ENGINE,
),
])
|
api/tests/opentrons/drivers/mag_deck/test_driver.py | anuwrag/opentrons | 235 | 12609456 | from mock import AsyncMock
import pytest
from opentrons.drivers.asyncio.communication.serial_connection import SerialConnection
from opentrons.drivers.mag_deck.driver import (
MagDeckDriver,
MAG_DECK_COMMAND_TERMINATOR,
GCODE_ROUNDING_PRECISION,
)
from opentrons.drivers.command_builder import CommandBuilder
@pytest.fixture
def connection() -> AsyncMock:
return AsyncMock(spec=SerialConnection)
@pytest.fixture
def driver(connection: AsyncMock) -> MagDeckDriver:
connection.send_command.return_value = ""
return MagDeckDriver(connection)
async def test_home(driver: MagDeckDriver, connection: AsyncMock) -> None:
"""It should send a home command"""
await driver.home()
expected = CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR).add_gcode(
gcode="G28.2"
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
async def test_probe_plate(driver: MagDeckDriver, connection: AsyncMock) -> None:
"""It should send a probe plate command"""
await driver.probe_plate()
expected = CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR).add_gcode(
gcode="G38.2"
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
async def test_get_plate_height(driver: MagDeckDriver, connection: AsyncMock) -> None:
"""It should send a get plate height command and parse response"""
connection.send_command.return_value = "height:12.34"
response = await driver.get_plate_height()
expected = CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR).add_gcode(
gcode="M836"
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
assert response == 12.34
async def test_get_mag_position(driver: MagDeckDriver, connection: AsyncMock) -> None:
"""It should send a get mag position command and parse response"""
connection.send_command.return_value = "Z:12.34"
response = await driver.get_mag_position()
expected = CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR).add_gcode(
gcode="M114.2"
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
assert response == 12.34
async def test_move(driver: MagDeckDriver, connection: AsyncMock) -> None:
"""It should send a move command"""
await driver.move(321.2214)
expected = (
CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR)
.add_gcode(gcode="G0")
.add_float(prefix="Z", value=321.2214, precision=GCODE_ROUNDING_PRECISION)
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
async def test_get_device_info(driver: MagDeckDriver, connection: AsyncMock) -> None:
"""It should send a get device info command and parse response"""
connection.send_command.return_value = "serial:s model:m version:v"
response = await driver.get_device_info()
expected = CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR).add_gcode(
gcode="M115"
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
assert response == {"serial": "s", "model": "m", "version": "v"}
async def test_enter_programming_mode(
driver: MagDeckDriver, connection: AsyncMock
) -> None:
"""It should send an enter programming mode command"""
await driver.enter_programming_mode()
expected = CommandBuilder(terminator=MAG_DECK_COMMAND_TERMINATOR).add_gcode(
gcode="dfu"
)
connection.send_command.assert_called_once_with(command=expected, retries=3)
|
struct/read-write-file/main.py | whitmans-max/python-examples | 140 | 12609467 | <filename>struct/read-write-file/main.py<gh_stars>100-1000
#!/usr/bin/env python3
# date: 2019.11.27
#
import struct
struct_format = 'i20s'
struct_length = struct.calcsize(struct_format)
def main():
print('--- OPEN ---')
stream = open("test.bin", 'rb+');
# ---
print('--- READ ---')
buffer = stream.read(struct_length)
i, s = struct.unpack(struct_format, buffer)
print('int:', i) # int
print('str:', s.decode()) # char[20]
# ---
print('--- SEEK ---')
stream.seek(0, 0)
# ---
print('--- WRITE ---')
i = 333
s = "abcde".encode()
buffer = struct.pack(struct_format, i, s)
stream.write(buffer)
# ---
print('--- CLOSE ---')
stream.close()
#input("PAUSE (press ENTER)")
main()
|
modal/node_modules/accessibility-developer-tools/scripts/parse_aria_schemas.py | maze-runnar/modal-component | 2,158 | 12609472 | <filename>modal/node_modules/accessibility-developer-tools/scripts/parse_aria_schemas.py
import json
import re
import urllib
import xml.etree.ElementTree as ET
def parse_attributes():
schema = urllib.urlopen('http://www.w3.org/MarkUp/SCHEMA/aria-attributes-1.xsd')
tree = ET.parse(schema)
for node in tree.iter():
node.tag = re.sub(r'{.*}', r'', node.tag)
type_map = {
'states': 'state',
'props': 'property'
}
properties = {}
groups = tree.getroot().findall('attributeGroup')
print groups
for group in groups:
print(group.get('name'))
name_match = re.match(r'ARIA\.(\w+)\.attrib', group.get('name'))
if not name_match:
continue
group_type = name_match.group(1)
print group_type
if group_type not in type_map:
continue
type = type_map[group_type]
for child in group:
name = re.sub(r'aria-', r'', child.attrib['name'])
property = {}
property['type'] = type
if 'type' in child.attrib:
valueType = re.sub(r'xs:', r'', child.attrib['type'])
if valueType == 'IDREF':
property['valueType'] = 'idref'
elif valueType == 'IDREFS':
property['valueType'] = 'idref_list'
else:
property['valueType'] = valueType
else:
type_spec = child.findall('simpleType')[0]
restriction_spec = type_spec.findall('restriction')[0]
base = restriction_spec.attrib['base']
if base == 'xs:NMTOKENS':
property['valueType'] = 'token_list'
elif base == 'xs:NMTOKEN':
property['valueType'] = 'token'
else:
raise Exception('Unknown value type: %s' % base)
values = []
for value_type in restriction_spec:
values.append(value_type.get('value'))
property['values'] = values
if 'default' in child.attrib:
property['defaultValue'] = child.attrib['default']
properties[name] = property
return json.dumps(properties, sort_keys=True, indent=4, separators=(',', ': '))
if __name__ == "__main__":
attributes_json = parse_attributes()
constants_file = open('src/js/Constants.js', 'r')
new_constants_file = open('src/js/Constants.new.js', 'w')
in_autogen_block = False
for line in constants_file:
if not in_autogen_block:
new_constants_file.write('%s' % line)
if re.match(r'// BEGIN ARIA_PROPERTIES_AUTOGENERATED', line):
in_autogen_block = True
if re.match(r'// END ARIA_PROPERTIES_AUTOGENERATED', line):
break
new_constants_file.write('/** @type {Object.<string, Object>} */\n')
new_constants_file.write('axs.constants.ARIA_PROPERTIES = %s;\n' % attributes_json)
new_constants_file.write('// END ARIA_PROPERTIES_AUTOGENERATED\n')
for line in constants_file:
new_constants_file.write('%s' % line)
|
tests/nlu_core_tests/training_tests/classifiers/classifier_dl_tests.py | milyiyo/nlu | 480 | 12609481 | <gh_stars>100-1000
from sklearn.metrics import classification_report
import unittest
from nlu import *
import tests.test_utils as t
import pandas as pd
class ClassifierDlTests(unittest.TestCase):
def test_classifier_dl_training_labse(self):
test_df = self.load_classifier_dl_dataset()
train_df = test_df
train_df.columns = ['y','text']
test_df.columns = ['y','text']
pipe = nlu.load('xx.embed_sentence.labse train.classifier',verbose=True,)
pipe['classifier_dl'].setMaxEpochs(2)
pipe = pipe.fit(train_df)
df = pipe.predict(test_df)
pipe.print_info()
print(df.columns)
for c in df.columns : print (df[c])
def test_classifier_dl_training(self):
test_df = self.load_classifier_dl_dataset()
train_df = test_df
train_df.columns = ['y','text']
test_df.columns = ['y','text']
pipe = nlu.load('train.classifier',verbose=True,)
pipe['classifier_dl'].setMaxEpochs(2)
pipe = pipe.fit(train_df)
df = pipe.predict(train_df)
# print(df[['category','y']])
df = pipe.predict(test_df)
pipe.print_info()
print(df.columns)
# print(df[['category','y']])
# print (classification_report(df['y'], df['category']))
for c in df.columns : print (df[c])
# pipe.save('/home/loan/Documents/freelance/jsl/nlu/nlu4realgit/tests/trained_models/quick_classifi')
# Too heavy running on github actions
# def test_classifier_dl_custom_embeds_doc_level(self):
# test_df = self.load_classifier_dl_dataset()
# train_df = test_df
# train_df.columns = ['y','text']
# test_df.columns = ['y','text']
# pipe = nlu.load('embed_sentence.bert train.classifier',verbose=True,)
# pipe['classifier_dl'].setMaxEpochs(2)
# fitted_model = pipe.fit(train_df)
# df = fitted_model.predict(train_df, output_level='document')
# print(df.columns)
# print(df[['category','y']])
# df = fitted_model.predict(test_df, output_level='document')
# print(df.columns)
# print(df[['category','y']])
#
# # Eval results
# from sklearn.metrics import classification_report
#
# print (classification_report(df['y'], df['category']))
#
# def test_classifier_dl_custom_embeds_sentence_level(self):
# test_df = self.load_classifier_dl_dataset()
# train_df = test_df
# train_df.columns = ['y','text']
# test_df.columns = ['y','text']
# pipe = nlu.load('embed_sentence.bert train.classifier',verbose=True,)
# pipe['classifier_dl'].setMaxEpochs(2)
# fitted_model = pipe.fit(train_df)
# df = fitted_model.predict(train_df, output_level='sentence')
#
# print(df.columns)
# print(df[['category','y']])
# df = fitted_model.predict(test_df, output_level='sentence')
# print(df.columns)
# print(df[['category','y']])
#
# # Eval results
# from sklearn.metrics import classification_report
#
# print (classification_report(df['y'], df['category']))
#
#
# def test_classifier_dl_custom_embeds_auto_level(self):
# test_df = self.load_classifier_dl_dataset()
# train_df = test_df
# train_df.columns = ['y','text']
# test_df.columns = ['y','text']
# pipe = nlu.load('embed_sentence.bert train.classifier',verbose=True,)
# pipe['classifier_dl'].setMaxEpochs(2)
# fitted_model = pipe.fit(train_df)
# df = fitted_model.predict(train_df)
# print(df.columns)
# print(df[['category','y']])
# df = fitted_model.predict(test_df)
# print(df.columns)
# print(df[['category','y']])
#
# # Eval results
# from sklearn.metrics import classification_report
#
# print (classification_report(df['y'], df['category']))
def load_classifier_dl_dataset(self):
output_file_name = 'news_category_test.csv'
output_folder = 'classifier_dl/'
data_url = "https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/classifier-dl/news_Category/news_category_test.csv"
return pd.read_csv(t.download_dataset(data_url,output_file_name,output_folder)).iloc[0:15]
if __name__ == '__main__':
unittest.main()
|
piwheels/master/cloud_gazer.py | jgillis/piwheels | 120 | 12609518 | <reponame>jgillis/piwheels
# The piwheels project
# Copyright (c) 2017 <NAME> <https://github.com/bennuttall>
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines the :class:`CloudGazer` task; see class for more details.
.. autoclass:: CloudGazer
:members:
"""
from datetime import timedelta
from .. import protocols, transport, tasks, const
from ..format import canonicalize_name
from .pypi import PyPIEvents
from .the_oracle import DbClient
class CloudGazer(tasks.PauseableTask):
"""
This task scrapes PyPI for the list of available packages, and the versions
of those packages. This information is written into the backend database
for :class:`~.the_architect.TheArchitect` to use.
"""
name = 'master.cloud_gazer'
def __init__(self, config):
super().__init__(config)
self.db = DbClient(config, self.logger)
self.pypi = PyPIEvents(
pypi_xmlrpc=config.pypi_xmlrpc,
pypi_json=config.pypi_json)
self.web_queue = self.socket(
transport.REQ, protocol=reversed(protocols.the_scribe))
self.web_queue.connect(config.web_queue)
self.skip_queue = self.socket(
transport.REQ, protocol=protocols.cloud_gazer)
self.skip_queue.connect(const.SKIP_QUEUE)
self.serial = -1
self.packages = None
if config.dev_mode:
self.skip_default = 'development mode'
else:
self.skip_default = ''
self.every(timedelta(seconds=10), self.read_pypi)
def once(self):
self.logger.info('retrieving current state')
self.packages = self.db.get_all_packages()
self.pypi.serial = self.serial = self.db.get_pypi_serial()
self.logger.info('querying upstream')
def add_package_name(self, package, package_alias, timestamp):
if package_alias != package:
# add the canonical form as an alias, with no timestamp
# (gets logged as 1970) so it can't become the display name
self.db.add_package_name(package, package)
# add the used alias with the event timestamp
self.db.add_package_name(package, package_alias, timestamp)
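        # For example (an assumed event, not taken from real PyPI data): an
        # upload under the alias "Foo.Bar" canonicalises to "foo-bar", so both
        # "foo-bar" (undated) and "Foo.Bar" (dated) are recorded as names of
        # the same package, and only the dated alias is eligible to become the
        # display name.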
def read_pypi(self):
for package_alias, version, timestamp, action, description in self.pypi:
package = canonicalize_name(package_alias)
if action == 'remove':
if version is None:
self.logger.info('marking package %s for deletion', package)
self.db.skip_package(package, 'deleted')
self.packages.discard(package)
self.web_queue.send_msg('DELPKG', package)
self.skip_queue.send_msg('DELPKG', package)
self.web_queue.recv_msg()
self.skip_queue.recv_msg()
self.db.delete_package(package)
else:
self.logger.info('marking package %s version %s for deletion',
package, version)
self.db.skip_package_version(package, version, 'deleted')
self.web_queue.send_msg('DELVER', (package, version))
self.skip_queue.send_msg('DELVER', (package, version))
self.web_queue.recv_msg()
self.skip_queue.recv_msg()
self.db.delete_version(package, version)
else:
if package not in self.packages:
self.packages.add(package)
if self.db.add_new_package(package, skip=self.skip_default,
description=description or ''):
self.logger.info('added package %s', package)
self.add_package_name(package, package_alias, timestamp)
self.web_queue.send_msg('BOTH', package)
self.web_queue.recv_msg()
elif description is not None:
self.db.set_package_description(package, description)
self.add_package_name(package, package_alias, timestamp)
if version is not None:
skip = '' if action == 'source' else 'binary only'
update = 'BOTH'
if action == 'yank':
self.db.yank_version(package, version)
self.logger.info(
'yanked package %s version %s', package, version)
elif action == 'unyank':
self.db.unyank_version(package, version)
self.logger.info(
'unyanked package %s version %s', package, version)
elif self.db.add_new_package_version(package, version,
timestamp, skip):
self.logger.info(
'added package %s version %s', package, version)
if action != 'source':
self.logger.info(
'disabled package %s version %s (binary only)',
package, version)
if description is not None:
self.db.set_package_description(
package, description)
elif action == 'source' and self.db.get_version_skip(
package, version) == 'binary only':
self.db.skip_package_version(package, version, '')
self.logger.info(
'enabled package %s version %s', package, version)
update = 'PROJECT'
self.web_queue.send_msg(update, package)
self.web_queue.recv_msg()
if self.serial < self.pypi.serial:
self.serial = self.pypi.serial
self.db.set_pypi_serial(self.serial)
|
completions/properties.py | y0ssar1an/CSS3 | 192 | 12609522 | <reponame>y0ssar1an/CSS3
from CSS3.completions import types as t
import sublime
# PROPERTIES
names = [
("align-content", "align-content: ${1};"),
("align-items", "align-items: ${1};"),
("align-self", "align-self: ${1};"),
("alignment-baseline", "alignment-baseline: ${1};"),
("all", "all: ${1};"),
("animation", "animation: ${1};"),
("animation-composition", "animation-composition: ${1};"),
("animation-delay", "animation-delay: ${1};"),
("animation-direction", "animation-direction: ${1};"),
("animation-duration", "animation-duration: ${1};"),
("animation-fill-mode", "animation-fill-mode: ${1};"),
("animation-iteration-count", "animation-iteration-count: ${1};"),
("animation-name", "animation-name: ${1};"),
("animation-play-state", "animation-play-state: ${1};"),
("animation-timing-function", "animation-timing-function: ${1};"),
("appearance", "appearance: ${1};"),
("aspect-ratio", "aspect-ratio: ${1};"),
("backface-visibility", "backface-visibility: ${1};"),
("background", "background: ${1};"),
("background-attachment", "background-attachment: ${1};"),
("background-blend-mode", "background-blend-mode: ${1};"),
("background-clip", "background-clip: ${1};"),
("background-color", "background-color: ${1};"),
("background-image", "background-image: ${1};"),
("background-image-transform", "background-image-transform: ${1};"),
("background-origin", "background-origin: ${1};"),
("background-position", "background-position: ${1};"),
("background-position-x", "background-position-x: ${1};"),
("background-position-y", "background-position-y: ${1};"),
("background-repeat", "background-repeat: ${1};"),
("background-size", "background-size: ${1};"),
("baseline-shift", "baseline-shift: ${1};"),
("bleed", "bleed: ${1};"),
("block-ellipsis", "block-ellipsis: ${1};"),
("block-size", "block-size: ${1};"),
("block-step", "block-step: ${1};"),
("block-step-align", "block-step-align: ${1};"),
("block-step-insert", "block-step-insert: ${1};"),
("block-step-round", "block-step-round: ${1};"),
("block-step-size", "block-step-size: ${1};"),
("bookmark-label", "bookmark-label: ${1};"),
("bookmark-level", "bookmark-level: ${1};"),
("bookmark-state", "bookmark-state: ${1};"),
("border", "border: ${1};"),
("border-block", "border-block: ${1};"),
("border-block-color", "border-block-color: ${1};"),
("border-block-end", "border-block-end: ${1};"),
("border-block-end-color", "border-block-end-color: ${1};"),
("border-block-end-style", "border-block-end-style: ${1};"),
("border-block-end-width", "border-block-end-width: ${1};"),
("border-block-start", "border-block-start: ${1};"),
("border-block-start-color", "border-block-start-color: ${1};"),
("border-block-start-style", "border-block-start-style: ${1};"),
("border-block-start-width", "border-block-start-width: ${1};"),
("border-block-style", "border-block-style: ${1};"),
("border-block-width", "border-block-width: ${1};"),
("border-bottom", "border-bottom: ${1};"),
("border-bottom-color", "border-bottom-color: ${1};"),
("border-bottom-left-radius", "border-bottom-left-radius: ${1};"),
("border-bottom-right-radius", "border-bottom-right-radius: ${1};"),
("border-bottom-style", "border-bottom-style: ${1};"),
("border-bottom-width", "border-bottom-width: ${1};"),
("border-boundary", "border-boundary: ${1};"),
("border-collapse", "border-collapse: ${1};"),
("border-color", "border-color: ${1};"),
("border-image", "border-image: ${1};"),
("border-image-outset", "border-image-outset: ${1};"),
("border-image-repeat", "border-image-repeat: ${1};"),
("border-image-slice", "border-image-slice: ${1};"),
("border-image-source", "border-image-source: ${1};"),
("border-image-transform", "border-image-transform: ${1};"),
("border-image-width", "border-image-width: ${1};"),
("border-inline", "border-inline: ${1};"),
("border-inline-color", "border-inline-color: ${1};"),
("border-inline-end", "border-inline-end: ${1};"),
("border-inline-end-color", "border-inline-end-color: ${1};"),
("border-inline-end-style", "border-inline-end-style: ${1};"),
("border-inline-end-width", "border-inline-end-width: ${1};"),
("border-inline-start", "border-inline-start: ${1};"),
("border-inline-start-color", "border-inline-start-color: ${1};"),
("border-inline-start-style", "border-inline-start-style: ${1};"),
("border-inline-start-width", "border-inline-start-width: ${1};"),
("border-inline-style", "border-inline-style: ${1};"),
("border-inline-width", "border-inline-width: ${1};"),
("border-left", "border-left: ${1};"),
("border-left-color", "border-left-color: ${1};"),
("border-left-style", "border-left-style: ${1};"),
("border-left-width", "border-left-width: ${1};"),
("border-radius", "border-radius: ${1};"),
("border-right", "border-right: ${1};"),
("border-right-color", "border-right-color: ${1};"),
("border-right-style", "border-right-style: ${1};"),
("border-right-width", "border-right-width: ${1};"),
("border-spacing", "border-spacing: ${1};"),
("border-style", "border-style: ${1};"),
("border-top", "border-top: ${1};"),
("border-top-color", "border-top-color: ${1};"),
("border-top-left-radius", "border-top-left-radius: ${1};"),
("border-top-right-radius", "border-top-right-radius: ${1};"),
("border-top-style", "border-top-style: ${1};"),
("border-top-width", "border-top-width: ${1};"),
("border-width", "border-width: ${1};"),
("bottom", "bottom: ${1};"),
("box-decoration-break", "box-decoration-break: ${1};"),
("box-shadow", "box-shadow: ${1};"),
("box-sizing", "box-sizing: ${1};"),
("box-snap", "box-snap: ${1};"),
("break-after", "break-after: ${1};"),
("break-before", "break-before: ${1};"),
("break-inside", "break-inside: ${1};"),
("caption-side", "caption-side: ${1};"),
("caret", "caret: ${1};"),
("caret-color", "caret-color: ${1};"),
("caret-shape", "caret-shape: ${1};"),
("chains", "chains: ${1};"),
("clear", "clear: ${1};"),
("clip-path", "clip-path: ${1};"),
("clip-rule", "clip-rule: ${1};"),
("color", "color: ${1};"),
("color-adjust", "color-adjust: ${1};"),
("color-interpolation-filters", "color-interpolation-filters: ${1};"),
("color-rendering", "color-rendering: ${1};"),
("color-scheme", "color-scheme: ${1};"),
("column-count", "column-count: ${1};"),
("column-fill", "column-fill: ${1};"),
("column-gap", "column-gap: ${1};"),
("column-rule", "column-rule: ${1};"),
("column-rule-color", "column-rule-color: ${1};"),
("column-rule-style", "column-rule-style: ${1};"),
("column-rule-width", "column-rule-width: ${1};"),
("column-span", "column-span: ${1};"),
("column-width", "column-width: ${1};"),
("columns", "columns: ${1};"),
("composes", "composes: ${1};"),
("contain", "contain: ${1};"),
("contain-intrinsic-size", "contain-intrinsic-size: ${1};"),
("content-visibility", "content-visibility: ${1};"),
("content", "content: ${1};"),
("continue", "continue: ${1};"),
("counter-increment", "counter-increment: ${1};"),
("counter-reset", "counter-reset: ${1};"),
("counter-set", "counter-set: ${1};"),
("cue", "cue: ${1};"),
("cue-after", "cue-after: ${1};"),
("cue-before", "cue-before: ${1};"),
("cursor", "cursor: ${1};"),
("direction", "direction: ${1};"),
("display", "display: ${1};"),
("dominant-baseline", "dominant-baseline: ${1};"),
("empty-cells", "empty-cells: ${1};"),
("fill", "fill: ${1};"),
("fill-break", "fill-break: ${1};"),
("fill-color", "fill-color: ${1};"),
("fill-image", "fill-image: ${1};"),
("fill-opacity", "fill-opacity: ${1};"),
("fill-origin", "fill-origin: ${1};"),
("fill-position", "fill-position: ${1};"),
("fill-repeat", "fill-repeat: ${1};"),
("fill-rule", "fill-rule: ${1};"),
("fill-size", "fill-size: ${1};"),
("filter", "filter: ${1};"),
("flex", "flex: ${1};"),
("flex-basis", "flex-basis: ${1};"),
("flex-direction", "flex-direction: ${1};"),
("flex-flow", "flex-flow: ${1};"),
("flex-grow", "flex-grow: ${1};"),
("flex-shrink", "flex-shrink: ${1};"),
("flex-wrap", "flex-wrap: ${1};"),
("float", "float: ${1};"),
("float-defer", "float-defer: ${1};"),
("float-offset", "float-offset: ${1};"),
("float-reference", "float-reference: ${1};"),
("flood-color", "flood-color: ${1};"),
("flood-opacity", "flood-opacity: ${1};"),
("flow", "flow: ${1};"),
("flow-from", "flow-from: ${1};"),
("flow-into", "flow-into: ${1};"),
("font", "font: ${1};"),
("font-family", "font-family: ${1};"),
("font-feature-settings", "font-feature-settings: ${1};"),
("font-kerning", "font-kerning: ${1};"),
("font-language-override", "font-language-override: ${1};"),
("font-max-size", "font-max-size: ${1};"),
("font-min-size", "font-min-size: ${1};"),
("font-optical-sizing", "font-optical-sizing: ${1};"),
("font-palette", "font-palette: ${1};"),
("font-presentation", "font-presentation: ${1};"),
("font-size", "font-size: ${1};"),
("font-size-adjust", "font-size-adjust: ${1};"),
("font-stretch", "font-stretch: ${1};"),
("font-style", "font-style: ${1};"),
("font-synthesis", "font-synthesis: ${1};"),
("font-synthesis-weight", "font-synthesis-weight: ${1};"),
("font-variant", "font-variant: ${1};"),
("font-variant-alternates", "font-variant-alternates: ${1};"),
("font-variant-caps", "font-variant-caps: ${1};"),
("font-variant-east-asian", "font-variant-east-asian: ${1};"),
("font-variant-emoji", "font-variant-emoji: ${1};"),
("font-variant-ligatures", "font-variant-ligatures: ${1};"),
("font-variant-numeric", "font-variant-numeric: ${1};"),
("font-variant-position", "font-variant-position: ${1};"),
("font-variation-settings", "font-variation-settings: ${1};"),
("font-weight", "font-weight: ${1};"),
("footnote-display", "footnote-display: ${1};"),
("footnote-policy", "footnote-policy: ${1};"),
("forced-color-adjust", "forced-color-adjust: ${1};"),
("gap", "gap: ${1};"),
("grid", "grid: ${1};"),
("grid-area", "grid-area: ${1};"),
("grid-auto-columns", "grid-auto-columns: ${1};"),
("grid-auto-flow", "grid-auto-flow: ${1};"),
("grid-auto-rows", "grid-auto-rows: ${1};"),
("grid-column", "grid-column: ${1};"),
("grid-column-end", "grid-column-end: ${1};"),
("grid-column-start", "grid-column-start: ${1};"),
("grid-gap", "grid-gap: ${1};"),
("grid-row", "grid-row: ${1};"),
("grid-row-end", "grid-row-end: ${1};"),
("grid-row-start", "grid-row-start: ${1};"),
("grid-template", "grid-template: ${1};"),
("grid-template-areas", "grid-template-areas: ${1};"),
("grid-template-columns", "grid-template-columns: ${1};"),
("grid-template-rows", "grid-template-rows: ${1};"),
("hanging-punctuation", "hanging-punctuation: ${1};"),
("height", "height: ${1};"),
("hyphenate-character", "hyphenate-character: ${1};"),
("hyphenate-limit-chars", "hyphenate-limit-chars: ${1};"),
("hyphenate-limit-last", "hyphenate-limit-last: ${1};"),
("hyphenate-limit-lines", "hyphenate-limit-lines: ${1};"),
("hyphenate-limit-zone", "hyphenate-limit-zone: ${1};"),
("hyphens", "hyphens: ${1};"),
("image-orientation", "image-orientation: ${1};"),
("image-rendering", "image-rendering: ${1};"),
("image-resolution", "image-resolution: ${1};"),
("initial-letter", "initial-letter: ${1};"),
("initial-letter-align", "initial-letter-align: ${1};"),
("initial-letter-wrap", "initial-letter-wrap: ${1};"),
("inline-size", "inline-size: ${1};"),
("inline-sizing", "inline-sizing: ${1};"),
("inset", "inset: ${1};"),
("inset-block", "inset-block: ${1};"),
("inset-block-end", "inset-block-end: ${1};"),
("inset-block-start", "inset-block-start: ${1};"),
("inset-inline-end", "inset-inline-end: ${1};"),
("inset-inline-start", "inset-inline-start: ${1};"),
("isolation", "isolation: ${1};"),
("justify-content", "justify-content: ${1};"),
("justify-items", "justify-items: ${1};"),
("justify-self", "justify-self: ${1};"),
("left", "left: ${1};"),
("leading-trim", "leading-trim: ${1};"),
("leading-trim-over", "leading-trim-over: ${1};"),
("leading-trim-under", "leading-trim-under: ${1};"),
("letter-spacing", "letter-spacing: ${1};"),
("lighting-color", "lighting-color: ${1};"),
("line-break", "line-break: ${1};"),
("line-clamp", "line-clamp: ${1};"),
("line-grid", "line-grid: ${1};"),
("line-height", "line-height: ${1};"),
("line-height-step", "line-height-step: ${1};"),
("line-padding", "line-padding: ${1};"),
("line-snap", "line-snap: ${1};"),
("list-style", "list-style: ${1};"),
("list-style-image", "list-style-image: ${1};"),
("list-style-position", "list-style-position: ${1};"),
("list-style-type", "list-style-type: ${1};"),
("margin", "margin: ${1};"),
("margin-block", "margin-block: ${1};"),
("margin-block-end", "margin-block-end: ${1};"),
("margin-block-start", "margin-block-start: ${1};"),
("margin-bottom", "margin-bottom: ${1};"),
("margin-inline", "margin-inline: ${1};"),
("margin-inline-end", "margin-inline-end: ${1};"),
("margin-inline-start", "margin-inline-start: ${1};"),
("margin-left", "margin-left: ${1};"),
("margin-right", "margin-right: ${1};"),
("margin-top", "margin-top: ${1};"),
("margin-trim", "margin-trim: ${1};"),
("marker", "marker: ${1};"),
("marker-end", "marker-end: ${1};"),
("marker-knockout-left", "marker-knockout-left: ${1};"),
("marker-knockout-right", "marker-knockout-right: ${1};"),
("marker-mid", "marker-mid: ${1};"),
("marker-pattern", "marker-pattern: ${1};"),
("marker-segment", "marker-segment: ${1};"),
("marker-side", "marker-side: ${1};"),
("marker-start", "marker-start: ${1};"),
("marks", "marks: ${1};"),
("mask", "mask: ${1};"),
("mask-border", "mask-border: ${1};"),
("mask-border-mode", "mask-border-mode: ${1};"),
("mask-border-outset", "mask-border-outset: ${1};"),
("mask-border-repeat", "mask-border-repeat: ${1};"),
("mask-border-slice", "mask-border-slice: ${1};"),
("mask-border-source", "mask-border-source: ${1};"),
("mask-border-width", "mask-border-width: ${1};"),
("mask-clip", "mask-clip: ${1};"),
("mask-composite", "mask-composite: ${1};"),
("mask-image", "mask-image: ${1};"),
("mask-mode", "mask-mode: ${1};"),
("mask-origin", "mask-origin: ${1};"),
("mask-position", "mask-position: ${1};"),
("mask-repeat", "mask-repeat: ${1};"),
("mask-size", "mask-size: ${1};"),
("mask-type", "mask-type: ${1};"),
("max-block-size", "max-block-size: ${1};"),
("max-height", "max-height: ${1};"),
("max-inline-size", "max-inline-size: ${1};"),
("max-lines", "max-lines: ${1};"),
("max-width", "max-width: ${1};"),
("min-block-size", "min-block-size: ${1};"),
("min-height", "min-height: ${1};"),
("min-inline-size", "min-inline-size: ${1};"),
("min-width", "min-width: ${1};"),
("mix-blend-mode", "mix-blend-mode: ${1};"),
("motion", "motion: ${1};"),
("motion-offset", "motion-offset: ${1};"),
("motion-path", "motion-path: ${1};"),
("motion-rotation", "motion-rotation: ${1};"),
("nav-down", "nav-down: ${1};"),
("nav-left", "nav-left: ${1};"),
("nav-right", "nav-right: ${1};"),
("nav-up", "nav-up: ${1};"),
("object-fit", "object-fit: ${1};"),
("object-position", "object-position: ${1};"),
("offset", "offset: ${1};"),
("offset-after", "offset-after: ${1};"),
("offset-anchor", "offset-anchor: ${1};"),
("offset-before", "offset-before: ${1};"),
("offset-distance", "offset-distance: ${1};"),
("offset-end", "offset-end: ${1};"),
("offset-path", "offset-path: ${1};"),
("offset-position", "offset-position: ${1};"),
("offset-rotate", "offset-rotate: ${1};"),
("offset-start", "offset-start: ${1};"),
("opacity", "opacity: ${1};"),
("order", "order: ${1};"),
("orphans", "orphans: ${1};"),
("outline", "outline: ${1};"),
("outline-color", "outline-color: ${1};"),
("outline-offset", "outline-offset: ${1};"),
("outline-style", "outline-style: ${1};"),
("outline-width", "outline-width: ${1};"),
("overflow", "overflow: ${1};"),
("overflow-anchor", "overflow-anchor: ${1};"),
("overflow-block", "overflow-block: ${1};"),
("overflow-clip-margin", "overflow-clip-margin: ${1};"),
("overflow-inline", "overflow-inline: ${1};"),
("overflow-wrap", "overflow-wrap: ${1};"),
("overflow-x", "overflow-x: ${1};"),
("overflow-y", "overflow-y: ${1};"),
("overscroll-behavior", "overscroll-behavior: ${1};"),
("overscroll-behavior-block", "overscroll-behavior-block: ${1};"),
("overscroll-behavior-inline", "overscroll-behavior-inline: ${1};"),
("overscroll-behavior-x", "overscroll-behavior-x: ${1};"),
("overscroll-behavior-y", "overscroll-behavior-y: ${1};"),
("padding", "padding: ${1};"),
("padding-block", "padding-block: ${1};"),
("padding-block-end", "padding-block-end: ${1};"),
("padding-block-start", "padding-block-start: ${1};"),
("padding-bottom", "padding-bottom: ${1};"),
("padding-inline-end", "padding-inline-end: ${1};"),
("padding-inline-start", "padding-inline-start: ${1};"),
("padding-left", "padding-left: ${1};"),
("padding-right", "padding-right: ${1};"),
("padding-top", "padding-top: ${1};"),
("page", "page: ${1};"),
("page-break-after", "page-break-after: ${1};"),
("page-break-before", "page-break-before: ${1};"),
("page-break-inside", "page-break-inside: ${1};"),
("paint-order", "paint-order: ${1};"),
("pause", "pause: ${1};"),
("pause-after", "pause-after: ${1};"),
("pause-before", "pause-before: ${1};"),
("perspective", "perspective: ${1};"),
("perspective-origin", "perspective-origin: ${1};"),
("place-content", "place-content: ${1};"),
("place-items", "place-items: ${1};"),
("place-self", "place-self: ${1};"),
("pointer-events", "pointer-events: ${1};"),
("polar-anchor", "polar-anchor: ${1};"),
("polar-angle", "polar-angle: ${1};"),
("polar-distance", "polar-distance: ${1};"),
("polar-origin", "polar-origin: ${1};"),
("position", "position: ${1};"),
("presentation-level", "presentation-level: ${1};"),
("quotes", "quotes: ${1};"),
("region-fragment", "region-fragment: ${1};"),
("resize", "resize: ${1};"),
("rest", "rest: ${1};"),
("rest-after", "rest-after: ${1};"),
("rest-before", "rest-before: ${1};"),
("right", "right: ${1};"),
("rotate", "rotate: ${1};"),
("rotation", "rotation: ${1};"),
("rotation-point", "rotation-point: ${1};"),
("row-gap", "row-gap: ${1};"),
("ruby-align", "ruby-align: ${1};"),
("ruby-merge", "ruby-merge: ${1};"),
("ruby-overhang", "ruby-overhang: ${1};"),
("ruby-position", "ruby-position: ${1};"),
("running", "running: ${1};"),
("scale", "scale: ${1};"),
("scrollbar-color", "scrollbar-color: ${1};"),
("scrollbar-width", "scrollbar-width: ${1};"),
("scroll-behavior", "scroll-behavior: ${1};"),
("scroll-margin", "scroll-margin: ${1};"),
("scroll-margin-block", "scroll-margin-block: ${1};"),
("scroll-margin-block-end", "scroll-margin-block-end: ${1};"),
("scroll-margin-block-start", "scroll-margin-block-start: ${1};"),
("scroll-margin-bottom", "scroll-margin-bottom: ${1};"),
("scroll-margin-inline", "scroll-margin-inline: ${1};"),
("scroll-margin-inline-end", "scroll-margin-inline-end: ${1};"),
("scroll-margin-inline-start", "scroll-margin-inline-start: ${1};"),
("scroll-margin-left", "scroll-margin-left: ${1};"),
("scroll-margin-right", "scroll-margin-right: ${1};"),
("scroll-margin-top", "scroll-margin-top: ${1};"),
("scroll-padding", "scroll-padding: ${1};"),
("scroll-padding-block", "scroll-padding-block: ${1};"),
("scroll-padding-block-end", "scroll-padding-block-end: ${1};"),
("scroll-padding-block-start", "scroll-padding-block-start: ${1};"),
("scroll-padding-bottom", "scroll-padding-bottom: ${1};"),
("scroll-padding-inline", "scroll-padding-inline: ${1};"),
("scroll-padding-inline-end", "scroll-padding-inline-end: ${1};"),
("scroll-padding-inline-start", "scroll-padding-inline-start: ${1};"),
("scroll-padding-left", "scroll-padding-left: ${1};"),
("scroll-padding-right", "scroll-padding-right: ${1};"),
("scroll-padding-top", "scroll-padding-top: ${1};"),
("scroll-snap-align", "scroll-snap-align: ${1};"),
("scroll-snap-stop", "scroll-snap-stop: ${1};"),
("scroll-snap-type", "scroll-snap-type: ${1};"),
("shape-image-threshold", "shape-image-threshold: ${1};"),
("shape-inside", "shape-inside: ${1};"),
("shape-margin", "shape-margin: ${1};"),
("shape-outside", "shape-outside: ${1};"),
("shape-rendering", "shape-rendering: ${1};"),
("size", "size: ${1};"),
("spatial-navigation-action", "spatial-navigation-action: ${1};"),
("spatial-navigation-contain", "spatial-navigation-contain: ${1};"),
("spatial-navigation-function", "spatial-navigation-function: ${1};"),
("speak", "speak: ${1};"),
("speak-as", "speak-as: ${1};"),
("stop-color", "stop-color: ${1};"),
("stop-opacity", "stop-opacity: ${1};"),
("string-set", "string-set: ${1};"),
("stroke", "stroke: ${1};"),
("stroke-align", "stroke-align: ${1};"),
("stroke-alignment", "stroke-alignment: ${1};"),
("stroke-break", "stroke-break: ${1};"),
("stroke-color", "stroke-color: ${1};"),
("stroke-dash-corner", "stroke-dash-corner: ${1};"),
("stroke-dash-justify", "stroke-dash-justify: ${1};"),
("stroke-dashadjust", "stroke-dashadjust: ${1};"),
("stroke-dasharray", "stroke-dasharray: ${1};"),
("stroke-dashcorner", "stroke-dashcorner: ${1};"),
("stroke-dashoffset", "stroke-dashoffset: ${1};"),
("stroke-image", "stroke-image: ${1};"),
("stroke-linecap", "stroke-linecap: ${1};"),
("stroke-linejoin", "stroke-linejoin: ${1};"),
("stroke-miterlimit", "stroke-miterlimit: ${1};"),
("stroke-opacity", "stroke-opacity: ${1};"),
("stroke-origin", "stroke-origin: ${1};"),
("stroke-position", "stroke-position: ${1};"),
("stroke-repeat", "stroke-repeat: ${1};"),
("stroke-width", "stroke-width: ${1};"),
("tab-size", "tab-size: ${1};"),
("table-layout", "table-layout: ${1};"),
("text-align", "text-align: ${1};"),
("text-align-all", "text-align-all: ${1};"),
("text-align-last", "text-align-last: ${1};"),
("text-anchor", "text-anchor: ${1};"),
("text-combine-upright", "text-combine-upright: ${1};"),
("text-decoration", "text-decoration: ${1};"),
("text-decoration-color", "text-decoration-color: ${1};"),
("text-decoration-line", "text-decoration-line: ${1};"),
("text-decoration-skip", "text-decoration-skip: ${1};"),
("text-decoration-skip-ink", "text-decoration-skip-ink: ${1};"),
("text-decoration-style", "text-decoration-style: ${1};"),
("text-decoration-thickness", "text-decoration-thickness: ${1};"),
("text-decoration-width", "text-decoration-width: ${1};"),
("text-edge", "text-edge: ${1};"),
("text-emphasis", "text-emphasis: ${1};"),
("text-emphasis-color", "text-emphasis-color: ${1};"),
("text-emphasis-position", "text-emphasis-position: ${1};"),
("text-emphasis-skip", "text-emphasis-skip: ${1};"),
("text-emphasis-style", "text-emphasis-style: ${1};"),
("text-group-align", "text-group-align: ${1};"),
("text-indent", "text-indent: ${1};"),
("text-justify", "text-justify: ${1};"),
("text-orientation", "text-orientation: ${1};"),
("text-overflow", "text-overflow: ${1};"),
("text-rendering", "text-rendering: ${1};"),
("text-shadow", "text-shadow: ${1};"),
("text-size-adjust", "text-size-adjust: ${1};"),
("text-space-collapse", "text-space-collapse: ${1};"),
("text-space-trim", "text-space-trim: ${1};"),
("text-spacing", "text-spacing: ${1};"),
("text-transform", "text-transform: ${1};"),
("text-underline-offset", "text-underline-offset: ${1};"),
("text-underline-position", "text-underline-position: ${1};"),
("text-wrap", "text-wrap: ${1};"),
("top", "top: ${1};"),
("transform", "transform: ${1};"),
("transform-box", "transform-box: ${1};"),
("transform-origin", "transform-origin: ${1};"),
("transform-style", "transform-style: ${1};"),
("transition", "transition: ${1};"),
("transition-delay", "transition-delay: ${1};"),
("transition-duration", "transition-duration: ${1};"),
("transition-property", "transition-property: ${1};"),
("transition-timing-function", "transition-timing-function: ${1};"),
("translate", "translate: ${1};"),
("unicode-bidi", "unicode-bidi: ${1};"),
("user-select", "user-select: ${1};"),
("vector-effect", "vector-effect: ${1};"),
("vertical-align", "vertical-align: ${1};"),
("visibility", "visibility: ${1};"),
("voice-balance", "voice-balance: ${1};"),
("voice-duration", "voice-duration: ${1};"),
("voice-family", "voice-family: ${1};"),
("voice-pitch", "voice-pitch: ${1};"),
("voice-range", "voice-range: ${1};"),
("voice-rate", "voice-rate: ${1};"),
("voice-stress", "voice-stress: ${1};"),
("voice-volume", "voice-volume: ${1};"),
("white-space", "white-space: ${1};"),
("widows", "widows: ${1};"),
("width", "width: ${1};"),
("will-change", "will-change: ${1};"),
("word-break", "word-break: ${1};"),
("word-boundary-detection", "word-boundary-detection: ${1};"),
("word-boundary-expansion", "word-boundary-expansion: ${1};"),
("word-spacing", "word-spacing: ${1};"),
("word-wrap", "word-wrap: ${1};"),
("wrap-after", "wrap-after: ${1};"),
("wrap-before", "wrap-before: ${1};"),
("wrap-flow", "wrap-flow: ${1};"),
("wrap-inside", "wrap-inside: ${1};"),
("wrap-through", "wrap-through: ${1};"),
("writing-mode", "writing-mode: ${1};"),
("z-index", "z-index: ${1};"),
]
media_features = [
("any-hover", "any-hover: ${0}"),
("any-pointer", "any-pointer: ${0}"),
("aspect-ratio", "aspect-ratio: ${1}/"),
("color", "color: ${0}"),
("color-gamut", "color-gamut: ${0}"),
("color-index", "color-index: ${0}"),
("dynamic-range", "dynamic-range: ${0}"),
("environment-blending", "environment-blending: ${0}"),
("forced-colors", "forced-colors: ${0}"),
("grid",),
("hover", "hover: ${0}"),
("inverted-colors", "inverted-colors: ${0}"),
("max-aspect-ratio", "max-aspect-ratio: ${0}"),
("max-color", "max-color: ${0}"),
("max-color-index", "max-color-index: ${0}"),
("max-height", "max-height: ${0}"),
("max-monochrome", "max-monochrome: ${0}"),
("max-resolution", "max-resolution: ${0}"),
("max-width", "max-width: ${0}"),
("min-aspect-ratio", "min-aspect-ratio: ${0}"),
("min-color", "min-color: ${0}"),
("min-color-index", "min-color-index: ${0}"),
("min-height", "min-height: ${0}"),
("min-monochrome", "min-monochrome: ${0}"),
("min-resolution", "min-resolution: ${0}"),
("min-width", "min-width: ${0}"),
("monochrome", "monochrome: ${0}"),
("orientation", "orientation: ${0}"),
("overflow-block", "overflow-block: ${0}"),
("overflow-inline", "overflow-inline: ${0}"),
("pointer", "pointer: ${0}"),
("prefers-color-scheme", "prefers-color-scheme: ${0}"),
("prefers-contrast", "prefers-contrast: ${0}"),
("prefers-reduced-data", "prefers-reduced-data: ${0}"),
("prefers-reduced-motion", "prefers-reduced-motion: ${0}"),
("prefers-reduced-transparency", "prefers-reduced-transparency: ${0}"),
("resolution", "resolution: ${0}"),
("scan", "scan: ${0}"),
("scripting",),
("update", "update: ${0}"),
]
name_to_completions = {
"align-content": [("normal",),]
+ t.baseline_position
+ t.content_distribution
+ t.content_position
+ t.overflow_position
+ t.aspect_ratio
+ t.number,
"align-items": [("normal",), ("stretch",),]
+ t.baseline_position
+ t.overflow_position
+ t.self_position,
"align-self": [("auto",), ("normal",), ("stretch",),]
+ t.baseline_position
+ t.overflow_position
+ t.self_position,
"alignment-baseline": t.alignment_baseline,
# "all": [], # TODO: write this when the context for any-value is completed
"animation": (
t.single_animation_direction
+ t.single_animation_fill_mode
+ t.single_animation_iteration_count
+ t.single_animation_name
+ t.single_animation_play_state
+ t.single_timing_function
+ t.time
),
"animation-composition": t.single_animation_composition,
"animation-delay": t.time,
"animation-direction": t.single_animation_direction,
"animation-duration": t.time,
"animation-fill-mode": t.single_animation_fill_mode,
"animation-iteration-count": t.single_animation_iteration_count,
"animation-name": t.single_animation_name,
"animation-play-state": t.single_animation_play_state,
"animation-timing-function": t.single_timing_function,
"appearance": [
("auto",),
("button",),
("checkbox",),
("listbox",),
("menulist",),
("menulist-button",),
("meter",),
("none",),
("progress-bar",),
("push-button",),
("radio",),
("searchfield",),
("slider-horizontal",),
("square-button",),
("textarea",),
("textfield",),
],
"aspect-ratio": [("auto",), t.ratio],
"backface-visibility": [("hidden",), ("visible",)],
"background-attachment": t.attachment,
"background-blend-mode": t.blend_mode,
"background-clip": t.box,
"background-color": t.color,
"background-image": t.bg_image,
"background-image-transform": [("logical",), ("physical",), ("rotate",)],
"background-origin": t.box,
"background-position": t.position,
"background-position-x": [
("center",),
("left",),
("right",),
("x-end",),
("x-start",),
] + t.length + t.percentage,
"background-position-y": [
("bottom",),
("center",),
("top",),
("y-end",),
("y-start",),
] + t.length + t.percentage,
"background-repeat": t.repeat_style,
"background-size": t.bg_size,
"background": (
t.attachment
+ t.bg_image
+ t.bg_size
+ t.box
+ t.color
+ t.position
+ t.repeat_style
),
"baseline-shift": t.baseline_shift,
"bleed": [("auto",)] + t.length,
"block-ellipsis": t.block_ellipsis,
"block-inline-size": [
("auto",),
("contain",),
("fit-content",),
("stretch",),
t.fit_content,
] + t.length + t.percentage,
"block-step": [
("auto",),
("center",),
("down",),
("end",),
("margin",),
("nearest",),
("none",),
("padding",),
("start",),
("up",),
]
+ t.length,
"block-step-align": [("auto",), ("center",), ("end",), ("start",)],
"block-step-insert": [("margin",), ("padding",)],
"block-step-round": [("down",), ("nearest",), ("up",)],
"block-step-size": [("none",)] + t.length,
"bookmark-label": t.content_list,
"bookmark-level": [("none",)] + t.integer,
"bookmark-state": [("closed",), ("open",)],
"border": t.border_style + t.border_width + t.color,
"border-block-inline": t.color + t.line_style + t.line_width,
"border-block-inline-color": t.color,
"border-block-inline-style": t.line_style,
"border-block-inline-width": t.line_width,
"border-boundary": [("display",), ("none",), ("parent",)],
"border-collapse": [("collapse",), ("separate",)],
"border-color": t.color,
"border-image": [
("auto",),
("fill",),
("none",),
("repeat",),
("round",),
("space",),
("stretch",),
]
+ t.image
+ t.integer
+ t.length
+ t.number,
"border-image-outset": t.length + t.number,
"border-image-repeat": [("repeat",), ("round",), ("space",), ("stretch",)],
"border-image-slice": [("fill",)] + t.integer + t.percentage,
"border-image-source": [("none",)] + t.image,
"border-image-transform": [("logical",), ("physical",), ("rotate",)],
"border-image-width": [("auto",)] + t.length + t.number + t.percentage,
"border-radius": t.length + t.percentage,
"border-spacing": t.length,
"border-style": t.border_style,
"border-top-right-left-bottom": t.border_style + t.border_width + t.color,
"border-width": t.border_width,
"box-decoration-break": [("clone",), ("slice",)],
"box-shadow": [("inset",), ("none",)] + t.color + t.length,
"box-sizing": [("border-box",), ("content-box",)],
"box-snap": [
("baseline",),
("block-end",),
("block-start",),
("center",),
("last-baseline",),
("none",),
],
"break-before-after": [
("always",),
("auto",),
("avoid",),
("avoid-column",),
("avoid-page",),
("avoid-region",),
("column",),
("left",),
("page",),
("recto",),
("region",),
("right",),
("verso",),
],
"break-inside": [
("auto",),
("avoid",),
("avoid-column",),
("avoid-page",),
("avoid-region",),
],
"caption-side": [("bottom",), ("inline-end",), ("inline-start",), ("top",)],
"caret-color": [("auto",)] + t.color,
"caret-shape": t.caret_shape,
"caret": [("auto",)] + t.caret_shape + t.color,
"chains": [("none",), t.identifier],
"clear": [
("block-end",),
("block-start",),
("both",),
("both-block",),
("both-inline",),
("bottom",),
("inline-end",),
("inline-start",),
("left",),
("none",),
("right",),
("top",),
],
"clip-path": [("none",), t.url] + t.basic_shape + t.geometry_box,
"clip-rule": [("evenodd",), ("nonzero",)],
"color": t.color,
"color-adjust": [("economy",), ("exact",)],
"color-gamut": [("p3",), ("rec2020",), ("srgb",)],
"color-interpolation-filters": [("auto",), ("linearRGB",), ("sRGB",)],
"color-rendering": [("auto",), ("optimizeQuality",), ("optimizeSpeed",)],
"color-scheme": [("dark",), ("light",), ("normal",), ("only",), t.identifier],
"columns": [("auto",)] + t.integer + t.length,
"column-count": [("auto",)] + t.integer,
"column-fill": [("auto",), ("balance",), ("balance-all",)],
"column-rule": t.color + t.line_style + t.line_width,
"column-rule-color": t.color,
"column-rule-style": t.line_style,
"column-rule-width": t.line_width,
"column-span": [("all",), ("none",)],
"column-width": [("auto",)] + t.length,
"composes": [("from",), t.string],
"contain": [
("content",),
("layout",),
("none",),
("paint",),
("size",),
("strict",),
("style",),
],
"contain-intrinsic-size": [("none",)] + t.length,
"content-visibility": [("auto",), ("hidden",), ("visibile",)],
"content": [("none",), ("normal",), t.attr, t.counter, t.counters, t.string]
+ t.content_list
+ t.image,
"continue": [
("auto",),
("discard",),
("fragments",),
("overflow",),
("paginate",),
],
"counter-set-reset-increment": [("none",), t.attr, t.identifier] + t.integer,
"cue-before-after": [("none",), t.url] + t.decibel,
"cursor": [
("alias",),
("all-scroll",),
("auto",),
("cell",),
("col-resize",),
("context-menu",),
("copy",),
("crosshair",),
("default",),
("e-resize",),
("ew-resize",),
("grab",),
("grabbing",),
("help",),
("move",),
("n-resize",),
("ne-resize",),
("nesw-resize",),
("no-drop",),
("none",),
("not-allowed",),
("ns-resize",),
("nw-resize",),
("nwse-resize",),
("pointer",),
("progress",),
("row-resize",),
("s-resize",),
("se-resize",),
("sw-resize",),
("text",),
("vertical-text",),
("w-resize",),
("wait",),
("zoom-in",),
("zoom-out",),
t.url,
]
+ t.number,
"direction": [("ltr",), ("rtl",)],
"display": (
t.display_box
+ t.display_inside
+ t.display_internal
+ t.display_legacy
+ t.display_listitem
+ t.display_outside
),
"dominant-baseline": [
("alphabetic",),
("auto",),
("central",),
("hanging",),
("ideographic",),
("mathematical",),
("middle",),
("text-bottom",),
("text-top",),
],
"dynamic-range": [("high",), ("standard",)],
"empty-cells": [("hide",), ("show",)],
"environment-blending": [("additive",), ("opaque",), ("subtractive",)],
"fill": t.background,
"fill-break": [("bounding-box",), ("clone",), ("slice",)],
"fill-color": t.color,
"fill-image": t.paint,
"fill-opacity": t.number,
"fill-origin": [
("border-box",),
("content-box",),
("fill-box",),
("match-parent",),
("padding-box",),
("stroke-box",),
],
"fill-position": t.position,
"fill-repeat": t.repeat_style,
"fill-rule": [("evenodd",), ("nonzero",)],
"fill-size": t.bg_size,
"filter": [("none",), t.blur, t.drop_shadow, t.filter_func, t.hue_rotate, t.url],
"flex": [("auto",), ("content",), ("initial",), ("none",)]
+ t.length
+ t.number
+ t.percentage,
"flex-basis": [("auto",), ("content",)] + t.length + t.percentage + t.width,
"flex-direction": t.flex_direction,
"flex-flow": t.flex_direction + t.flex_wrap,
"flex-shrink-grow": t.number,
"flex-wrap": t.flex_wrap,
"float": [
("block-end",),
("block-start",),
("bottom",),
("inline-end",),
("inline-start",),
("left",),
("none",),
("right",),
("snap-block",),
("snap-inline",),
("top",),
t.snap_block,
t.snap_inline,
],
"float-defer": [("last",), ("none",)] + t.integer,
"float-offset": t.length + t.percentage,
"float-reference": [("column",), ("inline",), ("page",), ("region",)],
"flood-color": t.color,
"flood-opacity": t.number + t.percentage,
"flow": [("auto",), ("same",), t.identifier, t.string],
"flow-from": [("none",), t.identifier],
"flow-into": [("content",), ("element",), ("none",), t.identifier],
"font": [
("100",),
("200",),
("300",),
("400",),
("500",),
("600",),
("700",),
("800",),
("900",),
("bold",),
("bolder",),
("caption",),
("condensed",),
("expanded",),
("extra-condensed",),
("extra-expanded",),
("icon",),
("italic",),
("large",),
("larger",),
("lighter",),
("medium",),
("menu",),
("message-box",),
("normal",),
("oblique",),
("semi-condensed",),
("semi-expanded",),
("small",),
("small-caps",),
("small-caption",),
("smaller",),
("status-bar",),
("ultra-condensed",),
("ultra-expanded",),
("x-large",),
("x-small",),
("xx-large",),
("xx-small",),
]
+ (t.font_family_generic + t.font_family_name + t.length + t.number + t.percentage),
"font-family": t.font_family_generic + t.font_family_name,
"font-feature-settings": [("normal",)] + t.feature_tag_value,
"font-kerning": [("auto",), ("none",), ("normal",)],
"font-language-override": [("normal",), t.string],
"font-optical-sizing": [("auto",), ("none",)],
"font-palette": [("dark",), ("light",), ("normal",), t.identifier,],
"font-presentation": [("auto",), ("emoji",), ("text",)],
"font-size": [
("infinity",),
("large",),
("larger",),
("medium",),
("small",),
("smaller",),
("x-large",),
("x-small",),
("xx-large",),
("xx-small",),
]
+ t.length
+ t.percentage,
"font-size-adjust": [("none",)] + t.number,
"font-stretch": [
("condensed",),
("expanded",),
("extra-condensed",),
("extra-expanded",),
("normal",),
("semi-condensed",),
("semi-expanded",),
("ultra-condensed",),
("ultra-expanded",),
]
+ t.percentage,
"font-style": [("italic",), ("normal",), ("oblique",)] + t.angle,
"font-synthesis": [("none",), ("small-caps",), ("style",), ("weight",)],
"font-synthesis-misc": [("auto",), ("none",)],
"font-variant": [
("all-petite-caps",),
("all-small-caps",),
("none",),
("normal",),
("ordinal",),
("petite-caps",),
("ruby",),
("slashed-zero",),
("small-caps",),
("sub",),
("super",),
("titling-caps",),
("unicase",),
t.annotation,
t.character_variant,
t.ornaments,
t.stylistic,
t.styleset,
t.swash,
]
+ t.font_variant,
"font-variant-alternates": [
("historical-forms",),
("normal",),
t.annotation,
t.character_variant,
t.ornaments,
t.stylistic,
t.styleset,
t.swash,
],
"font-variant-caps": [
("all-petite-caps",),
("all-small-caps",),
("normal",),
("petite-caps",),
("small-caps",),
("titling-caps",),
("unicase",),
],
"font-variant-east-asian": [("normal",), ("ruby",),]
+ t.east_asian_width_values
+ t.east_asian_variant_values,
"font-variant-emoji": [("auto",), ("emoji",), ("text",),],
"font-variant-ligatures": [("none",), ("normal",),]
+ (
t.common_lig_values
+ t.contextual_alt_values
+ t.discretionary_lig_values
+ t.historical_lig_values
),
"font-variant-numeric": [("normal",), ("ordinal",), ("slashed-zero",),]
+ t.numeric_figure_values
+ t.numeric_fraction_values
+ t.numeric_spacing_values,
"font-variant-position": [("normal",), ("sub",), ("super",)],
"font-variation-settings": [("normal",), t.string] + t.number,
"font-weight": [
("100",),
("200",),
("300",),
("400",),
("500",),
("600",),
("700",),
("800",),
("900",),
("bold",),
("bolder",),
("lighter",),
("normal",),
],
"footnote-display": [("block",), ("compact",), ("inline",)],
"footnote-policy": [("auto",), ("block",), ("line",)],
"forced-colors": [("active",), ("none",)],
"forced-color-adjust": [("auto",), ("none",)],
"grid": [("auto-flow",), ("dense",), ("none",), ("subgrid",), t.string,]
+ t.auto_track_list
+ t.track_list,
"grid-row-column-area": t.grid_line,
"grid-row-column-gap": t.length + t.percentage,
"grid-auto-flow": [("column",), ("dense",), ("row",)],
"grid-auto-rows-columns": t.track_size,
"grid-template-areas": [("none",), t.string],
"grid-template-rows-columns": [("none",), ("subgrid",),]
+ t.auto_track_list
+ t.track_list,
"grid-template": [("none",), ("subgrid",), t.line_names, t.string,]
+ t.fixed_size
+ t.track_size,
"hanging-punctuation": [
("allow-end",),
("first",),
("force-end",),
("last",),
("none",),
],
"height": [
("auto",),
("contain",),
("fill",),
("fit-content",),
("max-content",),
("min-content",),
("none",),
("stretch",),
t.fit_content,
]
+ t.length
+ t.percentage,
"hover": [("hover",), ("none",)],
"hyphenate-character": [("auto",), t.string,],
"hyphenate-limit-chars": [("auto",)] + t.integer,
"hyphenate-limit-last": [
("always",),
("column",),
("none",),
("page",),
("spread",),
],
"hyphenate-limit-lines": [("no-limit",)] + t.integer,
"hyphenate-limit-zone": t.length + t.percentage,
"hyphens": [("auto",), ("manual",), ("none",)],
"image-orientation": [("flip",), ("from-image",)] + t.angle,
"image-rendering": [("auto",), ("crisp-edges",), ("pixelated",)],
"image-resolution": [("from-image",), ("snap",)] + t.resolution,
"initial-letter": [("drop",), ("normal",), ("raise",)] + t.integer + t.number,
"initial-letter-align": [
("alphabetic",),
("border-box",),
("hanging",),
("ideographic",),
("leading",),
],
"initial-letter-wrap": [("all",), ("first",), ("grid",), ("none",),]
+ t.length
+ t.percentage,
"inline-sizing": [("normal",),("stretch",)],
"inset-block-inline": [("auto",)] + t.length + t.percentage,
"inverted-colors": [("inverted",), ("none",)],
"isolation": t.isolation_mode,
"justify-content": [
("center",),
("flex-end",),
("flex-start",),
("left",),
("normal",),
("right",),
("space-around",),
("space-between",),
]
+ (
t.content_distribution
+ t.overflow_position
+ t.content_position
+ t.aspect_ratio
+ t.number
),
"justify-items": [
("center",),
("left",),
("legacy",),
("normal",),
("right",),
("stretch",),
]
+ (t.baseline_position + t.overflow_position + t.self_position),
"justify-self": [("auto",), ("left",), ("normal",), ("right",), ("stretch",),]
+ t.baseline_position
+ t.overflow_position
+ t.self_position,
"leading-trim": [("both",), ("end",), ("normal",), ("start",)],
"leading-trim-under": [("alphabetic",), ("ideographic",), ("ideographic-ink",), ("normal",), ("text",)],
"letter-spacing": [("normal",)] + t.length,
"lighting-color": t.color,
"line-break": [("auto",), ("loose",), ("normal",), ("strict",)],
"line-clamp": t.block_ellipsis + t.integer,
"line-grid": [("create",), ("match-parent",)],
"line-height": [("normal",)] + t.length + t.number + t.percentage,
"line-height-step": [("none",)] + t.length,
"line-padding": t.length,
"line-sizing": [("legacy",), ("normal",)],
"line-snap": [("baseline",), ("contain",), ("none",)],
"list-style": [("inside",), ("none",), ("outside",), t.identifier, t.string,]
+ t.image,
"list-style-image": [("none",)] + t.image,
"list-style-position": [("inside",), ("outside",)],
"list-style-type": [("none",), t.identifier, t.string, t.symbols],
"margin": [("auto",), ("logical",)] + t.length + t.percentage,
"margin-block-inline": [("auto",)] + t.length + t.percentage,
"margin-trim": [("none",), ("in-flow",), ("all",)],
"marker": [("none",)] + t.length + t.percentage,
"marker-knockout": t.knockout_offset + t.knockout_shape,
"marker-pattern": t.marker_gap + t.marker_ref,
"marker-start-segment-mid-end": [("none",)] + t.marker_ref,
"marker-side": [("match-parent",), ("match-self",)],
"marks": [("crop",), ("cross",), ("none",)],
"mask": t.mask_layer,
"mask-border": [
("alpha",),
("auto",),
("fill",),
("luminance",),
("repeat",),
("round",),
("space",),
("stretch",),
]
+ t.image
+ t.length
+ t.number
+ t.percentage,
"mask-border-mode": [("alpha",), ("luminance",),],
"mask-border-outset": t.length + t.percentage,
"mask-border-repeat": [("repeat",), ("round",), ("space",), ("stretch",),],
"mask-border-slice": [("fill",)] + t.number + t.percentage,
"mask-border-source": t.image,
"mask-border-width": [("auto",)] + t.length + t.number + t.percentage,
"mask-clip": [("no-clip",)] + t.geometry_box,
"mask-composite": t.compositing_operator,
"mask-image": t.mask_reference,
"mask-mode": t.masking_mode,
"mask-origin": t.geometry_box,
"mask-position": t.position,
"mask-repeat": t.repeat_style,
"mask-size": t.bg_size,
"mask-type": [("alpha",), ("luminance",),],
"max-lines": [("none",)] + t.integer,
"media-feature-grid": [("0",), ("1",)],
"mix-blend-mode": t.blend_mode,
"monochrome-color-index": t.integer,
"motion": [("auto",), ("none",), ("reverse",), t.path, t.url,]
+ (t.angle + t.basic_shape + t.geometry_box + t.length + t.percentage),
"motion-offset": t.length + t.percentage,
"motion-path": [("none",), t.path, t.url,] + t.basic_shape + t.geometry_box,
"motion-rotation": [("auto",), ("reverse",)] + t.angle,
"nav-up-right-left-down": [("auto",), ("current",), ("root",), t.string],
"object-fit": [("contain",), ("cover",), ("fill",), ("none",), ("scale-down",),],
"object-position": t.position,
"offset": [("auto",), ("none",), ("reverse",), t.path, t.ray, t.url,]
+ t.angle
+ t.basic_shape
+ t.extent_keyword
+ t.geometry_box
+ t.length
+ t.percentage
+ t.position,
"offset-anchor": [("auto",)] + t.position,
"offset-distance": t.extent_keyword + t.length + t.percentage,
"offset-path": [("none",), t.path, t.ray, t.url,] + t.basic_shape + t.geometry_box,
"offset-position": [("auto",)] + t.position,
"offset-rotate": [("auto",), ("reverse",)] + t.angle,
"offset-start-end-before-after": [("auto",)] + t.length + t.percentage,
"opacity": t.number,
"order": t.integer,
"orientation": [("landscape",), ("portrait",)],
"outline": [("auto",), ("invert",)] + (t.border_style + t.border_width + t.color),
"outline-color": [("invert",)] + t.color,
"outline-offset": t.length,
"outline-style": [("auto",)] + t.border_style,
"outline-width": t.border_width,
"overflow": [("auto",), ("clip",), ("hidden",), ("scroll",), ("visible",)],
"overflow-block": [("none",), ("paged",), ("scroll",)],
"overflow-clip-margin": t.length,
"overflow-inline": [("none",), ("scroll",)],
"overflow-inline-block": [("auto",), ("clip",), ("hidden",), ("scroll",), ("visible",)],
"overflow-anchor": [("auto",), ("none",)],
"overflow-wrap": [("anywhere",), ("break-word",), ("normal",)],
"overflow-x-y": [("auto",), ("clip",), ("hidden",), ("scroll",), ("visible",),],
"overscroll-behavior": [("auto",), ("contain",), ("none",)],
"padding": [("logical",)] + t.length + t.percentage,
"padding-block-inline": t.length + t.percentage,
"page": [("auto",), t.identifier],
"page-break-before-after": [
("always",),
("auto",),
("avoid",),
("left",),
("recto",),
("right",),
("verso",),
],
"page-break-inside": [("avoid",), ("auto",)],
"paint-order": [("fill",), ("markers",), ("normal",), ("stroke",)],
"pause": [
("medium",),
("none",),
("strong",),
("weak",),
("x-strong",),
("x-weak",),
]
+ t.time,
"perspective": [("none",)] + t.length,
"perspective-origin": t.position,
"place-content": [("left",), ("normal",), ("right",),]
+ (
t.baseline_position
+ t.content_distribution
+ t.overflow_position
+ t.content_position
),
"place-items": [
("auto",),
("center",),
("left",),
("legacy",),
("normal",),
("right",),
("stretch",),
]
+ (t.baseline_position + t.overflow_position + t.self_position),
"place-self": [("auto",), ("left",), ("normal",), ("right",), ("stretch",),]
+ (t.baseline_position + t.overflow_position + t.self_position),
"pointer": [("coarse",), ("fine",), ("none",)],
"pointer-events": [
("all",),
("bounding-box",),
("fill",),
("none",),
("painted",),
("stroke",),
("visible",),
("visibleFill",),
("visiblePainted",),
("visibleStroke",),
],
"polar-anchor": t.position,
"polar-angle": t.angle,
"polar-distance": [("contain",)] + (t.extent_keyword + t.length + t.percentage),
"polar-origin": [("auto",)] + t.position,
"position": [("absolute",), ("fixed",), ("relative",), ("static",), ("sticky",),],
"prefers-color-scheme": [("dark",), ("light",)],
"prefers-contrast": [("forced",), ("high",), ("low",), ("no-preference",)],
"prefers-reduced-data": [("no-preference",), ("reduced",)],
"prefers-reduced-motion": [("no-preference",), ("reduced",)],
"prefers-reduced-transparency": [("no-preference",), ("reduced",)],
"presentation-level": [("increment",), ("same",)] + t.integer,
"quotes": [("none",), t.string],
"region-fragment": [("auto",), ("break",)],
"resize": [
("block",),
("both",),
("horizontal",),
("inline",),
("none",),
("vertical",),
],
"resolution": [("infinite",)] + t.resolution,
"rest": [
("medium",),
("none",),
("strong",),
("weak",),
("x-strong",),
("x-weak",),
]
+ t.time,
"rotate": [("none",), ("x",), ("y",), ("z",)] + t.angle + t.number,
"rotation": t.angle,
"rotation-point": t.position,
"row-column-gap": [("normal",)] + t.length + t.percentage,
"ruby-align": [("center",), ("space-around",), ("space-between",), ("start",),],
"ruby-merge": [("auto",), ("collapse",), ("separate",)],
"ruby-overhang": [("auto",), ("none",)],
"ruby-position": [("inter-character",), ("over",), ("under",)],
"running": [t.identifier],
"scale": [("none",)] + t.number,
"scan": [("interlace",), ("progressive",)],
"scripting": [("enabled",), ("initial-only",), ("none",)],
"scrollbar-color": [("auto",)] + t.color,
"scrollbar-width": [("thin",), ("none",), ("auto",)],
"scroll-behavior": [("auto",), ("smooth",)],
"scroll-padding": t.length + t.percentage,
"scroll-snap-align": [("center",), ("end",), ("none",), ("start",)],
"scroll-margin": t.length,
"scroll-snap-stop": [("always",), ("normal",)],
"scroll-snap-type": [
("block",),
("both",),
("inline",),
("mandatory",),
("none",),
("proximity",),
("x",),
("y",),
],
"shape-image-threshold": t.number,
"shape-inside": [("auto",), ("display",), ("outside-shape",), ("shape-box",),]
+ t.basic_shape
+ t.image,
"shape-margin": t.length + t.percentage,
"shape-outside": [("none",)] + t.basic_shape + t.image + t.shape_box,
"shape-rendering": [
("auto",),
("crispEdges",),
("geometricPrecision",),
("optimizeSpeed",),
],
"size": [("auto",), ("landscape",), ("portrait",)] + t.page_size + t.length,
"spatial-navigation-action": [("auto",), ("focus",), ("scroll",)],
"spatial-navigation-contain": [("auto",), ("contain",)],
"spatial-navigation-function": [("normal",), ("grid",)],
"speak": [("always",), ("auto",), ("never",)],
"speak-as": [
("digits",),
("literal-punctuation",),
("no-punctuation",),
("normal",),
("spell-out",),
],
"stop-color": t.color,
"stop-opacity": t.number,
"string-set": [("none",), t.attr, t.content, t.identifier, t.url,] + t.content_list,
"stroke": t.background,
"stroke-align": [("center",), ("inner",), ("outer",)],
"stroke-alignment": [("center",), ("inner",), ("outer",)],
"stroke-break": [("bounding-box",), ("clone",), ("slice",)],
"stroke-color": t.color,
"stroke-dashadjust": [
("compress",),
("dashes",),
("gaps",),
("none",),
("stretch",),
],
"stroke-dasharray": [("none",)] + t.dasharray,
"stroke-dash-corner": [("none",)] + t.length,
"stroke-dashcorner": [("none",)] + t.length,
"stroke-dash-justify": [
("compress",),
("dashes",),
("gaps",),
("none",),
("stretch",),
],
"stroke-dashoffset": t.length + t.percentage,
"stroke-image": t.paint,
"stroke-linecap": [("butt",), ("round",), ("square",)],
"stroke-linejoin": [
("arcs",),
("bevel",),
("crop",),
("fallback",),
("miter",),
("miter-clip",),
("round",),
("stupid",),
],
"stroke-miterlimit": t.number,
"stroke-opacity": t.number,
"stroke-origin": [
("border-box",),
("content-box",),
("fill-box",),
("match-parent",),
("padding-box",),
("stroke-box",),
],
"stroke-position": t.position,
"stroke-repeat": t.repeat_style,
"stroke-width": t.length + t.percentage,
"tab-size": t.length + t.number,
"table-layout": [("auto",), ("fixed",)],
"text-align": [
("center",),
("end",),
("justify",),
("justify-all",),
("left",),
("match-parent",),
("right",),
("start",),
],
"text-align-all": [
("center",),
("end",),
("justify",),
("left",),
("match-parent",),
("right",),
("start",),
],
"text-align-last": [
("auto",),
("center",),
("end",),
("justify",),
("left",),
("match-parent",),
("right",),
("start",),
],
"text-anchor": [("end",), ("middle",), ("start",)],
"text-combine-upright": [("all",), ("none",)] + t.integer,
"text-decoration": [
("blink",),
("dashed",),
("dotted",),
("double",),
("line-through",),
("none",),
("overline",),
("solid",),
("underline",),
("wavy",),
]
+ t.color,
"text-decoration-color": t.color,
"text-decoration-line": [
("blink",),
("line-through",),
("grammar-error",),
("none",),
("overline",),
("spelling-error",),
("underline",),
],
"text-decoration-skip": [
("box-decoration",),
("edges",),
("leading-spaces",),
("none",),
("objects",),
("spaces",),
("trailing-spaces",),
],
"text-decoration-skip-ink": [("auto",), ("none",),],
"text-decoration-style": [
("dashed",),
("dotted",),
("double",),
("solid",),
("wavy",),
],
"text-decoration-thickness": [("auto",), ("from-font",)] + t.length + t.percentage,
"text-decoration-width": [("auto",),] + t.length,
"text-edge": [
("alphabetic",),
("cap",),
("ex",),
("ideographic",),
("ideographic-ink",),
("leading",),
("text",),
],
"text-emphasis": [
("circle",),
("dot",),
("double-circle",),
("filled",),
("none",),
("open",),
("sesame",),
("triangle",),
t.string,
]
+ t.color,
"text-emphasis-color": t.color,
"text-emphasis-position": [("left",), ("over",), ("right",), ("under",)],
"text-emphasis-skip": [("narrow",), ("punctuation",), ("spaces",), ("symbols",),],
"text-emphasis-style": [
("circle",),
("dot",),
("double-circle",),
("filled",),
("none",),
("open",),
("sesame",),
("triangle",),
t.string,
],
"text-group-align": [("center",), ("end",), ("left",), ("none",), ("right",), ("start",)],
"text-indent": [("each-line",), ("hanging",)] + t.length + t.percentage,
"text-justify": [("auto",), ("inter-character",), ("inter-word",), ("none",),],
"text-orientation": [("mixed",), ("sideways",), ("upright",)],
"text-overflow": [("clip",), ("ellipsis",)],
"text-rendering": [
("auto",),
("geometricPrecision",),
("optimizeLegibility",),
("optimizeSpeed",),
],
"text-shadow": [("none",)] + t.color + t.length,
"text-size-adjust": [("auto",), ("none",)] + t.percentage,
"text-space-collapse": [
("collapse",),
("discard",),
("preserve",),
("preserve-breaks",),
("preserve-spaces",),
],
"text-space-trim": [
("discard-after",),
("discard-before",),
("none",),
("trim-inner",),
],
"text-spacing": [
("allow-end",),
("ideograph-alpha",),
("ideograph-numeric",),
("no-compress",),
("none",),
("normal",),
("punctuation",),
("space-adjacent",),
("space-end",),
("space-first",),
("space-start",),
("trim-adjacent",),
("trim-end",),
("trim-start",),
],
"text-transform": [
("capitalize",),
("full-size-kana",),
("full-width",),
("lowercase",),
("none",),
("uppercase",),
],
"text-underline-offset": [("auto",)] + t.length + t.percentage,
"text-underline-position": [("auto",), ("from-font",), ("left",), ("right",), ("under",)],
"text-wrap": [("balance",), ("nowrap",), ("pretty",), ("stable",), ("wrap",)],
"top-right-left-bottom": [
("auto",),
("block-end",),
("block-start",),
("inline-end",),
("inline-start",),
]
+ t.length
+ t.percentage,
"transform": [("none",)] + t.transform_list,
"transform-box": [("border-box",), ("fill-box",), ("view-box",)],
"transform-origin": t.position,
"transform-style": [("flat",), ("preserve-3d",)],
"transition": t.single_transition,
"transition-delay": t.time,
"transition-duration": t.time,
"transition-property": [("all",), ("none",), t.identifier],
"transition-timing-function": t.single_timing_function,
"translate": t.length + t.percentage,
"unicode-bidi": [
("bidi-override",),
("embed",),
("isolate",),
("isolate-override",),
("normal",),
("plaintext",),
],
"update": [("fast",), ("none",), ("slow",)],
"user-select": [("all",), ("auto",), ("contain",), ("none",), ("text",)],
"vector-effect": [("non-scaling-stroke",), ("none",)],
"vertical-align": t.alignment_baseline + t.baseline_shift + t.baseline_source,
"visibility": [("collapse",), ("hidden",), ("visible",)],
"voice-balance": [
("center",),
("left",),
("leftwards",),
("right",),
("rightwards",),
]
+ t.number,
"voice-duration": [("auto",)] + t.time,
"voice-family": [("preserve",)] + t.generic_voice + t.voice_name,
"voice-range-pitch": [
("absolute",),
("high",),
("low",),
("medium",),
("x-high",),
("x-low",),
]
+ t.frequency
+ t.percentage
+ t.semitones,
"voice-rate": [
("fast",),
("medium",),
("normal",),
("slow",),
("x-fast",),
("x-slow",),
]
+ t.percentage,
"voice-stress": [("moderate",), ("none",), ("normal",), ("reduced",), ("strong",),],
"voice-volume": [
("loud",),
("medium",),
("silent",),
("soft",),
("x-loud",),
("x-soft",),
]
+ t.decibel,
"white-space": [
("break-spaces",),
("normal",),
("nowrap",),
("pre",),
("pre-line",),
("pre-wrap",),
],
"widows-orphans": t.integer,
"width": [
("auto",),
("contain",),
("fill",),
("fit-content",),
("max-content",),
("min-content",),
("none",),
("stretch",),
t.fit_content,
]
+ t.length
+ t.percentage,
# width-height is a media-feature. Do not confuse it with the width and
# height properties.
"width-height": t.length,
"will-change": [("auto",)] + t.animateable_feature,
"word-break": [("break-all",), ("keep-all",), ("normal",)],
"word-boundary-detection": [("manual",), ("none",), t.auto],
"word-boundary-expansion": [("ideographic-space",), ("none",), ("space",)],
"word-spacing": [("normal",)] + t.length,
"word-wrap": [("anywhere",), ("break-word",), ("normal",)],
"wrap-before-after": [
("auto",),
("avoid",),
("avoid-flex",),
("avoid-line",),
("flex",),
("line",),
],
"wrap-flow": [
("auto",),
("both",),
("clear",),
("end",),
("maximum",),
("minimum",),
("start",),
],
"wrap-inside": [("auto",), ("avoid",)],
"wrap-through": [("none",), ("wrap",)],
"writing-mode": [
("horizontal-tb",),
("vertical-lr",),
("vertical-rl",),
],
"z-index": [("auto",)] + t.integer,
}
allow_word_completions = frozenset(
(
"animation",
"animation-name",
"chains",
"composes",
"counter-set-reset-increment",
"custom",
"flow",
"flow-from",
"flow-into",
"font",
"font-palette",
"grid-row-column-area",
"list-style",
"list-style-type",
"nav-up-right-left-down",
"page",
"running",
"string-set",
"transition",
"transition-property",
"voice-family",
"will-change",
)
)
def get_values(property_name):
completions = name_to_completions.get(property_name, []) + [t.var]
if property_name in allow_word_completions:
return completions
return completions, sublime.INHIBIT_WORD_COMPLETIONS
def sort_and_uniq_completions():
for name in name_to_completions:
name_to_completions[name] = list(set(name_to_completions[name]))
name_to_completions[name].sort()
sort_and_uniq_completions()
# This is identical to the property names list, except that it does not insert
# the trailing semicolon.
supports_conditions = []
def populate_supports_conditions_list():
"""Populate the completions list for @supports conditions.
    An @supports condition looks like this: (display: flex). All the property
names should be offered as completions, but the inserted text should not
have a trailing semicolon.
"""
global supports_conditions
old_suffix = r"${1};"
new_suffix = r"${0}"
for label, completion in names:
# strip trailing semicolon and replace ${1} with ${0}
if completion.endswith(old_suffix):
completion = completion[: -len(old_suffix)] + new_suffix
supports_conditions.append((label, completion))
populate_supports_conditions_list()
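# Illustrative note (the "width" entry below is a hypothetical example, not
# necessarily present in `names`): given an entry ("width", "width: ${1};"),
# the loop above would append ("width", "width: ${0}") to
# `supports_conditions`, so completing a property name inside an
# @supports (...) condition inserts the name and colon but no trailing
# semicolon.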
|
sympy/stats/tests/test_compound_rv.py | shilpiprd/sympy | 8,323 | 12609528 | <gh_stars>1000+
from sympy import (symbols, S, erf, sqrt, pi, exp, gamma, Interval, oo, beta,
Eq, Piecewise, Integral, Abs, arg, Dummy, Sum, factorial)
from sympy.stats import (Normal, P, E, density, Gamma, Poisson, Rayleigh,
variance, Bernoulli, Beta, Uniform, cdf)
from sympy.stats.compound_rv import CompoundDistribution, CompoundPSpace
from sympy.stats.crv_types import NormalDistribution
from sympy.stats.drv_types import PoissonDistribution
from sympy.stats.frv_types import BernoulliDistribution
from sympy.testing.pytest import raises, ignore_warnings
from sympy.stats.joint_rv_types import MultivariateNormalDistribution
x = symbols('x')
def test_normal_CompoundDist():
X = Normal('X', 1, 2)
Y = Normal('X', X, 4)
assert density(Y)(x).simplify() == sqrt(10)*exp(-x**2/40 + x/20 - S(1)/40)/(20*sqrt(pi))
assert E(Y) == 1 # it is always equal to mean of X
assert P(Y > 1) == S(1)/2 # as 1 is the mean
assert P(Y > 5).simplify() == S(1)/2 - erf(sqrt(10)/5)/2
assert variance(Y) == variance(X) + 4**2 # 2**2 + 4**2
# https://math.stackexchange.com/questions/1484451/
# (Contains proof of E and variance computation)
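    # Sketch of that proof, for clarity: with Y | X ~ Normal(X, 4) and
    # X ~ Normal(1, 2), the laws of total expectation and total variance give
    # E(Y) = E(E(Y|X)) = E(X) = 1 and
    # Var(Y) = E(Var(Y|X)) + Var(E(Y|X)) = 4**2 + Var(X) = 16 + 4 = 20.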
def test_poisson_CompoundDist():
k, t, y = symbols('k t y', positive=True, real=True)
G = Gamma('G', k, t)
D = Poisson('P', G)
assert density(D)(y).simplify() == t**y*(t + 1)**(-k - y)*gamma(k + y)/(gamma(k)*gamma(y + 1))
# https://en.wikipedia.org/wiki/Negative_binomial_distribution#Gamma%E2%80%93Poisson_mixture
assert E(D).simplify() == k*t # mean of NegativeBinomialDistribution
def test_bernoulli_CompoundDist():
X = Beta('X', 1, 2)
Y = Bernoulli('Y', X)
assert density(Y).dict == {0: S(2)/3, 1: S(1)/3}
assert E(Y) == P(Eq(Y, 1)) == S(1)/3
assert variance(Y) == S(2)/9
assert cdf(Y) == {0: S(2)/3, 1: 1}
# test issue 8128
a = Bernoulli('a', S(1)/2)
b = Bernoulli('b', a)
assert density(b).dict == {0: S(1)/2, 1: S(1)/2}
assert P(b > 0.5) == S(1)/2
X = Uniform('X', 0, 1)
Y = Bernoulli('Y', X)
assert E(Y) == S(1)/2
assert P(Eq(Y, 1)) == E(Y)
def test_unevaluated_CompoundDist():
    # These tests should be removed once evaluation works for these cases, as
    # they are currently not evaluated completely in sympy.
R = Rayleigh('R', 4)
X = Normal('X', 3, R)
_k = Dummy('k')
exprd = Piecewise((exp(S(3)/4 - x/4)/8, 2*Abs(arg(x - 3)) <= pi/2),
(sqrt(2)*Integral(exp(-(_k**4 + 16*(x - 3)**2)/(32*_k**2)),
(_k, 0, oo))/(32*sqrt(pi)), True))
assert (density(X)(x).simplify()).dummy_eq(exprd.simplify())
expre = Integral(_k*Integral(sqrt(2)*exp(-_k**2/32)*exp(-(_k - 3)**2/(2*_k**2)
)/(32*sqrt(pi)), (_k, 0, oo)), (_k, -oo, oo))
with ignore_warnings(UserWarning): ### TODO: Restore tests once warnings are removed
assert E(X, evaluate=False).rewrite(Integral).dummy_eq(expre)
X = Poisson('X', 1)
Y = Poisson('Y', X)
Z = Poisson('Z', Y)
exprd = exp(-1)*Sum(exp(-Y)*Y**x*Sum(exp(-X)*X**Y/(factorial(X)*factorial(Y)
), (X, 0, oo)), (Y, 0, oo))/factorial(x)
assert density(Z)(x).simplify() == exprd
N = Normal('N', 1, 2)
M = Normal('M', 3, 4)
D = Normal('D', M, N)
exprd = Integral(sqrt(2)*exp(-(_k - 1)**2/8)*Integral(exp(-(-_k + x
)**2/(2*_k**2))*exp(-(_k - 3)**2/32)/(8*pi*_k)
, (_k, -oo, oo))/(4*sqrt(pi)), (_k, -oo, oo))
assert density(D, evaluate=False)(x).dummy_eq(exprd)
def test_Compound_Distribution():
X = Normal('X', 2, 4)
N = NormalDistribution(X, 4)
C = CompoundDistribution(N)
assert C.is_Continuous
assert C.set == Interval(-oo, oo)
assert C.pdf(x, evaluate=True).simplify() == exp(-x**2/64 + x/16 - S(1)/16)/(8*sqrt(pi))
assert not isinstance(CompoundDistribution(NormalDistribution(2, 3)),
CompoundDistribution)
M = MultivariateNormalDistribution([1, 2], [[2, 1], [1, 2]])
raises(NotImplementedError, lambda: CompoundDistribution(M))
X = Beta('X', 2, 4)
B = BernoulliDistribution(X, 1, 0)
C = CompoundDistribution(B)
assert C.is_Finite
assert C.set == {0, 1}
y = symbols('y', negative=False, integer=True)
assert C.pdf(y, evaluate=True) == Piecewise((S(1)/(30*beta(2, 4)), Eq(y, 0)),
(S(1)/(60*beta(2, 4)), Eq(y, 1)), (0, True))
k, t, z = symbols('k t z', positive=True, real=True)
G = Gamma('G', k, t)
X = PoissonDistribution(G)
C = CompoundDistribution(X)
assert C.is_Discrete
assert C.set == S.Naturals0
assert C.pdf(z, evaluate=True).simplify() == t**z*(t + 1)**(-k - z)*gamma(k \
+ z)/(gamma(k)*gamma(z + 1))
def test_compound_pspace():
X = Normal('X', 2, 4)
Y = Normal('Y', 3, 6)
assert not isinstance(Y.pspace, CompoundPSpace)
N = NormalDistribution(1, 2)
D = PoissonDistribution(3)
B = BernoulliDistribution(0.2, 1, 0)
pspace1 = CompoundPSpace('N', N)
pspace2 = CompoundPSpace('D', D)
pspace3 = CompoundPSpace('B', B)
assert not isinstance(pspace1, CompoundPSpace)
assert not isinstance(pspace2, CompoundPSpace)
assert not isinstance(pspace3, CompoundPSpace)
M = MultivariateNormalDistribution([1, 2], [[2, 1], [1, 2]])
raises(ValueError, lambda: CompoundPSpace('M', M))
Y = Normal('Y', X, 6)
assert isinstance(Y.pspace, CompoundPSpace)
assert Y.pspace.distribution == CompoundDistribution(NormalDistribution(X, 6))
assert Y.pspace.domain.set == Interval(-oo, oo)
|
iot_hunter/import_data_to_es.py | byamao1/HaboMalHunter | 727 | 12609562 | <gh_stars>100-1000
#!/usr/bin/env python
# Tencent is pleased to support the open source community by making IoTHunter available.
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import sys
import os
import argparse
import json
from conf import *
from common import *
try:
from elasticsearch import Elasticsearch
from elasticsearch import helpers
except ImportError as e:
raise ImportError("\nImport faild: %s.\n" % str(e) \
+ "elasticsearch package not installed.\n" \
+ "see https://github.com/elastic/elasticsearch-py")
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
class ElasticSearchUtil():
"""
    Official docs: https://elasticsearch-py.readthedocs.io/
"""
def __init__(self, host = None):
self.conn = Elasticsearch(host)
self.file_lines = 0
def __del__(self):
if not self.conn:
self.conn.close()
self.file_lines = 0
def create(self, index):
return self.conn.indices.create(index, ignore = 400)
def insert(self, index, doc_type, body, id = None):
return self.conn.index(index, doc_type, body, id)
def delete_all(self, index, doc_type = None):
return self.conn.indices.delete(index)
def delete_by_id(self, index, doc_type, id):
return self.conn.delete(index, doc_type, id = id)
def delete_by_query(self, index, body, doc_type = None):
return self.conn.delete_by_query(index, body, doc_type)
def update(self, index, doc_type, body, id):
return self.conn.update(index, doc_type, body, id)
def search(self, index = None, doc_type = None, body = None):
return self.conn.search(index, doc_type, body)
def get(self, index, doc_type, id):
return self.conn.get(index, doc_type, id)
def bulk(self, action):
return helpers.bulk(self.conn, action)
def get_file_data(self, index, doc_type, file_path):
with open(file_path, 'rb') as f:
for result in f:
self.file_lines += 1
try:
res_json = json.loads(result)
yield {
"_index": index,
"_type": doc_type,
"_id": res_json["md5"],
"_source": result
}
except Exception as e:
logger.error("get_file_data():Load json error.%s" % result, exc_info=True)
def bulk_by_file(self, index, doc_type, file_path):
self.file_lines = 0
if not os.path.isfile(file_path):
logger.error("bulk_by_file():Invalid File Path: %s" % file_path)
return
logger.info("Import data from file:%s" % file_path)
ret = self.bulk(self.get_file_data(index, doc_type, file_path))
success = ret[0]
fail = self.file_lines - success
logger.info("Import data complete.Success:%d Fail:%d" % (success, fail))
def main():
parser = argparse.ArgumentParser(description = 'Import Data Tool for Elasticsearch.')
parser.add_argument('-r', dest='result_file_path', default=None, help='load analysis result info to elasticsearch')
    parser.add_argument('-o', dest='output_dir', default=None, help='output folder path for log files, use RESULT_OUTPUT_DIR as default in conf.py.')
args = parser.parse_args()
while True:
output_dir = RESULT_OUTPUT_DIR
if args.output_dir:
output_dir = os.path.abspath(args.output_dir)
if not os.path.isdir(output_dir):
print ("Invalid Path: %s" % output_dir)
print ("ERROR: You need to set RESULT_OUTPUT_DIR in file conf.py or use -o to set this variable.")
break
global logger
logger = enable_logging(ES_LOGGER_NAME, os.path.join(output_dir, ES_IMPORT_DATA_LOG))
if not args.result_file_path:
parser.print_help()
break
else:
es = ElasticSearchUtil(ES_HOST)
es.bulk_by_file(ES_INDEX_NAME, ES_TYPE_NAME, args.result_file_path)
break
if __name__ == '__main__':
main()
|
cli.py | fakegit/DEXBot | 249 | 12609565 | <filename>cli.py
#!/usr/bin/env python3
from dexbot import cli
if __name__ == '__main__':
cli.main()
|
tests/blockchain/MockedBlockchain.py | fungibly/QRL | 441 | 12609589 | <reponame>fungibly/QRL
import contextlib
from math import ceil, log
from mock import mock, MagicMock, Mock
from pyqryptonight.pyqryptonight import StringToUInt256
from qrl.core import config
from qrl.core.Block import Block
from qrl.core.ChainManager import ChainManager
from qrl.core.DifficultyTracker import DifficultyTracker
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.State import State
from qrl.core.txs.SlaveTransaction import SlaveTransaction
from qrl.core.qrlnode import QRLNode
from tests.misc.helper import get_alice_xmss, get_bob_xmss, set_qrl_dir
class MockedBlockchain(object):
MAXNUMBLOCKS = 1000
def __init__(self, qrlnode, time_mock, ntp_mock):
required_height = ceil(log(self.MAXNUMBLOCKS, 2))
required_height = int(required_height + required_height % 2)
self.qrlnode = qrlnode
self.time_mock = time_mock
self.ntp_mock = ntp_mock
self.alice_xmss = get_alice_xmss(xmss_height=required_height)
self.bob_xmss = get_bob_xmss()
def create_block(self, prev_hash, mining_address=None):
if not mining_address:
mining_address = self.alice_xmss.address
transactions = []
block_prev = self.qrlnode.get_block_from_hash(prev_hash)
block_idx = block_prev.block_number + 1
if block_idx == 1:
slave_tx = SlaveTransaction.create(slave_pks=[self.bob_xmss.pk],
access_types=[0],
fee=0,
xmss_pk=self.alice_xmss.pk)
slave_tx.sign(self.alice_xmss)
slave_tx._data.nonce = 1
transactions = [slave_tx]
time_offset = 60
if block_idx % 2 == 0:
time_offset += 2
self.time_mock.return_value = self.time_mock.return_value + time_offset
self.ntp_mock.return_value = self.ntp_mock.return_value + time_offset
block_new = Block.create(dev_config=config.dev,
block_number=block_idx,
prev_headerhash=block_prev.headerhash,
prev_timestamp=block_prev.timestamp,
transactions=transactions,
miner_address=mining_address,
seed_height=0,
seed_hash=None)
dev_config = self.qrlnode._chain_manager.get_config_by_block_number(block_new.block_number)
while not self.qrlnode._chain_manager.validate_mining_nonce(blockheader=block_new.blockheader,
dev_config=dev_config):
block_new.set_nonces(config.dev, block_new.mining_nonce + 1, 0)
return block_new
def validate(self, block):
if not block.validate(self.qrlnode._chain_manager, {}):
raise Exception('Block Validation Failed')
return True
def add_block(self, block):
self.validate(block)
return self.qrlnode._chain_manager.add_block(block)
def add_new_block(self, mining_address=None):
block_prev = self.qrlnode.get_block_last()
block_new = self.create_block(prev_hash=block_prev.headerhash, mining_address=mining_address)
self.add_block(block_new)
@staticmethod
@contextlib.contextmanager
def create(num_blocks, mining_address=None):
tmp_gen = GenesisBlock()
start_time = tmp_gen.timestamp + config.dev.block_timing_in_seconds
with mock.patch('qrl.core.misc.ntp.getTime') as ntp_mock, \
set_qrl_dir('no_data'), \
State() as state, \
mock.patch('time.time') as time_mock: # noqa
time_mock.return_value = start_time
ntp_mock.return_value = start_time
state.get_measurement = MagicMock(return_value=10000000)
genesis_difficulty = config.user.genesis_difficulty
try:
config.user.genesis_difficulty = 10
genesis_block = GenesisBlock()
chain_manager = ChainManager(state)
chain_manager.load(genesis_block)
chain_manager._difficulty_tracker = Mock()
dt = DifficultyTracker()
tmp_difficulty = StringToUInt256('2')
tmp_target = dt.get_target(tmp_difficulty, config.dev)
chain_manager._difficulty_tracker.get = MagicMock(return_value=(tmp_difficulty, tmp_target))
qrlnode = QRLNode(mining_address=b'')
qrlnode.set_chain_manager(chain_manager)
mock_blockchain = MockedBlockchain(qrlnode, time_mock, ntp_mock)
for block_idx in range(1, num_blocks + 1):
mock_blockchain.add_new_block(mining_address)
yield mock_blockchain
finally:
config.user.genesis_difficulty = genesis_difficulty
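# Minimal usage sketch (an illustration, not part of the QRL test suite):
# `create` mines `num_blocks` blocks on top of genesis before yielding, so a
# test could do
#
#     with MockedBlockchain.create(num_blocks=5) as mock_blockchain:
#         mock_blockchain.add_new_block()
#         assert mock_blockchain.qrlnode.get_block_last().block_number == 6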
|
tests/token_classification.py | Oaklight/parallelformers | 454 | 12609591 | # Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from argparse import ArgumentParser
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer
from parallelformers import parallelize
class TestForTokenClassification(unittest.TestCase):
@torch.no_grad()
def test_forward(self, model, tokens, labels):
output = model(**tokens, labels=labels).logits
print("forward:", output)
print()
assert isinstance(output, torch.Tensor)
if __name__ == "__main__":
os.environ["TOKENIZERS_PARALLELISM"] = "true"
parser = ArgumentParser()
parser.add_argument("--test-name", required=True, type=str)
parser.add_argument("--name", required=True, type=str)
parser.add_argument("--gpu-from", required=True, type=int)
parser.add_argument("--gpu-to", required=True, type=int)
parser.add_argument("--fp16", default=False, action="store_true")
parser.add_argument("--use-pf", default=False, action="store_true")
args = parser.parse_args()
model = AutoModelForTokenClassification.from_pretrained(args.name).eval()
tokenizer = AutoTokenizer.from_pretrained(args.name)
print(f"Test Name: [{model.__class__.__name__}]-[{args.test_name}]\n")
gpus = [
_
for _ in range(
args.gpu_from,
args.gpu_to + 1,
)
]
tokens = tokenizer("Hello, my dog is cute", return_tensors="pt")
labels = torch.tensor([1] * tokens["input_ids"].size(1)).unsqueeze(0)
if args.use_pf:
parallelize(
model,
num_gpus=args.gpu_to + 1,
fp16=args.fp16,
verbose="simple",
)
else:
if args.fp16:
model = model.half()
model = model.cuda()
for t in tokens:
if torch.is_tensor(tokens[t]):
tokens[t] = tokens[t].cuda()
labels = labels.cuda()
for i in gpus:
print(f"GPU {i} alloc: {torch.cuda.memory_allocated(i)}")
print(f"GPU {i} cached: { torch.cuda.memory_reserved(i)}")
print()
test = TestForTokenClassification()
test.test_forward(model, tokens, labels)
print("=========================================================")
|
SoftLayer/CLI/file/detail.py | dvzrv/softlayer-python | 126 | 12609596 | <reponame>dvzrv/softlayer-python<gh_stars>100-1000
"""Display details for a specified volume."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer import utils
@click.command()
@click.argument('volume_id')
@environment.pass_env
def cli(env, volume_id):
"""Display details for a specified volume."""
file_manager = SoftLayer.FileStorageManager(env.client)
file_volume_id = helpers.resolve_id(file_manager.resolve_ids, volume_id, 'File Storage')
file_volume = file_manager.get_file_volume_details(file_volume_id)
file_volume = utils.NestedDict(file_volume)
table = formatting.KeyValueTable(['Name', 'Value'])
table.align['Name'] = 'r'
table.align['Value'] = 'l'
storage_type = file_volume['storageType']['keyName'].split('_').pop(0)
table.add_row(['ID', file_volume['id']])
table.add_row(['Username', file_volume['username']])
table.add_row(['Type', storage_type])
table.add_row(['Capacity (GB)', "%iGB" % file_volume['capacityGb']])
used_space = int(file_volume['bytesUsed']) \
if file_volume['bytesUsed'] else 0
if used_space < (1 << 10):
table.add_row(['Used Space', "%dB" % used_space])
elif used_space < (1 << 20):
table.add_row(['Used Space', "%dKB" % (used_space / (1 << 10))])
elif used_space < (1 << 30):
table.add_row(['Used Space', "%dMB" % (used_space / (1 << 20))])
else:
table.add_row(['Used Space', "%dGB" % (used_space / (1 << 30))])
if file_volume.get('provisionedIops'):
table.add_row(['IOPs', float(file_volume['provisionedIops'])])
if file_volume.get('storageTierLevel'):
table.add_row([
'Endurance Tier',
file_volume['storageTierLevel'],
])
table.add_row([
'Data Center',
file_volume['serviceResource']['datacenter']['name'],
])
table.add_row([
'Target IP',
file_volume['serviceResourceBackendIpAddress'],
])
if file_volume['fileNetworkMountAddress']:
table.add_row([
'Mount Address',
file_volume['fileNetworkMountAddress'],
])
if file_volume['snapshotCapacityGb']:
table.add_row([
'Snapshot Capacity (GB)',
file_volume['snapshotCapacityGb'],
])
if 'snapshotSizeBytes' in file_volume['parentVolume']:
table.add_row([
'Snapshot Used (Bytes)',
file_volume['parentVolume']['snapshotSizeBytes'],
])
table.add_row(['# of Active Transactions', "%i"
% file_volume['activeTransactionCount']])
if file_volume['activeTransactions']:
for trans in file_volume['activeTransactions']:
if 'transactionStatus' in trans and 'friendlyName' in trans['transactionStatus']:
table.add_row(['Ongoing Transaction', trans['transactionStatus']['friendlyName']])
table.add_row(['Replicant Count', "%u" % file_volume.get('replicationPartnerCount', 0)])
if file_volume['replicationPartnerCount'] > 0:
# This if/else temporarily handles a bug in which the SL API
# returns a string or object for 'replicationStatus'; it seems that
# the type is string for File volumes and object for Block volumes
if 'message' in file_volume['replicationStatus']:
table.add_row(['Replication Status', "%s"
% file_volume['replicationStatus']['message']])
else:
table.add_row(['Replication Status', "%s"
% file_volume['replicationStatus']])
replicant_list = []
for replicant in file_volume['replicationPartners']:
replicant_table = formatting.Table(['Replicant ID',
replicant['id']])
replicant_table.add_row([
'Volume Name',
utils.lookup(replicant, 'username')])
replicant_table.add_row([
'Target IP',
utils.lookup(replicant, 'serviceResourceBackendIpAddress')])
replicant_table.add_row([
'Data Center',
utils.lookup(replicant,
'serviceResource', 'datacenter', 'name')])
replicant_table.add_row([
'Schedule',
utils.lookup(replicant,
'replicationSchedule', 'type', 'keyname')])
replicant_list.append(replicant_table)
table.add_row(['Replicant Volumes', replicant_list])
if file_volume.get('originalVolumeSize'):
original_volume_info = formatting.Table(['Property', 'Value'])
original_volume_info.add_row(['Original Volume Size', file_volume['originalVolumeSize']])
if file_volume.get('originalVolumeName'):
original_volume_info.add_row(['Original Volume Name', file_volume['originalVolumeName']])
if file_volume.get('originalSnapshotName'):
original_volume_info.add_row(['Original Snapshot Name', file_volume['originalSnapshotName']])
table.add_row(['Original Volume Properties', original_volume_info])
notes = '{}'.format(file_volume.get('notes', ''))
table.add_row(['Notes', notes])
env.fout(table)
|
tests/test_version.py | JerryX1110/VFS | 549 | 12609597 | import mmaction
def test_version():
version = mmaction.__version__
assert isinstance(version, str)
assert isinstance(mmaction.short_version, str)
assert mmaction.short_version in version and '+' in version
|
gdal/swig/python/gdal-utils/osgeo_utils/samples/gdal_rm.py | freespace/gdal | 3,100 | 12609629 | #!/usr/bin/env python3
###############################################################################
# $Id$
#
# Project: GDAL samples
# Purpose: Delete a virtual file
# Author: <NAME> <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2017, <NAME> <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import gdal
def Usage():
print('Usage: gdal_rm [-r] filename')
return -1
def gdal_rm_recurse(filename, simulate=False):
delete_self = True
if filename.endswith('/*'):
delete_self = False
filename = filename[0:-2]
dir_contents = gdal.ReadDir(filename)
if dir_contents:
for f in dir_contents:
if f not in ('.', '..'):
ret = gdal_rm_recurse(filename + '/' + f, simulate=simulate)
if ret != 0:
return ret
if not delete_self:
return 0
elif simulate:
print('Rmdir(%s)' % filename)
return 0
else:
ret = gdal.Rmdir(filename)
# Some filesystems, like /vsiaz/ don't have a real directory
# implementation. As soon as you remove the last file in the dir,
# the dir "disappears".
if ret < 0:
if gdal.VSIStatL(filename) is None:
ret = 0
return ret
else:
if simulate:
print('Unlink(%s)' % filename)
return 0
return gdal.Unlink(filename)
def gdal_rm(argv, progress=None):
# pylint: disable=unused-argument
filename = None
recurse = False
simulate = False
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return -1
for i in range(1, len(argv)):
if not argv[i]:
return Usage()
if argv[i] == '-r':
recurse = True
elif argv[i] == '-simulate':
simulate = True
elif argv[i][0] == '-':
print('Unexpected option : %s' % argv[i])
return Usage()
elif filename is None:
filename = argv[i]
else:
print('Unexpected option : %s' % argv[i])
return Usage()
if filename is None:
return Usage()
if filename == '/':
        user_input = input('Please confirm your action with YES: ')
if user_input != 'YES':
print('Aborted')
return 1
if recurse:
ret = gdal_rm_recurse(filename, simulate=simulate)
else:
if simulate:
print('gdal.Unlink(%s)' % filename)
ret = 0
else:
ret = gdal.Unlink(filename)
if ret != 0:
print('Deletion failed')
return ret
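# Usage sketch (the /vsimem/ paths are illustrative assumptions):
#
#     gdal_rm(['gdal_rm', '-r', '/vsimem/some_dir'])    # remove dir and contents
#     gdal_rm(['gdal_rm', '-r', '/vsimem/some_dir/*'])  # remove contents, keep dir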
def main(argv):
return gdal_rm(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
aiida/storage/psql_dos/orm/convert.py | mkrack/aiida-core | 153 | 12609646 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Module to get the backend instance from the Models instance
"""
from functools import singledispatch
from aiida.storage.psql_dos.models.authinfo import DbAuthInfo
from aiida.storage.psql_dos.models.comment import DbComment
from aiida.storage.psql_dos.models.computer import DbComputer
from aiida.storage.psql_dos.models.group import DbGroup
from aiida.storage.psql_dos.models.log import DbLog
from aiida.storage.psql_dos.models.node import DbLink, DbNode
from aiida.storage.psql_dos.models.user import DbUser
# pylint: disable=cyclic-import
#####################################################################
# Singledispatch to get the backend instance from the Models instance
#####################################################################
@singledispatch
def get_backend_entity(dbmodel, backend): # pylint: disable=unused-argument
"""
Default get_backend_entity
"""
raise TypeError(f"No corresponding AiiDA backend class exists for the model class '{dbmodel.__class__.__name__}'")
################################
# Singledispatch for SQLA Models
################################
@get_backend_entity.register(DbUser)
def _(dbmodel, backend):
"""
get_backend_entity for SQLA DbUser
"""
from . import users
return users.SqlaUser.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbGroup)
def _(dbmodel, backend):
"""
get_backend_entity for SQLA DbGroup
"""
from . import groups
return groups.SqlaGroup.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbComputer)
def _(dbmodel, backend):
"""
    get_backend_entity for SQLA DbComputer
"""
from . import computers
return computers.SqlaComputer.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbNode)
def _(dbmodel, backend):
"""
get_backend_entity for SQLA DbNode. It will return an ORM instance since
    there is no Node backend entity yet.
"""
from . import nodes
return nodes.SqlaNode.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbAuthInfo)
def _(dbmodel, backend):
"""
get_backend_entity for SQLA DbAuthInfo
"""
from . import authinfos
return authinfos.SqlaAuthInfo.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbComment)
def _(dbmodel, backend):
"""
Get the comment from the model
"""
from . import comments
return comments.SqlaComment.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbLog)
def _(dbmodel, backend):
"""
    Get the log from the model
"""
from . import logs
return logs.SqlaLog.from_dbmodel(dbmodel, backend)
@get_backend_entity.register(DbLink)
def _(dbmodel, backend):
"""
Convert a dblink to the backend entity
"""
from aiida.orm.utils.links import LinkQuadruple
return LinkQuadruple(dbmodel.input_id, dbmodel.output_id, dbmodel.type, dbmodel.label)
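# Usage sketch (illustrative only): `get_backend_entity` dispatches on the
# type of its first argument, e.g.
#
#     entity = get_backend_entity(db_user_instance, backend)  # -> SqlaUser
#     link = get_backend_entity(db_link_instance, backend)    # -> LinkQuadruple
#
# A model class without a registered handler falls through to the default
# implementation above, which raises TypeError.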
|