id | content
---|---
466980
|
import collections
import re
import inspect
import copy
from functools import wraps
from flask import request
from flask_restful import Resource, reqparse, inputs
# python3 compatibility
try:
basestring
except NameError:
basestring = str
class ValidationError(ValueError):
pass
def auth(api_key, endpoint, method):
"""Override this function in your application.
    If this function returns False, a 401 Unauthorized is raised and the documentation is not visible.
"""
return True
def _auth(*args, **kwargs):
return auth(*args, **kwargs)
def create_swagger_endpoint(swagger_object):
"""Creates a flask_restful api endpoint for the swagger spec"""
class SwaggerEndpoint(Resource):
def get(self):
swagger_doc = {}
# filter keys with empty values
for k, v in swagger_object.items():
if v or k == 'paths':
if k == 'paths':
paths = {}
for endpoint, view in v.items():
views = {}
for method, docs in view.items():
                                # check permissions. If a user does not have access to an API, do not show its docs
if auth(request.args.get('api_key'), endpoint, method):
views[method] = docs
if views:
paths[endpoint] = views
swagger_doc['paths'] = collections.OrderedDict(sorted(paths.items()))
else:
swagger_doc[k] = v
return swagger_doc
return SwaggerEndpoint
def set_nested(d, key_spec, value):
"""
Sets a value in a nested dictionary.
:param d: The dictionary to set
:param key_spec: The key specifier in dotted notation
:param value: The value to set
"""
keys = key_spec.split('.')
for key in keys[:-1]:
d = d.setdefault(key, {})
d[keys[-1]] = value
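# Illustrative usage of set_nested (a hedged sketch; the keys and value below are
# only examples):
#
#   swagger = {}
#   set_nested(swagger, 'info.contact.email', 'dev@example.com')
#   # swagger == {'info': {'contact': {'email': 'dev@example.com'}}}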
def add_parameters(swagger_object, parameters):
"""
Populates a swagger document with parameters.
:param parameters: A collection of parameters to add
:param swagger_object: The swagger document to add parameters to
"""
# A list of accepted parameters. The first item in the tuple is the
# name of keyword argument, the second item is the default value,
# and the third item is the key name in the swagger object.
fields = [
('title', '', 'info.title'),
('description', '', 'info.description'),
('terms', '', 'info.termsOfService'),
('api_version', '', 'info.version'),
('contact', {}, 'info.contact'),
('license', {}, 'info.license'),
('host', '', 'host'),
('base_path', '', 'basePath'),
('schemes', [], 'schemes'),
('consumes', [], 'consumes'),
('produces', [], 'produces'),
('parameters', {}, 'parameters'),
('responses', {}, 'responses'),
('security_definitions', {}, 'securityDefinitions'),
('security', [], 'security'),
('tags', [], 'tags'),
('external_docs', {}, 'externalDocs'),
]
for field in fields:
value = parameters.pop(field[0], field[1])
if value:
set_nested(swagger_object, field[2], value)
def get_data_type(param):
"""
Maps swagger data types to Python types.
:param param: swagger parameter
:return: Python type
"""
param_type = param.get('type', None)
if param_type:
if param_type == 'array':
param = param['items']
param_type = param['type']
if param_type == 'string':
param_format = param.get('format', None)
if param_format == 'date':
return inputs.date
elif param_format == 'date-time':
return inputs.datetime_from_iso8601
return str
elif param_type == 'integer':
return int
elif param_type == 'boolean':
return inputs.boolean
elif param_type == 'number':
param_format = param.get('format', None)
if param_format == 'float' or param_format == 'double':
return float
return None
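# Illustrative mappings for get_data_type (a hedged sketch of the resolution above):
#
#   get_data_type({'type': 'string', 'format': 'date'})             # -> inputs.date
#   get_data_type({'type': 'array', 'items': {'type': 'integer'}})  # -> int
#   get_data_type({'type': 'number', 'format': 'double'})           # -> float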
def get_data_action(param):
param_type = param.get('type', None)
if param_type == 'array':
return 'append'
return 'store'
def get_parser_arg(param):
"""
Return an argument for the request parser.
:param param: Swagger document parameter
:return: Request parser argument
"""
return (
param['name'],
{
'dest': param['name'],
'type': get_data_type(param),
'location': 'args',
'help': param.get('description', None),
'required': param.get('required', False),
'default': param.get('default', None),
'action': get_data_action(param)
})
def get_parser_args(params):
"""
Return a list of arguments for the request parser.
:param params: Swagger document parameters
:return: Request parser arguments
"""
return [get_parser_arg(p) for p in params if p['in'] == 'query']
def get_parser(params):
"""
Returns a parser for query parameters from swagger document parameters.
:param params: swagger doc parameters
:return: Query parameter parser
"""
parser = reqparse.RequestParser()
for arg in get_parser_args(params):
parser.add_argument(arg[0], **arg[1])
return parser
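# Illustrative usage of get_parser (a hedged sketch; the parameter definition is
# hypothetical):
#
#   params = [{'name': 'page', 'in': 'query', 'type': 'integer',
#              'description': 'Page number', 'required': False}]
#   parser = get_parser(params)
#   # args = parser.parse_args()  # inside a Flask request context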
def doc(operation_object):
"""Decorator to save the documentation of an api endpoint.
Saves the passed arguments as an attribute to use them later when generating the swagger spec.
"""
def decorated(f):
f.__swagger_operation_object = copy.deepcopy(operation_object)
@wraps(f)
def inner(self, *args, **kwargs):
# Get names of resource function arguments
if hasattr(inspect, 'signature'):
func_args = list(inspect.signature(f).parameters.keys())
else:
func_args = inspect.getargspec(f)[0]
# Add a parser for query arguments if the special argument '_parser' is present
if 'parameters' in f.__swagger_operation_object and '_parser' in func_args:
kwargs.update({'_parser': get_parser(f.__swagger_operation_object['parameters'])})
return f(self, *args, **kwargs)
return inner
return decorated
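# Illustrative usage of the doc decorator (a hedged sketch; the resource, fields
# and response schema are hypothetical and only show how it is meant to be applied):
#
#   class UserListResource(Resource):
#       @doc({
#           'summary': 'List users',
#           'parameters': [{'name': 'page', 'in': 'query', 'type': 'integer'}],
#           'responses': {'200': {'description': 'OK'}},
#       })
#       def get(self, _parser=None):
#           args = _parser.parse_args() if _parser else {}
#           return {'page': args.get('page')}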
def validate_path_item_object(path_item_object):
"""Checks if the passed object is valid according to http://swagger.io/specification/#pathItemObject"""
for k, v in path_item_object.items():
if k in ['$ref']:
continue
if k in ['get', 'put', 'post', 'delete', 'options', 'head', 'patch']:
validate_operation_object(v)
continue
if k in ['parameters']:
for parameter in v:
try:
validate_reference_object(parameter)
except ValidationError:
validate_parameter_object(parameter)
continue
if k.startswith('x-'):
continue
raise ValidationError('Invalid path item object. Unknown field "{field}". See {url}'.format(
field=k,
url='http://swagger.io/specification/#pathItemObject'))
def validate_operation_object(operation_object):
for k, v in operation_object.items():
if k in ['tags', 'consumes', 'produces', 'schemes']:
if isinstance(v, list):
continue
            raise ValidationError('Invalid operation object. "{0}" must be a list but was "{1}"'.format(k, type(v)))
if k in ['deprecated']:
if isinstance(v, bool):
continue
            raise ValidationError('Invalid operation object. "{0}" must be a bool but was "{1}"'.format(k, type(v)))
if k in ['summary', 'description', 'operationId']:
if isinstance(v, basestring):
continue
            raise ValidationError('Invalid operation object. "{0}" must be a string but was "{1}"'.format(k, type(v)))
if k in ['externalDocs']:
validate_external_documentation_object(v)
continue
if k in ['parameters']:
for parameter in v:
try:
validate_reference_object(parameter)
except ValidationError:
validate_parameter_object(parameter)
continue
if k in ['responses']:
validate_responses_object(v)
continue
if k in ['security']:
validate_security_requirement_object(v)
continue
if k.startswith('x-'):
continue
raise ValidationError('Invalid operation object. Unknown field "{field}". See {url}'.format(
field=k,
url='http://swagger.io/specification/#pathItemObject'))
if 'responses' not in operation_object:
raise ValidationError('Invalid operation object. Missing field "responses"')
def validate_parameter_object(parameter_object):
for k, v in parameter_object.items():
if k not in ['name', 'in', 'description', 'required', 'schema', 'type', 'format', 'allowEmptyValue', 'items',
'collectionFormat', 'default', 'maximum', 'exclusiveMaximum', 'minimum', 'exclusiveMinimum',
'maxLength', 'minLength', 'pattern', 'maxItems', 'minItems', 'uniqueItems', 'enum', 'multipleOf',
'reqparser']:
raise ValidationError('Invalid parameter object. Unknown field "{field}". See {url}'.format(
field=k,
url='http://swagger.io/specification/#parameterObject'))
if 'reqparser' in parameter_object:
if 'name' not in parameter_object:
raise ValidationError('name for request parser not specified')
if 'parser' not in parameter_object or not isinstance(parameter_object['parser'], reqparse.RequestParser):
raise ValidationError('RequestParser object not specified')
return
if 'name' not in parameter_object:
raise ValidationError('Invalid parameter object. Missing field "name"')
if 'in' not in parameter_object:
raise ValidationError('Invalid parameter object. Missing field "in"')
else:
if parameter_object['in'] not in ['path', 'query', 'header', 'body', 'formData']:
raise ValidationError(
'Invalid parameter object. Value of field "in" must be path, query, header, body or formData, was "{0}"'.format(
parameter_object['in']))
if parameter_object['in'] == 'body':
if 'schema' not in parameter_object:
raise ValidationError('Invalid parameter object. Missing field "schema"')
else:
if 'type' not in parameter_object:
raise ValidationError('Invalid parameter object. Missing field "type"')
if parameter_object['type'] == 'array':
if 'items' not in parameter_object:
raise ValidationError('Invalid parameter object. Missing field "items"')
def validate_reference_object(parameter_object):
if len(parameter_object.keys()) > 1 or '$ref' not in parameter_object:
raise ValidationError('Invalid reference object. It may only contain key "$ref"')
def validate_external_documentation_object(external_documentation_object):
pass
def validate_responses_object(responses_object):
for k, v in responses_object.items():
if k.startswith('x-'):
continue
try:
validate_reference_object(v)
except ValidationError:
validate_response_object(v)
def validate_response_object(response_object):
for k, v in response_object.items():
if k == 'description':
continue
if k == 'schema':
validate_schema_object(v)
continue
if k == 'headers':
validate_headers_object(v)
continue
if k == 'examples':
validate_example_object(v)
continue
if k.startswith('x-'):
continue
raise ValidationError('Invalid response object. Unknown field "{field}". See {url}'.format(
field=k,
url='http://swagger.io/specification/#responseObject'))
if 'description' not in response_object:
raise ValidationError('Invalid response object. Missing field "description"')
def validate_security_requirement_object(security_requirement_object):
pass
def validate_definitions_object(definition_object):
for k, v in definition_object.items():
validate_schema_object(v)
def validate_schema_object(schema_object):
for k, v in schema_object.items():
if k == 'required' and not isinstance(v, list):
raise ValidationError('Invalid schema object. "{0}" must be a list but was {1}'.format(k, type(v)))
def validate_headers_object(headers_object):
pass
def validate_example_object(example_object):
pass
def extract_swagger_path(path):
"""
    Extracts a swagger-style path from the given flask-style path.
    For example, /path/<parameter> becomes /path/{parameter},
    and /<string(length=2):lang_code>/<string:id>/<float:probability>
    becomes /{lang_code}/{id}/{probability}.
"""
return re.sub('<(?:[^:]+:)?([^>]+)>', '{\\1}', path)
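# Illustrative conversions (a hedged sketch):
#
#   extract_swagger_path('/users/<int:user_id>')              # -> '/users/{user_id}'
#   extract_swagger_path('/<string(length=2):lang_code>/doc') # -> '/{lang_code}/doc'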
def sanitize_doc(comment):
"""
Substitute HTML breaks for new lines in comment text.
:param comment: The comment text
:return: Sanitized comment text
"""
if isinstance(comment, list):
return sanitize_doc('\n'.join(filter(None, comment)))
else:
return comment.replace('\n', '<br/>') if comment else comment
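# Illustrative behaviour of sanitize_doc (a hedged sketch):
#
#   sanitize_doc(['First line.', None, 'Second line.'])  # -> 'First line.<br/>Second line.'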
def parse_method_doc(method, operation):
"""
Parse documentation from a resource method.
:param method: The resource method
:param operation: The operation document
:return: The operation summary
"""
summary = None
full_doc = inspect.getdoc(method)
if full_doc:
lines = full_doc.split('\n')
if lines:
# Append the first line of the docstring to any summary specified
# in the operation document
summary = sanitize_doc([operation.get('summary', None), lines[0]])
return summary
def parse_schema_doc(cls, definition):
"""
Parse documentation from a schema class.
:param cls: The schema class
:param definition: The schema definition
:return: The schema description
"""
description = None
# Skip processing the docstring of the schema class if the schema
# definition already contains a description
if 'description' not in definition:
full_doc = inspect.getdoc(cls)
# Avoid returning the docstring of the base dict class
if full_doc and full_doc != inspect.getdoc(dict):
lines = full_doc.split('\n')
if lines:
# Use the first line of the class docstring as the description
description = sanitize_doc(lines[0])
return description
|
467047
|
import dataclasses
import datetime
import json
from decimal import Decimal
from typing import Iterator, Any, Union
INFINITY = float('inf')
class Decoder(json.JSONDecoder):
def __init__(self):
super().__init__(parse_int=Decimal, parse_float=Decimal)
class Encoder(json.JSONEncoder):
def __init__(self, encode_dataclasses: bool = True, ultimate_fallback=str, **kwargs):
super().__init__(**kwargs)
self._encode_dataclasses = encode_dataclasses
self._ultimate_fallback = ultimate_fallback
# noinspection PyMethodMayBeStatic
def default_raw(self, _o: Any) -> Union[str, type(None)]:
return None
def encode(self, o: Any) -> str:
pieces = self.iterencode(o)
return "".join(list(pieces))
def iterencode(self, obj: Any, **kwargs) -> Iterator[str]:
def floatstr(
o,
allow_nan=self.allow_nan,
            # check for specials. this type of test is platform-specific, so do tests which don't depend on internals.
_repr=repr,
_inf=INFINITY,
_neginf=-INFINITY
):
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
return _make_iterencode(
encode_str=super().encode,
encode_int=int.__repr__,
encode_float=floatstr,
encode_dataclasses=self._encode_dataclasses,
encode_other_raw=self.default_raw,
encode_other=self.default,
ultimate_fallback=self._ultimate_fallback,
)(obj)
# noinspection PyShadowingBuiltins
def _make_iterencode(
encode_str,
encode_int,
encode_float,
encode_dataclasses,
encode_other_raw,
encode_other,
ultimate_fallback,
# HACK: hand-optimized bytecode; turn globals into locals
iter=iter,
next=next,
isinstance=isinstance,
str=str,
list=list,
int=int,
float=float,
dec=Decimal,
):
def _iterencode_dict(o):
it = iter(o.items())
if not o:
yield '{}'
return
yield '{'
key, value = next(it)
yield encode_str(str(key))
yield ':'
yield from _iterencode(value)
for key, value in it:
yield ','
yield encode_str(str(key))
yield ':'
yield from _iterencode(value)
yield '}'
def _iterencode_list(o):
if not o:
yield '[]'
return
yield '['
it = iter(o)
item = next(it)
yield from _iterencode(item)
for item in it:
yield ','
yield from _iterencode(item)
yield ']'
def _iterencode(o):
if isinstance(o, str):
yield encode_str(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, list):
yield from _iterencode_list(o)
elif isinstance(o, dec):
yield str(o)
elif isinstance(o, int):
yield encode_int(o)
elif isinstance(o, float):
yield encode_float(o)
elif isinstance(o, (datetime.date, datetime.datetime)):
yield encode_str(str(o))
else:
try:
yield from _iterencode_dict(o)
except (TypeError, AttributeError):
if encode_dataclasses and dataclasses.is_dataclass(o):
yield from _iterencode_dict(o.__dict__)
else:
try:
raw = encode_other_raw(o)
if raw is not None:
yield raw
else:
yield from _iterencode(encode_other(o))
except TypeError as exc:
if ultimate_fallback is not None:
yield from _iterencode(ultimate_fallback(o))
else:
raise exc from None
return _iterencode
def dumpjson(obj, **kwargs) -> str:
return json.dumps(obj, cls=Encoder, **kwargs)
def loadjson(s):
return json.loads(s)
def loadjson_all(s):
dec = Decoder()
data = s.strip()
while data:
obj, ix = dec.raw_decode(data)
yield obj
data = data[ix:].lstrip()
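if __name__ == "__main__":
    # Minimal demo (an illustrative sketch, not part of the original module):
    # dataclasses, Decimals and dates are handled by the custom Encoder above.
    @dataclasses.dataclass
    class _Point:
        x: Decimal
        y: Decimal

    doc = {"p": _Point(Decimal("1.5"), Decimal("2")), "day": datetime.date(2021, 6, 1)}
    print(dumpjson(doc))  # roughly: {"p":{"x":1.5,"y":2},"day":"2021-06-01"}
    print(list(loadjson_all('{"a": 1} {"b": 2.5}')))
    # -> [{'a': Decimal('1')}, {'b': Decimal('2.5')}] because Decoder parses numbers as Decimal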
|
467083
|
import os
####################################################
# Extract value from split list of data
####################################################
def get_value(lst, row_name, idx):
"""
:param lst: data list, each entry is another list with whitespace separated data
:param row_name: name of the row to find data
:param idx: numeric index of desired value
:return: value
"""
val = None
for l in lst:
if not l:
continue
if l[0] == row_name:
try:
val = l[idx]
except Exception:
print row_name, idx
print lst
val = None
break
return val
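# Illustrative usage of get_value (a hedged sketch; the rows below are hypothetical):
#
#   lst = [['PATIENT_AGE', '42'], ['DIGITIZER', 'HOWTEK']]
#   get_value(lst, 'DIGITIZER', 1)    # -> 'HOWTEK'
#   get_value(lst, 'MISSING_ROW', 1)  # -> None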
####################################################
# ICS Information extraction
####################################################
scanner_map = {
('A', 'DBA'): 'MGH',
('A', 'HOWTEK'): 'MGH',
('B', 'LUMISYS'): 'WFU',
('C', 'LUMISYS'): 'WFU',
('D', 'HOWTEK'): 'ISMD'
}
def get_ics_info(ics_file_path):
"""
:param ics_file_path: path to ics file
:return: dictionary containing all relevant data in ics file
"""
# get letter for scanner type
ics_file_name = os.path.basename(ics_file_path)
letter = ics_file_name[0]
# get data from ics file
with open(ics_file_path, 'r') as f:
lines = map(lambda s: s.strip().split(), f.readlines())
# map ics data to values
ics_dict = {
'patient_id': get_value(lines, 'filename', 1),
'age': get_value(lines, 'PATIENT_AGE', 1),
'scanner_type': get_value(lines, 'DIGITIZER', 1),
'scan_institution': scanner_map[(letter, get_value(lines, 'DIGITIZER', 1))],
'density': get_value(lines, 'DENSITY', 1)
}
for sequence in ['LEFT_CC', 'RIGHT_CC', 'LEFT_MLO', 'RIGHT_MLO']:
if get_value(lines, sequence, 0) is None:
continue
sequence_dict = {
'height': int(get_value(lines, sequence, 2)),
'width': int(get_value(lines, sequence, 4)),
'bpp': int(get_value(lines, sequence, 6)),
'resolution': float(get_value(lines, sequence, 8))
}
ics_dict[sequence] = sequence_dict
return ics_dict
####################################################
# Overlay Information extraction
####################################################
def get_abnormality_data(file_name):
"""
:param file_name: file path of overlay file
:return: data about abnormality
"""
# read lines, strip newlines, split them by whitespace, remove empty lines
with open(file_name, 'r') as file_ptr:
lines = map(lambda s: s.strip().split(), file_ptr.readlines())
lines = filter(lambda l: l != [], lines)
try:
total_abnormalities = int(lines[0][1])
except:
total_abnormalities = 0
if total_abnormalities == 0:
return []
# get index of all lines that start a new abnormality
try:
abnormal_idx = [idx for idx, l in enumerate(lines) if l[0].find("ABNORMALITY") == 0]
abnormal_idx.append(len(lines))
except:
print "Funky Formatted File!\n {0}".format(file_name)
return []
abnormality_data = []
for idx in range(len(abnormal_idx) - 1):
lesion_data = lines[abnormal_idx[idx]:abnormal_idx[idx + 1]]
lesion_type = lesion_data[1][1].lower()
abnormality_data.append((file_name, lesion_type, lesion_data))
return abnormality_data
|
467096
|
import json
import math
import sys
from collections import deque
from glob import glob
from typing import Optional
import cv2
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# use the master branch of nuscenes-devkit instead of pip installed version
import torch
from fire import Fire
from nuscenes.map_expansion.arcline_path_utils import discretize_lane
from nuscenes.map_expansion.map_api import NuScenesMap
from sklearn.neighbors import KDTree
from sdriving.nuscenes.utils import (
get_drivable_area_matrix,
nuscenes_map_to_line_representation,
)
from sdriving.nuscenes.world import NuscenesWorld
from sdriving.tsim import angle_normalize
def get_nusc_maps(map_folder):
nusc_maps = {
map_name: NuScenesMap(dataroot=map_folder, map_name=map_name)
for map_name in [
"singapore-hollandvillage",
"singapore-queenstown",
"boston-seaport",
"singapore-onenorth",
]
}
return nusc_maps
def get_road_img(nmap, midx, midy, width, height, resolution):
dx, bx, (nx, ny) = get_grid(
[-width / 2, -height / 2, width / 2, height / 2],
[resolution, resolution],
)
layers = ["road_segment", "lane"]
lmap = get_local_map(nmap, (midx, midy), width, height, layers)
road_img = np.zeros((nx, ny))
for layer_name in layers:
for poly in lmap[layer_name]:
# draw the lines
pts = np.round(
(poly - np.array([[midx, midy]]) - bx[:2] + dx[:2] / 2.0)
/ dx[:2]
).astype(np.int32)
pts[:, [1, 0]] = pts[:, [0, 1]]
cv2.fillPoly(road_img, [pts], 1.0)
return road_img, dx, bx, nx, ny
class MapHelper(object):
def __init__(self, nusc_maps):
# map_name -> nuscenesMap
self.nusc_maps = nusc_maps
self.info, self.trees = self.prepro_closest()
def prepro_closest(self):
print("preprocessing maps...")
info = {}
trees = {}
for map_name, nmap in self.nusc_maps.items():
info[map_name] = {}
for lane in nmap.lane + nmap.lane_connector:
my_lane = nmap.arcline_path_3.get(lane["token"], [])
info[map_name][lane["token"]] = discretize_lane(
my_lane, resolution_meters=0.5
)
assert len(info[map_name][lane["token"]]) > 0
data = np.array(
[pt for lane in info[map_name] for pt in info[map_name][lane]]
)[:, :2]
keys = [
(map_name, lane, pti)
for lane in info[map_name]
for pti, pt in enumerate(info[map_name][lane])
]
tree = KDTree(data)
trees[map_name] = {"tree": tree, "keys": keys}
print("done!")
return info, trees
def closest(self, map_name, x, y):
distance, index = self.trees[map_name]["tree"].query([[x, y]])
_, lane_key, pti = self.trees[map_name]["keys"][index[0, 0]]
pt = self.info[map_name][lane_key][pti]
return pt, lane_key, pti
def bfs(self, map_name, src, tgt):
nmap = self.nusc_maps[map_name]
queue = deque()
queue.append(src)
tree = {}
tree[src] = "root"
while queue:
s = queue.popleft()
for i in nmap.connectivity[s]["outgoing"]:
# some ghost lanes to avoid https://github.com/nutonomy/nuscenes-devkit/issues/415
if i not in nmap.connectivity:
continue
if i not in tree:
queue.append(i)
tree[i] = s
if tgt in tree:
break
# path exists
if tgt in tree:
path = [tgt]
while path[-1] != src:
path.append(tree[path[-1]])
full_path = list(reversed(path))
# no path exists
else:
full_path = None
return full_path, tree
def get_lane_path(self, map_name, p0, p1):
closep0, lane_key0, pti0 = self.closest(map_name, p0[0], p0[1])
closep1, lane_key1, pti1 = self.closest(map_name, p1[0], p1[1])
path, tree = self.bfs(map_name, lane_key0, lane_key1)
if path is None:
pts = None
elif len(path) == 1:
pts = self.info[map_name][lane_key0][pti0 : (pti1 + 1)]
else:
pts = self.info[map_name][lane_key0][pti0:]
for k in path[1:-1]:
pts.extend(self.info[map_name][k])
pts.extend(self.info[map_name][lane_key1][: (pti1 + 1)])
return pts
def check_in_box(self, pt, center, width, height):
return (abs(pt[0] - center[0]) < width / 2) and (
abs(pt[1] - center[1]) < height / 2
)
def collect_paths(self, map_name, starts, center, width, height):
"""Given Nx2 start positions, find all paths that leave those positions
and end somewhere within the box defined by center, width, height.
"""
nmap = self.nusc_maps[map_name]
all_paths = {}
for starti, start in enumerate(starts):
all_paths[starti] = []
_, lane_key, pti = self.closest(map_name, start[0], start[1])
# point-wise BFS
queue = deque()
src = (lane_key, pti)
queue.append(src)
tree = {src: "root"}
endpoints = []
while queue:
la, pi = queue.popleft()
cur_len = len(queue)
if pi + 1 < len(self.info[map_name][la]):
newpt = self.info[map_name][la][pi + 1]
cand = (la, pi + 1)
if cand not in tree and self.check_in_box(
newpt, center, width, height
):
queue.append(cand)
tree[cand] = (la, pi)
else:
for i in nmap.connectivity[la]["outgoing"]:
if i not in nmap.connectivity:
continue
cand = (i, 0)
if cand not in tree:
queue.append(cand)
tree[cand] = (la, pi)
# we found an "endpoint"
if len(queue) == cur_len:
endpoints.append((la, pi))
for endpoint in endpoints:
path = [endpoint]
while path[-1] != src:
path.append(tree[path[-1]])
full_path = list(reversed(path))
all_paths[starti].append(
[self.info[map_name][la][pi] for la, pi in full_path]
)
return all_paths
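# Illustrative usage of collect_paths (a hedged sketch; the coordinates are
# hypothetical and the nuScenes map expansion must be available locally):
#
#   helper = MapHelper(get_nusc_maps('/path/to/nuScenes-map-expansion'))
#   paths = helper.collect_paths('boston-seaport', [[1289.0, 1049.0]],
#                                center=(1289.15, 1049.04), width=100.0, height=100.0)
#   # paths[0] is a list of point sequences, one per endpoint reachable inside the box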
def get_grid(point_cloud_range, voxel_size):
lower = np.array(point_cloud_range[: (len(point_cloud_range) // 2)])
upper = np.array(point_cloud_range[(len(point_cloud_range) // 2) :])
dx = np.array(voxel_size)
bx = lower + dx / 2.0
nx = list(map(int, (upper - lower) / dx))
return dx, bx, nx
def get_local_map(nmap, center, width, height, layer_names):
# need to get the map here...
box_coords = (
center[0] - width / 2,
center[1] - height / 2,
center[0] + width / 2,
center[1] + height / 2,
)
polys = {}
# polygons
records_in_patch = nmap.get_records_in_patch(
box_coords, layer_names=layer_names, mode="intersect"
)
for layer_name in layer_names:
polys[layer_name] = []
for token in records_in_patch[layer_name]:
poly_record = nmap.get(layer_name, token)
if layer_name == "drivable_area":
polygon_tokens = poly_record["polygon_tokens"]
else:
polygon_tokens = [poly_record["polygon_token"]]
for polygon_token in polygon_tokens:
polygon = nmap.extract_polygon(polygon_token)
polys[layer_name].append(np.array(polygon.exterior.xy).T)
return polys
def find_center(
map_folder="/Users/jonahphilion/Downloads/nuScenes-map-expansion-v1.2",
map_name="boston-seaport",
):
nusc_maps = get_nusc_maps(map_folder)
nmap = nusc_maps[map_name]
pose_lists = nmap.discretize_centerlines(resolution_meters=0.5)
def onclick(event):
print(event)
fig = plt.figure()
for pose_list in pose_lists:
if len(pose_list) > 0:
plt.plot(pose_list[:, 0], pose_list[:, 1])
ax = plt.gca()
ax.set_aspect("equal")
fig.canvas.mpl_connect("button_press_event", onclick)
plt.show()
def env_create(
map_folder="/Users/jonahphilion/Downloads/nuScenes-map-expansion-v1.2",
map_name="boston-seaport",
midx=1289.15,
midy=1049.04,
width=100.0, # meters
height=100.0, # meters
resolution=0.3,
):
mpl.use("TkAgg")
nusc_maps = get_nusc_maps(map_folder)
maphelper = MapHelper(
{k: v for k, v in nusc_maps.items() if k == map_name}
)
road_img, dx, bx, nx, ny = get_road_img(
nusc_maps[map_name], midx, midy, width, height, resolution
)
class GUI(object):
def __init__(self):
fig = plt.figure(figsize=(7, 7))
fig.canvas.mpl_connect("button_press_event", self.onclick)
fig.canvas.mpl_connect("key_press_event", self.onpress)
self.starts = []
self.traffic_signals = []
self.starts_to_traffic_signal = []
self.ts_mapping = []
self.signal_num = 0
self.map_num = -1
self.last_mapped = -1
def onclick(self, event):
print(event)
if event.button == 1:
self.starts.append([event.xdata, event.ydata])
self.render_pts()
print(self.starts)
elif event.button == 3:
self.traffic_signals.append((event.xdata, event.ydata))
self.map_num += 1
for i in range(self.last_mapped + 1, len(self.starts)):
self.starts_to_traffic_signal.append(self.map_num)
print(f"Mapped {self.starts[i]} to {self.map_num}")
self.last_mapped = len(self.starts) - 1
self.ts_mapping.append(self.signal_num)
print(f"{self.traffic_signals[-1]} --> {self.ts_mapping[-1]}")
self.render_signals()
print(self.traffic_signals)
def onpress(self, event):
print(event.key)
sys.stdout.flush()
if event.key == "1":
self.starts = self.starts[:-1]
plt.clf()
self.render()
self.render_pts()
if event.key == "o":
self.signal_num += 1
self.signal_num %= 2
print(f"Signal Flipped to {self.signal_num}")
if event.key == "2":
all_paths = maphelper.collect_paths(
map_name, self.starts, (midx, midy), width, height
)
for key in range(len(all_paths)):
paths = all_paths[key]
for pathi, path in enumerate(paths):
plt.plot(
[p[0] for p in path],
[p[1] for p in path],
f"C{pathi}",
)
plt.draw()
if event.key == "t":
all_paths = maphelper.collect_paths(
map_name, self.starts, (midx, midy), width, height
)
outname = f"{map_name}_{midx}_{midy}.json"
print(self.ts_mapping, self.starts_to_traffic_signal)
print("saving", outname)
info = {
"map_name": map_name,
"center": (midx, midy),
"width": width,
"height": height,
"all_paths": all_paths,
"starts": self.starts,
"road_img": road_img.tolist(),
"dx": dx.tolist(),
"bx": bx.tolist(),
"nx": nx,
"ny": ny,
"signal_locations": self.traffic_signals,
"mapping": self.ts_mapping,
"starts_to_signal": self.starts_to_traffic_signal,
}
with open(outname, "w") as writer:
json.dump(info, writer)
plt.title(f"saved to {outname}!")
plt.draw()
def render_pts(self):
plt.plot(
[p[0] for p in self.starts],
[p[1] for p in self.starts],
"b.",
markersize=15,
)
plt.draw()
def render_signals(self):
plt.plot(
[p[0] for p in self.traffic_signals],
[p[1] for p in self.traffic_signals],
"g.",
markersize=15,
)
plt.draw()
def render(self):
plt.plot(
maphelper.trees[map_name]["tree"].data[:, 0],
maphelper.trees[map_name]["tree"].data[:, 1],
"k.",
alpha=0.5,
)
plt.xlim((midx - width / 2, midx + width / 2))
plt.ylim((midy - height / 2, midy + height / 2))
ax = plt.gca()
ax.set_aspect("equal")
plt.draw()
gui = GUI()
gui.render()
plt.show()
def fix_json_maps(glob_path="./*.json"):
fs = glob(glob_path)
for fi, f in enumerate(fs):
print(f"Fixing {f}...")
with open(f, "r") as reader:
data = json.load(reader)
# Some splines have repeated points. Clean those up
for k, paths in data["all_paths"].items():
new_paths = []
for path in paths:
path = np.array(path)
new_paths.append(
[path[0].tolist()]
+ path[1:][
                        (1 - (path[1:] == path[:-1]).all(-1)).astype(bool)
].tolist()
)
if path.shape[0] != len(new_paths[-1]):
print(
f"[Point Cleanup] Before: {path.shape[0]} |"
f" After {len(new_paths[-1])}"
)
data["all_paths"][k] = new_paths
# Some splines merge into others. This causes issues in
# the downstream map preprocessing code. We need to fuse
# these
for starti, (key, paths) in enumerate(data["all_paths"].items()):
new_paths = []
for j, path in enumerate(paths):
path = np.array(path)[:, :2]
complete_path = path.tolist()
end = path[-1]
done = False
for i, p in enumerate(paths):
p = np.array(p)[:, :2]
if i == j:
continue
idxs = (end == p).all(axis=-1).nonzero()[0]
for idx in idxs:
if idx == p.shape[0] - 1:
continue
complete_path += p[(idx + 1) :].tolist()
print(
f"[Spline Fusion] Before: {path.shape[0]} "
f"| After: {len(complete_path)}"
)
done = True
if done:
break
new_paths.append(complete_path)
data["all_paths"][key] = new_paths
with open(f, "w") as writer:
json.dump(data, writer)
def preprocess_maps(dataroot, glob_path="./*.json"):
fs = glob(glob_path)
for fi, f in enumerate(fs):
with open(f, "r") as reader:
data = json.load(reader)
nusc_map = NuScenesMap(dataroot=dataroot, map_name=data["map_name"])
dataset = dict()
center, h, w = data["center"], data["height"], data["width"]
patch = [
center[0] - w / 2,
center[1] - h / 2,
center[0] + w / 2,
center[1] + h / 2,
]
dataset["patch"] = patch
dataset["center"] = np.array([center])
dataset["height"] = h
dataset["width"] = w
dataset["map_name"] = data["map_name"]
dataset["dx"] = np.array(data["dx"])
dataset["bx"] = np.array(data["bx"])
dataset["road_img"] = np.array(data["road_img"])
# Needed for lidar sensors
pt1, pt2 = nuscenes_map_to_line_representation(nusc_map, patch, False)
dataset["edges"] = (pt1, pt2)
drivable_area, xs, ys = get_drivable_area_matrix(data, patch, res=500)
dataset["plotting_utils"] = (
drivable_area.numpy().flatten(),
xs.numpy().flatten(),
ys.numpy().flatten(),
[
(0.5, 0.5, 0.5) if row else (1, 1, 1)
for row in drivable_area.numpy().flatten()
],
)
dataset["splines"] = dict()
signal_loc = torch.as_tensor(data["signal_locations"])
signal_color = data["mapping"]
dataset["signal_locations"] = signal_loc
dataset["color_mapping"] = signal_color
dataset["starts_to_signal"] = data["starts_to_signal"]
dataset["signal_loc"] = []
dataset["signal_color"] = []
for starti, (key, paths) in enumerate(data["all_paths"].items()):
idx = data["starts_to_signal"][starti]
loc = signal_loc[idx]
col = signal_color[idx]
dataset["signal_loc"].append(loc)
dataset["signal_color"].append(col)
dataset["splines"][starti] = dict()
for pathi, path in enumerate(paths):
path = np.array(path)
if path.shape[0] < 75:
print(
"Skipping spline as it contains very few control points"
)
continue
dataset["splines"][starti][pathi] = []
for i in range(0, 50, 10):
cps = path[
                        np.linspace(i, path.shape[0] - 15, 12, dtype=int),
:2,
]
diff = cps[0] - cps[1]
theta = np.arctan2(diff[1], diff[0])
start_orientation = (
angle_normalize(torch.as_tensor(math.pi + theta))
.float()
.reshape(1, 1)
)
if i == 0:
extra_pt1 = np.array(
[
[
cps[0, 0] + np.cos(theta) * 30.0,
cps[0, 1] + np.sin(theta) * 30.0,
]
]
)
else:
extra_pt1 = path[0:1, :2]
diff = cps[-1] - cps[-2]
theta = np.arctan2(diff[1], diff[0])
dest_orientation = (
angle_normalize(torch.as_tensor(theta))
.float()
.reshape(1, 1)
)
extra_pt2 = np.array(
[
[
cps[-1, 0] + np.cos(theta) * 30.0,
cps[-1, 1] + np.sin(theta) * 30.0,
]
]
)
cps = torch.cat(
[
torch.from_numpy(cps),
torch.from_numpy(extra_pt2),
torch.from_numpy(extra_pt1),
]
)[None, :, :].float()
start_position = cps[:, 0, :]
destination = cps[:, -3, :]
dataset["splines"][starti][pathi].append(
(
start_position,
destination,
start_orientation,
dest_orientation,
cps,
)
)
outname = f"env{data['map_name']}_{data['center'][0]}_{data['center'][1]}.pth"
print("saving", outname)
torch.save(dataset, outname)
def viz_nuscenes_world(
glob_path="./*.pth",
as_pdf: bool = True,
montage: bool = False,
save_path: Optional[str] = "all_maps.pdf",
):
# FIXME: Montage doesn't work as of now
fs = glob(glob_path)
if montage:
nrow = int(np.ceil(np.sqrt(len(fs))))
ncol = int(np.ceil(len(fs) / nrow))
fig, axs = plt.subplots(nrow, ncol, figsize=(10, 10))
i = 0
j = 0
for fi, f in enumerate(fs):
data = torch.load(f)
world = NuscenesWorld(f)
if montage:
world.render(
fig=fig, ax=axs[i, j] if not ncol == nrow == 1 else axs
)
j += 1
if j % ncol == 0:
j = 0
i += 1
else:
world.render()
world.ax.set_aspect("equal")
world.ax.grid(False)
world.ax.set_xticklabels([])
world.ax.set_yticklabels([])
plt.tight_layout()
if not montage:
if as_pdf:
world.fig.set_rasterized(True)
outname = f"env{data['map_name']}_{data['center'][0][0]}_{data['center'][0][1]}.pdf"
else:
outname = f"env{data['map_name']}_{data['center'][0][0]}_{data['center'][0][1]}.png"
print("saving", outname)
plt.savefig(outname, bbox_inches="tight")
plt.close(world.fig)
if montage:
fig.set_rasterized(True)
fig.savefig(save_path, bbox_inches="tight")
def viz_env(glob_path="./*.json"):
fs = glob(glob_path)
for fi, f in enumerate(fs):
with open(f, "r") as reader:
data = json.load(reader)
fig = plt.figure()
gs = mpl.gridspec.GridSpec(1, 1)
ax = plt.subplot(gs[0, 0])
# plot the drivable area
xs = np.array(
[
np.linspace(
data["center"][0] - data["width"] / 2 * 1.1,
data["center"][0] + data["width"] / 2 * 1.1,
100,
)
for _ in range(100)
]
).flatten()
ys = np.array(
[
np.linspace(
data["center"][1] - data["height"] / 2 * 1.1,
data["center"][1] + data["height"] / 2 * 1.1,
100,
)
for _ in range(100)
]
).T.flatten()
# index into drivable_area
drivable_area = np.array(data["road_img"])
ixes = (
np.array([xs, ys]).T
- np.array([data["center"]])
- np.array(data["bx"])[:2]
+ np.array(data["dx"])[:2] / 2.0
) / np.array(data["dx"])[:2]
ixes = ixes.astype(int)
within = np.logical_and(0 <= ixes[:, 0], ixes[:, 0] < data["nx"])
within = np.logical_and(within, 0 <= ixes[:, 1])
within = np.logical_and(within, ixes[:, 1] < data["ny"])
drivable = np.zeros(len(ixes))
drivable[within] = drivable_area[ixes[within, 0], ixes[within, 1]]
c = [(0, 1, 0) if row else (1, 0, 0) for row in drivable]
plt.scatter(xs, ys, alpha=0.05, c=c)
# plot each path
for starti, (key, paths) in enumerate(data["all_paths"].items()):
# each path emanating from this start position
for path in paths:
plt.plot(
[p[0] for p in path],
[p[1] for p in path],
c=f"C{starti}",
alpha=0.5,
)
# plot each start position (N x 2)
starts = np.array(data["starts"])
plt.plot(
starts[:, 0],
starts[:, 1],
".",
markersize=10,
label="Start Positions",
)
for loc, col in zip(data["signal_locations"], data["mapping"]):
plt.plot(
[loc[0]], [loc[1]], "g." if col == 1 else "r.", markersize=15
)
# make the window slightly larger than the actual boundaries for viz
fac = 1.1
plt.xlim(
(
data["center"][0] - data["width"] * fac / 2,
data["center"][0] + data["width"] * fac / 2,
)
)
plt.ylim(
(
data["center"][1] - data["height"] * fac / 2,
data["center"][1] + data["height"] * fac / 2,
)
)
ax.set_aspect("equal")
ax.grid(False)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.tight_layout()
outname = f"env{data['map_name']}_{data['center'][0]}_{data['center'][1]}.jpg"
print("saving", outname)
plt.savefig(outname, bbox_inches="tight")
plt.close(fig)
if __name__ == "__main__":
Fire(
{
"env_create": env_create,
"find_center": find_center,
"viz_env": viz_env,
"viz_nuscenes_world": viz_nuscenes_world,
"preprocess_maps": preprocess_maps,
"fix_json_maps": fix_json_maps,
}
)
|
467099
|
import pypy
import py
from pypy.interpreter.astcompiler.test.test_compiler import \
compile_with_astcompiler
class TestStdlib:
def check_file_compile(self, filepath):
space = self.space
print 'Compiling:', filepath
source = filepath.read()
compile_with_astcompiler(source, mode='exec', space=space)
def test_all(self):
p = py.path.local(pypy.__file__).dirpath().dirpath('lib-python',
'2.4.1')
files = p.listdir("*.py")
files.sort()
for s in files:
yield self.check_file_compile, s
|
467101
|
from terra_sdk.core import AccAddress, Coins
from ._base import BaseAsyncAPI, sync_bind
__all__ = ["AsyncBankAPI", "BankAPI"]
class AsyncBankAPI(BaseAsyncAPI):
async def balance(self, address: AccAddress) -> Coins:
"""Fetches an account's current balance.
Args:
address (AccAddress): account address
Returns:
Coins: balance
"""
res = await self._c._get(f"/bank/balances/{address}")
return Coins.from_data(res)
class BankAPI(AsyncBankAPI):
@sync_bind(AsyncBankAPI.balance)
def balance(self, address: AccAddress) -> Coins:
pass
balance.__doc__ = AsyncBankAPI.balance.__doc__
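# Illustrative usage (a hedged sketch; it assumes an LCDClient from
# terra_sdk.client.lcd, and the node URL, chain id and address are hypothetical):
#
#   from terra_sdk.client.lcd import LCDClient
#   terra = LCDClient(url="https://lcd.terra.dev", chain_id="columbus-5")
#   coins = terra.bank.balance("terra1...")  # synchronous BankAPI call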
|
467103
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import gzip
import hashlib
import io
import logging
import os
import re
import sys
from collections import Counter
from collections import defaultdict
from contextlib import contextmanager
import jsonpickle
import jsonpickle.ext.numpy
import numpy as np
import pandas as pd
from parsable import parsable
from six.moves import cPickle as pickle
from six.moves import range
from six.moves import zip
from treecat.structure import find_complete_edge
from treecat.tables import TY_MULTINOMIAL
from treecat.tables import Table
logger = logging.getLogger(__name__)
parsable = parsable.Parsable()
jsonpickle.ext.numpy.register_handlers()
CATEGORICAL = 'categorical'
ORDINAL = 'ordinal'
VALID_TYPES = (CATEGORICAL, ORDINAL)
MAX_CATEGORIES = 20
NA_STRINGS = {
'null': '',
'none': '',
}
def json_dumps(value):
return jsonpickle.encode(value)
def json_loads(string):
return jsonpickle.decode(string)
def pickle_dump(data, filename):
"""Serialize data to file using gzip compression."""
if filename.endswith('.pkz'):
with gzip.open(filename, 'wb') as f:
pickle.dump(data, f, protocol=2) # Try to support python 2.
elif filename.endswith('.jz'):
with gzip.open(filename, 'wt') as f:
f.write(json_dumps(data))
else:
raise ValueError(
'Cannot determine format: {}'.format(os.path.basename(filename)))
def pickle_load(filename):
"""Deserialize data from file using gzip compression."""
if filename.endswith('.pkz'):
with gzip.open(filename, 'rb') as f:
return pickle.load(f)
elif filename.endswith('.jz'):
with gzip.open(filename, 'rt') as f:
return json_loads(f.read())
else:
raise ValueError(
'Cannot determine format: {}'.format(os.path.basename(filename)))
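# Illustrative round trip (a hedged sketch; the file names are hypothetical):
#
#   pickle_dump({'alpha': 1.5}, '/tmp/example.pkz')  # gzipped pickle
#   pickle_load('/tmp/example.pkz')                  # -> {'alpha': 1.5}
#   pickle_dump({'alpha': 1.5}, '/tmp/example.jz')   # gzipped jsonpickle text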
def fingerprint(obj):
serialized = json_dumps(obj)
with open('/tmp/v1.json', 'w') as f:
print('see /tmp/v1.json')
f.write(serialized)
hasher = hashlib.sha1()
try:
hasher.update(serialized)
except TypeError:
hasher.update(serialized.encode('utf-8'))
return hasher.hexdigest()
@contextmanager
def csv_reader(filename, encoding='utf-8'):
with io.open(filename, 'r', encoding=encoding) as f:
if sys.version_info >= (3, 0):
yield csv.reader(f)
else:
yield csv.reader(line.encode(encoding, 'ignore') for line in f)
@contextmanager
def csv_writer(filename):
with open(filename, 'w') as f:
yield csv.writer(f)
def read_csv(filename, encoding='utf-8'):
return pd.read_csv(filename, dtype=object, encoding=encoding)
def pd_outer_join(dfs, on):
"""Outer-join an iterable of pandas dataframes on a given column.
Args:
        dfs: An iterable of pandas dataframes.
on: A column name or list of column names.
Returns:
A pandas dataframe whose columns are the union of columns in dfs, and
whose rows are the union of rows joined on 'on'.
"""
result = dfs[0].set_index(on)
for i, df in enumerate(dfs[1:]):
assert not any(col.endswith('_JOIN_') for col in result.columns)
result = result.join(df.set_index(on), how='outer', rsuffix='_JOIN_')
for right in result.columns:
if right.endswith('_JOIN_'):
left = right[:-6]
if left in df.columns:
result[left].fillna(result[right], inplace=True)
del result[right]
else:
                    result.rename(columns={right: left}, inplace=True)
result = result.sort_index(axis=1)
return result
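# Illustrative usage of pd_outer_join (a hedged sketch; the frames are hypothetical):
#
#   df1 = pd.DataFrame({'id': [1, 2], 'a': ['x', 'y']})
#   df2 = pd.DataFrame({'id': [2, 3], 'b': ['u', 'v']})
#   pd_outer_join([df1, df2], on='id')
#   # -> rows indexed by ids 1, 2, 3 with columns 'a' and 'b', NaN where missing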
@parsable
def join_csvs(column,
csvs_in,
csv_out,
encoding_in='utf-8',
encoding_out='utf-8'):
"""Outer join a comma-delimited list of csvs on a given column.
Common encodings include: utf-8, cp1252.
"""
dfs = [read_csv(csv_in, encoding_in) for csv_in in csvs_in.split(',')]
df = pd_outer_join(dfs, column)
df.to_csv(csv_out, encoding=encoding_out)
def normalize_string(string):
string = re.sub(r'\s+', ' ', string.strip().lower())
return NA_STRINGS.get(string, string)
def is_small_int(value):
try:
int_value = int(value)
return 0 <= int_value and int_value <= MAX_CATEGORIES
except ValueError:
return False
def guess_feature_type(count, values):
"""Guess the type of a feature, given statistics about the feature.
Args:
count: Total number of observations of the feature.
        values: A list of (value, count) pairs for the unique observed values of the feature.
Returns:
One of: 'ordinal', 'categorical', or ''
"""
if len(values) <= 1:
return '' # Feature is useless.
if len(values) <= MAX_CATEGORIES:
if all(is_small_int(v) for (v, c) in values):
return ORDINAL
if len(values) <= min(count / 2, MAX_CATEGORIES):
return CATEGORICAL
return ''
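# Illustrative guesses (a hedged sketch; `values` is a list of (value, count) pairs):
#
#   guess_feature_type(100, [('0', 40), ('1', 35), ('2', 25)])  # -> 'ordinal'
#   guess_feature_type(100, [('red', 60), ('blue', 40)])        # -> 'categorical'
#   guess_feature_type(100, [('x', 100)])                       # -> '' (useless feature)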
@parsable
def guess_schema(data_csvs_in, types_csv_out, values_csv_out,
encoding='utf-8'):
"""Create a best-guess types and values for a given dataset.
Common encodings include: utf-8, cp1252.
"""
print('Guessing schema')
# Collect statistics.
totals = Counter()
values = defaultdict(Counter)
feature_names = set()
sources = defaultdict(list)
for data_csv_in in data_csvs_in.split(','):
print('reading {}'.format(data_csv_in))
with csv_reader(data_csv_in, encoding) as reader:
header = list(next(reader))
feature_names |= set(header)
for name in feature_names:
sources[name].append(os.path.basename(data_csv_in))
for row in reader:
for name, value in zip(header, row):
value = normalize_string(value)
if not value:
continue
totals[name] += 1
values[name][value] += 1
uniques = defaultdict(lambda: 0)
for name, counts in values.items():
uniques[name] = len(counts)
feature_names = sorted(feature_names)
# Exclude singleton values, because they provide no statistical value
# and they often leak identifying info.
singletons = Counter()
for name in feature_names:
counts = values[name]
singles = [v for v, c in counts.items() if c == 1]
for value in singles:
del counts[value]
singletons[name] += 1
values[name] = counts.most_common(MAX_CATEGORIES)
        values[name].sort(key=lambda vc: (-vc[1], vc[0]))  # Break ties.
# Guess feature types.
feature_types = [
guess_feature_type(totals[f], values[f]) for f in feature_names
]
print('Found {} features: {} categoricals + {} ordinals'.format(
len(feature_names),
sum(t == CATEGORICAL for t in feature_types),
sum(t == ORDINAL for t in feature_types)))
# Write types.
with csv_writer(types_csv_out) as writer:
writer.writerow([
'name',
'type',
'total',
'unique',
'singletons',
'source',
])
for name, typ in zip(feature_names, feature_types):
writer.writerow([
name,
typ,
totals[name],
uniques[name],
singletons[name],
','.join(sources[name]),
])
# Write values.
with csv_writer(values_csv_out) as writer:
writer.writerow(['name', 'value', 'count'])
for name, typ in zip(feature_names, feature_types):
for value, count in values[name]:
writer.writerow([name, str(value), str(count)])
def load_schema(types_csv_in, values_csv_in, groups_csv_in, encoding='utf-8'):
print('Loading schema from {}, {}, {}'.format(types_csv_in, values_csv_in,
groups_csv_in))
# Load types.
feature_names = []
feature_types = {}
with csv_reader(types_csv_in, encoding) as reader:
header = next(reader)
assert header[0].lower() == 'name'
assert header[1].lower() == 'type'
for row in reader:
if len(row) < 2:
continue
name = row[0]
typename = row[1]
if not typename:
continue
if typename not in VALID_TYPES:
raise ValueError('Invalid type: {}\n expected one of: {}'.
format(typename, ', '.join(VALID_TYPES)))
feature_names.append(name)
feature_types[name] = typename
# Load values.
categorical_values = defaultdict(list)
ordinal_values = defaultdict(list)
with csv_reader(values_csv_in, encoding) as reader:
header = next(reader)
assert header[0].lower() == 'name'
assert header[1].lower() == 'value'
for row in reader:
if len(row) < 2:
continue
name = row[0]
if name not in feature_types:
continue
value = row[1]
typename = feature_types[name]
if typename == CATEGORICAL:
categorical_values[name].append(value)
elif typename == ORDINAL:
ordinal_values[name].append(int(value))
else:
raise ValueError(typename)
print('Found {} features'.format(len(feature_names)))
if not feature_names:
raise ValueError('Found no features')
# Load optional groups.
# These are arranged as blocks from sources to targets with constant logit.
# The logits, sources, and targets can be specified in any order.
group_logits = defaultdict(lambda: 0)
group_sources = defaultdict(set)
group_targets = defaultdict(set)
if groups_csv_in:
with csv_reader(groups_csv_in, encoding) as reader:
header = next(reader)
assert header[0].lower() == 'group'
assert header[1].lower() == 'logit'
assert header[2].lower() == 'source'
assert header[3].lower() == 'target'
for row in reader:
if len(row) < 4:
continue
                group, logit, source, target = row[:4]
if logit:
group_logits[group] = float(logit)
if source:
group_sources[group].add(source)
if target:
group_targets[group].add(target)
# Create value indices.
categorical_index = {}
ordinal_ranges = {}
for name, typename in feature_types.items():
if typename == CATEGORICAL:
values = tuple(categorical_values[name])
categorical_values[name] = values
categorical_index[name] = {v: i for i, v in enumerate(values)}
elif typename == ORDINAL:
values = sorted(ordinal_values[name])
ordinal_ranges[name] = (values[0], values[-1])
else:
raise ValueError(typename)
# Create a ragged index.
ragged_index = np.zeros(len(feature_names) + 1, dtype=np.int32)
feature_index = {}
for pos, name in enumerate(feature_names):
feature_index[name] = pos
typename = feature_types[name]
if typename == CATEGORICAL:
dim = len(categorical_values[name])
elif typename == ORDINAL:
dim = 2
ragged_index[pos + 1] = ragged_index[pos] + dim
ragged_index.flags.writeable = False
# Create a tree prior.
V = len(feature_names)
K = V * (V - 1) // 2
tree_prior = np.zeros(K, np.float32)
for group, logit in group_logits.items():
for source in group_sources[group]:
v1 = feature_index[source]
for target in group_targets[group]:
v2 = feature_index[target]
k = find_complete_edge(v1, v2)
tree_prior[k] = logit
tree_prior.flags.writeable = False
return {
'feature_names': feature_names,
'feature_index': feature_index,
'feature_types': feature_types,
'categorical_values': categorical_values,
'categorical_index': categorical_index,
'ordinal_ranges': ordinal_ranges,
'ragged_index': ragged_index,
'tree_prior': tree_prior,
}
def load_data(schema, data_csv_in, encoding='utf-8'):
print('Loading data from {}'.format(data_csv_in))
feature_index = schema['feature_index']
feature_types = schema['feature_types']
categorical_index = schema['categorical_index']
ordinal_ranges = schema['ordinal_ranges']
ragged_index = schema['ragged_index']
prototype_row = np.zeros(ragged_index[-1], np.int8)
# Load data in binary format.
rows = []
cells = 0
with csv_reader(data_csv_in, encoding) as reader:
header = list(next(reader))
metas = [None] * len(header)
for i, name in enumerate(header):
if name in feature_types:
metas[i] = ( #
name,
feature_types[name],
ragged_index[feature_index[name]],
categorical_index.get(name),
ordinal_ranges.get(name), )
for external_row in reader:
internal_row = prototype_row.copy()
for value, meta in zip(external_row, metas):
if meta is None:
continue
value = normalize_string(value)
if not value:
continue
name, typename, pos, index, min_max = meta
if typename == CATEGORICAL:
try:
value = index[value]
except KeyError:
continue
internal_row[pos + value] = 1
elif typename == ORDINAL:
try:
value = int(value)
except ValueError:
continue
if value < min_max[0] or min_max[1] < value:
continue
internal_row[pos + 0] = value - min_max[0]
internal_row[pos + 1] = min_max[1] - value
else:
raise ValueError(typename)
cells += 1
rows.append(internal_row)
print('Loaded {} cells in {} rows, {:0.1f}% observed'.format(
cells, len(rows), 100.0 * cells / len(rows) / len(feature_types)))
data = np.stack(rows)
data.flags.writeable = False
return data
def import_rows(schema, rows):
"""Import multiple rows of json data to internal format.
Args:
schema: A schema dict as returned by load_schema().
        rows: An N-long list of sparse dicts mapping feature names to values,
where N is the number of rows. Extra keys and invalid values will
be silently ignored.
Returns:
An [N, R]-shaped numpy array of ragged data, where N is the number of
rows and R = schema['ragged_index'][-1].
"""
    logger.debug('Importing %d rows', len(rows))
assert isinstance(rows, list)
assert all(isinstance(r, dict) for r in rows)
feature_index = schema['feature_index']
feature_types = schema['feature_types']
categorical_index = schema['categorical_index']
ordinal_ranges = schema['ordinal_ranges']
ragged_index = schema['ragged_index']
N = len(rows)
R = ragged_index[-1]
data = np.zeros([N, R], dtype=np.int8)
for external_row, internal_row in zip(rows, data):
for name, value in external_row.items():
try:
pos = ragged_index[feature_index[name]]
except KeyError:
continue
typename = feature_types[name]
if typename == CATEGORICAL:
index = categorical_index[name]
try:
value = index[value]
except KeyError:
continue
internal_row[pos + value] = 1
elif typename == ORDINAL:
min_max = ordinal_ranges[name]
try:
value = int(value)
except ValueError:
continue
if value < min_max[0] or min_max[1] < value:
continue
internal_row[pos + 0] = value - min_max[0]
internal_row[pos + 1] = min_max[1] - value
else:
raise ValueError(typename)
return data
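# Illustrative usage of import_rows (a hedged sketch; the feature names are
# hypothetical and `schema` must come from load_schema()):
#
#   rows = [{'color': 'red', 'age': '3'}, {'age': '7'}]
#   data = import_rows(schema, rows)
#   # data.shape == (2, schema['ragged_index'][-1]) and data.dtype == np.int8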
def export_rows(schema, data):
"""Export multiple rows of internal data to json format.
Args:
schema: A schema dict as returned by load_schema().
data: An [N, R]-shaped numpy array of ragged data, where N is the
number of rows and R = schema['ragged_index'][-1].
Returns:
        An N-long list of sparse dicts mapping feature names to json values,
where N is the number of rows.
"""
    logger.debug('Exporting %d rows', data.shape[0])
assert data.dtype == np.int8
assert len(data.shape) == 2
ragged_index = schema['ragged_index']
assert data.shape[1] == ragged_index[-1]
feature_names = schema['feature_names']
feature_types = schema['feature_types']
categorical_values = schema['categorical_values']
ordinal_ranges = schema['ordinal_ranges']
rows = [{} for _ in range(data.shape[0])]
for external_row, internal_row in zip(rows, data):
for v, name in enumerate(feature_names):
beg, end = ragged_index[v:v + 2]
internal_cell = internal_row[beg:end]
if np.all(internal_cell == 0):
continue
typename = feature_types[name]
if typename == CATEGORICAL:
assert internal_cell.sum() == 1, internal_cell
value = categorical_values[name][internal_cell.argmax()]
elif typename == ORDINAL:
min_max = ordinal_ranges[name]
assert internal_cell.sum() == min_max[1] - min_max[0]
value = internal_cell[0] + min_max[0]
else:
raise ValueError(typename)
external_row[name] = value
return rows
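# Illustrative round trip (a hedged sketch with the hypothetical schema/rows above):
# exporting imported rows recovers the recognized sparse values, e.g.
#
#   export_rows(schema, import_rows(schema, rows))
#   # -> [{'color': 'red', 'age': 3}, {'age': 7}]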
@parsable
def import_data(data_csvs_in,
types_csv_in,
values_csv_in,
groups_csv_in,
dataset_out,
encoding='utf-8'):
"""Import a comma-delimited list of csv files into internal treecat format.
Common encodings include: utf-8, cp1252.
"""
schema = load_schema(types_csv_in, values_csv_in, groups_csv_in, encoding)
data = np.concatenate([
load_data(schema, data_csv_in, encoding)
for data_csv_in in data_csvs_in.split(',')
])
data.flags.writeable = False
print('Imported data shape: [{}, {}]'.format(data.shape[0], data.shape[1]))
ragged_index = schema['ragged_index']
for v, name in enumerate(schema['feature_names']):
beg, end = ragged_index[v:v + 2]
count = np.count_nonzero(data[:, beg:end].max(1))
if count == 0:
print('WARNING: No values found for feature {}'.format(name))
feature_types = [TY_MULTINOMIAL] * len(schema['feature_names'])
table = Table(feature_types, ragged_index, data)
dataset = {
'schema': schema,
'table': table,
}
pickle_dump(dataset, dataset_out)
@parsable
def cat(*paths):
"""Print .pkz files in human readable form."""
for path in paths:
assert path.endswith('.pkz')
print(pickle_load(path))
if __name__ == '__main__':
parsable()
|
467108
|
import logging
import socket
from threading import Thread
from QRServer.game.gameclient import GameClientHandler
from QRServer.game.gameserver import GameServer
log = logging.getLogger('game_listener')
def game_listener(conn_host, conn_port):
log.info('Game starting on ' + conn_host + ':' + str(conn_port))
gm_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
gm_s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
gm_s.bind((conn_host, conn_port))
gm_s.listen(5)
gs = GameServer()
try:
while True:
(client_socket, address) = gm_s.accept()
log.debug('Client connected from {}'.format(address))
client = GameClientHandler(client_socket, gs)
ct = Thread(target=client.run, daemon=True)
ct.start()
except KeyboardInterrupt:
gm_s.shutdown(1)
gm_s.close()
|
467154
|
from io import BytesIO
from mimetypes import guess_type
import requests
def get_image(url):
'''
    Fetch the image at the provided URL and return a file-like buffer
    containing its bytes, along with its MIME type.
'''
resp = requests.get(url)
if not resp.ok:
raise RuntimeError(
'Failed to download image from %s: %s - %s'
% (url, resp.status_code, resp.text)
)
img = BytesIO(resp.content)
mime_type = _mime_type(img, resp.headers, url)
return img, mime_type
def _mime_type(img, headers, url):
'''
Try to get the MIME type of the provided image using either the HTTP
response's headers, information available via its URL, or by reading the
beginning of the image's bytes.
'''
if ('Content-Type' in headers
and headers['Content-Type'].startswith('image/')):
return headers['Content-Type']
mime_type, _ = guess_type(url)
if mime_type and mime_type.startswith('image/'):
return mime_type
mime_type = mime_type_from_bytes(img.read())
img.seek(0) # Reset the BytesIO object, so that it can be read again.
return mime_type
def mime_type_from_path(img_path):
''' Guess the MIME type of the provided image. '''
mime_type, _ = guess_type(img_path)
if mime_type and mime_type.startswith('image/'):
return mime_type
with open(img_path, 'rb') as img:
return mime_type_from_bytes(img.read(_LEN_HEADER))
# Only the first 11 bytes of an image are useful to determine its type.
_LEN_HEADER = 11
def mime_type_from_bytes(img_bytes):
''' Guess the MIME type of the provided image. '''
img_bytes = img_bytes[:_LEN_HEADER]
if img_bytes[:4] == b'\xff\xd8\xff\xe0' and img_bytes[6:] == b'JFIF\0':
return 'image/jpeg'
elif img_bytes[1:4] == b'PNG':
return 'image/png'
else:
raise ValueError('Unsupported image type')
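# Illustrative checks (a hedged sketch):
#
#   mime_type_from_bytes(b'\xff\xd8\xff\xe0\x00\x10JFIF\x00')  # -> 'image/jpeg'
#   mime_type_from_bytes(b'\x89PNG\r\n\x1a\n')                 # -> 'image/png'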
|
467165
|
import numpy as np
from automix.rules.rule import Rule
from automix.utils import quantization
class EventsAlignmentRule(Rule):
"""
The structure of the tracks should be aligned
TODO: change to the loop instead of the segments ?
"""
def __init__(self, event="boundaries"):
raise DeprecationWarning()
self.event = event
super(EventsAlignmentRule, self).__init__()
def getEvent(self, track):
if self.event == "boundaries":
return track.features["boundaries"]
if self.event == "kick":
return [
float(onset[0]) for onset in track.adtOnsets if onset[1] == "0"
]
if self.event == "beat":
return track.getBeats()
def run(self, mix, boundaries):
tracks = Rule.getTracks(mix, boundaries)
# set threshold of deviations
        # 20 Hz is the lowest audible frequency; below ~50 ms between two notes we (should) hear only one note.
        # If the deviation is larger than an eighth note it's a different beat, so it's acceptable.
minThreshold = 0.05
# deckTempo = max([track.features["tempo"] * track.playRate for track in tracks])
# compute the deck's location of each event
        # we also remove events outside of the overlapping areas
        # We still need to align before and after the boundaries of each track because we perceive the structure/beat over long periods of time
# returns: beforeOverlapA, startOverlapA, endTrackA, afterEndTrackA, startTrackB, endOverlapB, afterOverlapB
# localTimes = [
# Rule.getTransitionLocalTimes(
# tracks[i], tracks[i + 1], windowInBeats=window)
# for i in range(len(tracks) - 1)
# ]
localTimes = Rule.getTransitionsLocalTimes(tracks)
overlapsEvents = [([
tracks[i].getDeckTime(event, enableExceeding=False)
for event in self.getEvent(tracks[i])
if event > localTimes[i][0] and event < localTimes[i][3]
], [
tracks[i + 1].getDeckTime(event, enableExceeding=False)
for event in self.getEvent(tracks[i + 1])
if event > localTimes[i][4] and event < localTimes[i][6]
]) for i in range(len(tracks) - 1)]
# compute the distance between events for each overlaps
overlapErrors = [
np.abs(quantization.diff(trackAEvents,
trackBEvents, maxThreshold=10000))
for trackAEvents, trackBEvents in overlapsEvents
]
# if no segments can be aligned for one transition, it's 0
if not len([overlap for overlap in overlapErrors if len(overlap)]):
return 0
        # score 1 for each event pair whose deviation is imperceptible (distance < minThreshold), 0 otherwise
result = np.min([
np.mean([
1 if distance < minThreshold else 0 for distance in distances
]) for distances in overlapErrors
])
return result
def __str__(self):
return self.event + "overlap"
|
467169
|
import os
from setuptools import setup
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append('/'.join(os.path.join(path, filename).split('/')[1:]))
return paths
extra_files = package_files('panda/data')
setup(
name='panda',
version='0.0.1',
scripts=['bin/panda'],
packages=['panda'],
package_data={
'panda': extra_files
},
install_requires=[
'buildwebapi',
'ssh-util',
'shell-util',
'oms-client',
'pyVmomiwrapper',
'python-keystoneclient',
'python-neutronclient',
'python-novaclient',
'python-subunit',
'junitxml',
'testtools'
]
)
|
467189
|
import insightconnect_plugin_runtime
from .schema import ConfigurationCommandsInput, ConfigurationCommandsOutput, Component, Input, Output
import netmiko
from insightconnect_plugin_runtime.exceptions import PluginException
class ConfigurationCommands(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="configuration_commands",
description=Component.DESCRIPTION,
input=ConfigurationCommandsInput(),
output=ConfigurationCommandsOutput(),
)
self.device = None
def run(self, params={}):
self.device = self.connection.client(params.get(Input.HOST))
try:
return {Output.RESULTS: self.device.send_config_set(params.get(Input.COMMAND))}
except netmiko.NetMikoTimeoutException:
raise PluginException(
cause="Cannot connect/configure this device.",
assistance="Please check provided connection data and try again.",
)
|
467193
|
import random
while True:
player = input("stone, paper, scissor? : ")
computer = random.choice(['stone' , 'paper' , 'scissor'])
if player == computer:
print("Tie!")
elif player == "stone":
if computer == "paper":
print(" You lose! :( " , computer , "covers " , player)
else:
print(" You win! :) " , player , "smashes ", computer)
elif player == "paper":
if computer == "scissor":
print(" You lose! :( " , computer , "cuts " , player)
else:
print("You win! :) " , player , "covers " , computer)
elif player == "scissor":
if computer == "stone":
print(" You lose! :( ", computer , "smashes ", player)
else:
print("You win! :) ", player , "cuts ", computer)
else:
print("Please, check your spelling! :]")
|
467216
|
import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
about = {}
with open(os.path.join("django_graphene_permissions", "__version__.py")) as f:
exec(f.read(), about)
setuptools.setup(
name="django_graphene_permissions",
version=about["__version__"],
author="<NAME>",
author_email="<EMAIL>",
description="DRF like permission system for django graphene",
license='BSD',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/taoufik07/django-graphene-permissions",
packages=setuptools.find_packages(),
classifiers=[
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Framework :: Django",
"Operating System :: OS Independent",
],
)
|
467217
|
import igv_notebook
from urllib.parse import urlparse
from os.path import basename
from IPython import get_ipython
from .navbar import show_navbar
# Attempt to import nbtools, if it's not installed create a dummy decorator that does nothing
try:
from nbtools import build_ui
except ImportError:
def build_ui(*args, **kwargs):
def decorator(func):
return func
return decorator
@build_ui(name="Integrative Genomics Viewer (IGV)",
description="Use igv.js to embed an interactive genome visualization",
logo="http://igv.org/web/img/favicon.ico",
origin="+",
run_label="Visualize",
color='#4a8797',
parameter_groups=[
{
"name": "Basic Parameters",
"parameters": ["genome", "tracks", "indices", "locus"],
},
{
"name": "Advanced Parameters",
"parameters": ["track_format", "track_type"],
"hidden": False,
"advanced": True
}
],
parameters={
"genome": {
"name": "genome",
"description": "Choose the genome for your data.",
"type": "choice",
"combo": True,
"sendto": False,
"default": "hg38",
"choices": { # Find a way to read this directly from genomes.json and populate dynamically
"Human (GRCh38/hg38)": "hg38",
"Human (CRCh37/hg19)": "hg19",
"Human (hg18)": "hg18",
"Mouse (GRCm38/mm10)": "mm10",
"Gorilla (gorGor4.1/gorGor4)": "gorGor4",
"Chimp (SAC 2.1.4/panTro4)": "panTro4",
"Bonobo (MPI-EVA panpan1.1/panPan2)": "panPan2",
"Pig (SGSC Sscrofa11.1/susScr11)": "susScr11",
"Cow (UMD_3.1.1/bosTau8)": "bosTau8",
"Dog (Broad CanFam3.1/canFam3)": "canFam3",
"Rat (RGCS 6.0/rn6)": "rn6",
"Zebrafish (GRCZ11/danRer11)": "danRer11",
"Zebrafish (GRCZ10/danRer10)": "danRer10",
"D. melanogaster (dm6)": "dm6",
"C. elegans (ce11)": "ce11",
"S. cerevisiae (sacCer3)": "sacCer3"
}
},
"tracks": {
"name": "tracks",
"description": "Enter the URL to the track dataset(s)",
"type": "file",
"optional": True,
"default": "",
"maximum": 100
},
"indices": {
"name": "indices",
"description": "Enter the URL to the index files that correspond to each track",
"type": "file",
"optional": True,
"default": "",
"maximum": 100
},
"track_format": {
"name": "track format",
"description": "Enter the format of the track datasets",
"type": "choice",
"combo": True,
"optional": True,
"default": "",
"choices": { # Display some common track formats
"": "",
"bw": "bw",
"bigwig": "bigwig",
"wig": "wig",
"bedgraph": "bedgraph",
"tdf": "tdf",
"vcf": "vcf",
"seg": "seg",
"mut": "mut",
"bam": "bam",
"cram": "cram",
"bedpe": "bedpe",
"bedpe-loop": "bedpe-loop",
"bp": "bp",
"gwas": "gwas",
"bed": "bed",
"bigbed": "bigbed",
"bb": "bb"
}
},
"track_type": {
"name": "track type",
"description": "Enter the type of the track datasets",
"type": "choice",
"combo": True,
"optional": True,
"default": "",
"choices": { # Display some common track types
"": "",
"annotation": "annotation",
"variant": "variant",
"alignment": "alignment",
"interaction": "interaction",
"wig": "wig",
"seg": "seg",
"mut": "mut",
"arc": "arc",
"gwas": "gwas",
"bedtype": "bedtype"
}
},
"locus": {
"name": "locus",
"description": "Provide a locus or gene of interest for your data",
"type": "text",
"optional": True,
"default": "",
}
})
def igv_tool(genome, tracks=None, indices=None, track_format=None, track_type=None, locus=None):
# Create the navbar
show_navbar()
# Create the genome browser and display it
igv_notebook.file_reader.get_ipython = get_ipython # Workaround for a bug in igv_notebook
igv_notebook.init()
browser = igv_notebook.Browser({"genome": genome, "locus": locus})
# Ensure tracks and indices are represented as lists
if not tracks: tracks = []
if not indices: indices = []
if type(tracks) == str: tracks = [tracks]
if type(indices) == str: indices = [indices]
# Add tracks to the browser
for i in range(len(tracks)):
track_spec = {
"name": basename(urlparse(tracks[i]).path),
"url": tracks[i]
}
        if track_format: track_spec['format'] = track_format
        if track_type: track_spec['type'] = track_type
if i < len(indices) and indices[i]:
track_spec['indexURL'] = indices[i]
else:
track_spec['indexed'] = False
browser.load_track(track_spec)
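# Usage sketch (assumptions: running inside a Jupyter notebook with igv_notebook
# available; with nbtools absent, the fallback decorator above leaves igv_tool
# callable as a plain function; the URLs below are hypothetical placeholders):
#
#     igv_tool(
#         genome="hg38",
#         tracks=["https://example.org/sample.bam"],
#         indices=["https://example.org/sample.bam.bai"],
#         locus="chr8:127,736,588-127,739,371",
#     )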
|
467236
|
from torchmeta.datasets.triplemnist import TripleMNIST
from torchmeta.datasets.doublemnist import DoubleMNIST
from torchmeta.datasets.cub import CUB
from torchmeta.datasets.cifar100 import CIFARFS, FC100
from torchmeta.datasets.miniimagenet import MiniImagenet
from torchmeta.datasets.omniglot import Omniglot
from torchmeta.datasets.tieredimagenet import TieredImagenet
from torchmeta.datasets.tcga import TCGA
from torchmeta.datasets import helpers
__all__ = [
'TCGA',
'Omniglot',
'MiniImagenet',
'TieredImagenet',
'CIFARFS',
'FC100',
'CUB',
'DoubleMNIST',
'TripleMNIST',
'helpers'
]
|
467239
|
from datetime import timedelta, tzinfo, datetime
import re
TIMEDELTA_ZERO = timedelta(0)
DATETIME_RE = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
class _UTCTZ(tzinfo):
"""
UTC implementation taken from Python's docs.
"""
def __repr__(self):
return "<{0}>".format(self.tzname(None))
def utcoffset(self, dt):
return TIMEDELTA_ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return TIMEDELTA_ZERO
#: UTC tzinfo instance
utctz = _UTCTZ()
class _FixedOffsetTZ(tzinfo):
"""
Fixed offset in minutes east from UTC. Taken from Python's docs.
"""
def __init__(self, offset=None, name=None):
if offset is not None:
self.__offset = timedelta(minutes=offset)
if name is not None:
self.__name = name
def __repr__(self):
return "<{0}>".format(self.tzname(None))
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return TIMEDELTA_ZERO
def get_fixed_timezone(offset):
"""
Returns a tzinfo instance with a fixed offset from UTC.
``offset`` should be provided in minutes or as a ``timedelta``.
"""
if isinstance(offset, timedelta):
        offset = int(offset.total_seconds()) // 60
sign = '-' if offset < 0 else '+'
hhmm = '%02d%02d' % divmod(abs(offset), 60)
name = sign + hhmm
return _FixedOffsetTZ(offset, name)
def parse_datetime(value):
"""Parses a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ``ValueError`` if the input isn't well formatted.
"""
match = DATETIME_RE.match(value)
if not match:
raise ValueError('Malformed date string')
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
valuetz = kw.pop('tzinfo')
if valuetz == 'Z':
valuetz = utctz
elif valuetz is not None:
offset_mins = int(valuetz[-2:]) if len(valuetz) > 3 else 0
offset = 60 * int(valuetz[1:3]) + offset_mins
if valuetz[0] == '-':
offset = -offset
valuetz = get_fixed_timezone(offset)
kw = dict((k, int(v)) for k, v in kw.items() if v is not None)
kw['tzinfo'] = valuetz
return datetime(**kw)
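# Minimal self-check of the parser (runs only when this module is executed directly):
if __name__ == '__main__':
    # Naive datetime (no offset)
    print(parse_datetime('2020-01-02 03:04:05'))
    # UTC ("Z" suffix) and fixed positive/negative offsets
    print(parse_datetime('2020-01-02T03:04:05.123456Z'))
    print(parse_datetime('2020-01-02T03:04:05+0530'))
    print(parse_datetime('2020-01-02T03:04:05-07:00'))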
|
467269
|
from plugin.core.filters import Filters
from plugin.core.helpers.variable import merge
from plugin.managers.core.base import Get, Manager, Update
from plugin.managers.core.exceptions import ClientFilteredException
from plugin.models import Client, ClientRule
from exception_wrappers.libraries import apsw
from plex import Plex
import logging
import peewee
log = logging.getLogger(__name__)
class GetClient(Get):
def __call__(self, player):
player = self.manager.parse_player(player)
return super(GetClient, self).__call__(
Client.key == player['key']
)
def or_create(self, player, fetch=False, match=False, filtered_exception=False):
player = self.manager.parse_player(player)
try:
# Create new client
obj = self.manager.create(
key=player['key']
)
# Update newly created object
self.manager.update(
obj, player,
fetch=fetch,
match=match,
filtered_exception=filtered_exception
)
return obj
except (apsw.ConstraintError, peewee.IntegrityError):
# Return existing user
obj = self(player)
if fetch or match:
# Update existing `User`
self.manager.update(
obj, player,
fetch=fetch,
match=match,
filtered_exception=filtered_exception
)
return obj
class UpdateClient(Update):
def __call__(self, obj, player, fetch=False, match=False, filtered_exception=False):
player = self.manager.parse_player(player)
filtered, data = self.to_dict(
obj, player,
fetch=fetch,
match=match
)
updated = super(UpdateClient, self).__call__(
obj, data
)
if filtered and filtered_exception:
raise ClientFilteredException
return updated
def to_dict(self, obj, player, fetch=False, match=False):
result = {
'name': player['title']
}
# Fill `result` with available fields
if player.get('platform'):
result['platform'] = player['platform']
if player.get('product'):
result['product'] = player['product']
client = None
filtered = False
if fetch or match:
# Fetch client from plex server
result, client = self.fetch(result, player)
if match:
# Try match client against a rule
filtered, result = self.match(
result, client, player
)
return filtered, result
@staticmethod
def fetch(result, player):
# Fetch client details
client = Plex.clients().get(player['key'])
if not client:
log.info('Unable to find client with key %r', player['key'])
return result, None
# Merge client details from plex API
result = merge(result, dict([
(key, getattr(client, key)) for key in [
'device_class',
'product',
'version',
'host',
'address',
'port',
'protocol',
'protocol_capabilities',
'protocol_version'
] if getattr(client, key)
]))
return result, client
@staticmethod
def match(result, client, player):
# Apply global filters
if not Filters.is_valid_client(player) or\
not Filters.is_valid_address(client):
# Client didn't pass filters, update `account` attribute and return
result['account'] = None
return True, result
# Find matching `ClientRule`
address = client['address'] if client else None
rule = (ClientRule
.select()
.where(
(ClientRule.key == player['key']) |
(ClientRule.key == '*') |
(ClientRule.key == None),
(ClientRule.name == player['title']) |
(ClientRule.name == '*') |
(ClientRule.name == None),
(ClientRule.address == address) |
(ClientRule.address == '*') |
(ClientRule.address == None)
)
.order_by(
ClientRule.priority.asc()
)
.first()
)
log.debug('Activity matched against rule: %r', rule)
if rule:
# Process rule
if rule.account_id is not None:
result['account'] = rule.account_id
else:
return True, result
else:
result['account'] = None
return False, result
class ClientManager(Manager):
get = GetClient
update = UpdateClient
model = Client
@classmethod
def parse_player(cls, player):
if type(player) is not dict:
# Build user dict from object
player = {
'key': player.machine_identifier,
'title': player.title,
'platform': player.platform,
'product': player.product
}
# Strip "_Video" suffix from the `key`
if player.get('key') and player['key'].endswith('_Video'):
# Update player key
            player['key'] = player['key'][:-len('_Video')]
return player
|
467345
|
from typing import Tuple, Dict
import aiohttp
from pyot.pipeline.handler import ErrorHandler
from pyot.pipeline.token import PipelineToken
from pyot.endpoints.merakicdn import MerakiCDNEndpoint
from pyot.utils.parsers import safejson
from pyot.utils.logging import Logger
from pyot.utils.nullsafe import _
from .base import Store, StoreType
LOGGER = Logger(__name__)
class MerakiCDN(Store):
type = StoreType.SERVICE
def __init__(self, game: str, error_handler: Dict[int, Tuple] = None, log_level: int = 0):
self.game = game
self.handler = ErrorHandler(error_handler, 800)
self.endpoints = MerakiCDNEndpoint(game)
self.log_level = log_level
async def get(self, token: PipelineToken, session: aiohttp.ClientSession, **kwargs) -> Dict:
url = self.endpoints.resolve(token)
error_token = self.handler.get_token()
while error_token.allow():
try:
response = await session.request("GET", url)
LOGGER.log(self.log_level, f"[Trace: {self.game} > MerakiCDN] GET: {token.value}")
except Exception:
response = None
status = _(response).status or 408
if status == 200:
return await response.json(encoding="utf-8", content_type=None, loads=safejson)
await error_token.consume(status, token.value)
|
467352
|
from .functions import vtos, stov
class ModelFile:
def __init__(self, filename, mode='r'):
self.__fp = open(filename, mode)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.__fp.close()
return False
def write(self, x):
print(x, file=self.__fp)
def __write_vector(self, x):
self.write(vtos(x))
def __write_matrix(self, x):
for row in x:
self.__write_vector(row)
def read(self):
return next(self.__fp).strip()
def __read_vector(self, x, tp):
data = stov(self.read(), tp)
for i in range(len(data)):
x[i] = data[i]
def __read_matrix(self, x, tp):
for row in x:
self.__read_vector(row, tp)
def write_embed(self, f):
self.__write_matrix(f.W)
def write_linear(self, f):
self.__write_matrix(f.W)
self.__write_vector(f.b)
def read_embed(self, f):
self.__read_matrix(f.W, float)
def read_linear(self, f):
self.__read_matrix(f.W, float)
self.__read_vector(f.b, float)
def get_file_pointer(self):
return self.__fp
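# Usage sketch (assumptions: `vtos`/`stov` serialize and parse one vector per line,
# and `layer` is any object exposing `W` (matrix) and `b` (vector) attributes, as
# expected by write_linear/read_linear above; `layer` is a hypothetical name):
#
#     with ModelFile('linear.txt', 'w') as mf:
#         mf.write_linear(layer)   # writes layer.W row by row, then layer.b
#     with ModelFile('linear.txt', 'r') as mf:
#         mf.read_linear(layer)    # reads the values back in place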
|
467370
|
import tensorflow as tf
from detector.constants import BATCH_NORM_MOMENTUM, BATCH_NORM_EPSILON, DATA_FORMAT
def batch_norm_relu(x, is_training, use_relu=True, name=None):
x = tf.layers.batch_normalization(
inputs=x, axis=1 if DATA_FORMAT == 'channels_first' else 3,
momentum=BATCH_NORM_MOMENTUM, epsilon=BATCH_NORM_EPSILON,
center=True, scale=True, training=is_training,
fused=True, name=name
)
return x if not use_relu else tf.nn.relu(x)
def conv2d_same(x, num_filters, kernel_size=3, stride=1, rate=1, name=None):
if stride == 1:
return tf.layers.conv2d(
inputs=x, filters=num_filters,
kernel_size=(kernel_size, kernel_size),
strides=(stride, stride), dilation_rate=(rate, rate),
padding='same', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=DATA_FORMAT, name=name
)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if DATA_FORMAT == 'channels_first':
paddings = [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]
else:
paddings = [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
return tf.layers.conv2d(
inputs=tf.pad(x, paddings), filters=num_filters,
kernel_size=(kernel_size, kernel_size),
strides=(stride, stride), dilation_rate=(rate, rate),
padding='valid', use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=DATA_FORMAT, name=name
)
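# Usage sketch (assumptions: TensorFlow 1.x graph mode, as implied by tf.layers,
# and DATA_FORMAT == 'channels_last'); a strided "same" convolution followed by
# batch norm + ReLU, the usual ResNet-style stem:
#
#     images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#     is_training = tf.placeholder(tf.bool, [])
#     x = conv2d_same(images, num_filters=64, kernel_size=7, stride=2, name='conv1')
#     x = batch_norm_relu(x, is_training, name='bn1')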
|
467382
|
import pytest
import torch
from ludwig.modules import metric_modules
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(6).float()])
def test_rmse_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.RMSEMetric()
metric.update(preds, target)
assert output == metric.compute()
@pytest.mark.parametrize("preds", [torch.tensor([0.2, 0.3, 0.8, 0.1])])
@pytest.mark.parametrize("target", [torch.tensor([0, 0, 1, 1])])
@pytest.mark.parametrize("output", [torch.tensor(0.5)])
def test_roc_auc_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.ROCAUCMetric()
metric.update(preds, target)
assert output == metric.compute()
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(0.7527).float()])
def test_rmspe_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.RMSPEMetric()
metric.update(preds, target)
assert torch.isclose(output, metric.compute(), rtol=0.0001)
@pytest.mark.parametrize(
"preds,target,num_outputs,output",
[
(torch.arange(3), torch.arange(3, 6), 1, torch.tensor(-12.5)),
(torch.arange(6).reshape(3, 2), torch.arange(6, 12).reshape(3, 2), 2, torch.tensor(-12.5)),
],
)
def test_r2_score(preds: torch.Tensor, target: torch.Tensor, num_outputs: int, output: torch.Tensor):
metric = metric_modules.R2Score(num_outputs=num_outputs)
metric.update(preds, target)
assert metric.compute() == output
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(-21.4655).float()])
def test_bwcewl_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.BWCEWLMetric()
metric.update(preds, target)
assert torch.isclose(output, metric.compute(), rtol=0.0001)
@pytest.mark.parametrize("preds", [torch.tensor([[0.5, 0.5], [0.2, 0.8], [0.6, 0.4]])])
@pytest.mark.parametrize("target", [torch.tensor([1, 1, 0])])
@pytest.mark.parametrize("output", [torch.tensor(0.5763)])
def test_softmax_cross_entropy_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.SoftmaxCrossEntropyMetric()
metric.update(preds, target)
assert torch.isclose(output, metric.compute(), rtol=0.0001)
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(-42.9311).float()])
def test_sigmoid_cross_entropy_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.SigmoidCrossEntropyMetric()
metric.update(preds, target)
assert torch.isclose(output, metric.compute(), rtol=0.0001)
@pytest.mark.parametrize(
"preds,target,output",
[
(
torch.tensor([[0, 1], [3, 2], [4, 5]]),
torch.tensor([[0, 1], [1, 2], [4, 5]]),
torch.tensor(0.8),
),
(
torch.tensor([[0, 1, 2], [1, 3, 4], [3, 4, 5]]),
torch.tensor([[0, 1, 2], [1, 1, 4], [3, 4, 5]]),
torch.tensor(0.8750),
),
(
torch.tensor([[1, 5, 1, 5, 1, 5, 12, 12, 12], [10, 1, 5, 1, 5, 12, 12, 12, 12]]),
torch.tensor([[1, 9, 5, 7, 5, 9, 13, 6, 0], [1, 9, 7, 13, 4, 7, 7, 7, 0]]),
torch.tensor(0.05555555),
),
],
)
def test_token_accuracy_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.TokenAccuracyMetric()
metric.update(preds, target)
assert torch.allclose(metric.compute(), output)
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2)])
@pytest.mark.parametrize("target", [torch.tensor([[0, 1], [2, 1], [4, 5]]).float()])
@pytest.mark.parametrize("output", [torch.tensor(0.8333).float()])
def test_category_accuracy(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.CategoryAccuracy()
metric.update(preds, target)
assert torch.isclose(output, metric.compute(), rtol=0.0001)
@pytest.mark.parametrize(
"preds,target,output,k",
[
(
torch.tensor([[0.1, 0.9, 0], [0.3, 0.1, 0.6], [0.2, 0.5, 0.3]]),
torch.tensor([0, 1, 2]),
torch.tensor(0.6667).float(),
2,
)
],
)
def test_hits_at_k_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor, k: int):
metric = metric_modules.HitsAtKMetric(top_k=k)
metric.update(preds, target)
assert torch.isclose(output, metric.compute(), rtol=0.0001)
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(6).float()])
def test_mae_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.MAEMetric()
metric.update(preds, target)
assert output == metric.compute()
@pytest.mark.parametrize("preds", [torch.arange(6).reshape(3, 2).float()])
@pytest.mark.parametrize("target", [torch.arange(6, 12).reshape(3, 2).float()])
@pytest.mark.parametrize("output", [torch.tensor(36).float()])
def test_mse_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.MSEMetric()
metric.update(preds, target)
assert output == metric.compute()
@pytest.mark.parametrize("preds", [torch.tensor([[0, 1], [1, 1]])])
@pytest.mark.parametrize("target", [torch.tensor([[1, 0], [1, 1]])])
@pytest.mark.parametrize("output", [torch.tensor(0.5)])
def test_jaccard_metric(preds: torch.Tensor, target: torch.Tensor, output: torch.Tensor):
metric = metric_modules.JaccardMetric()
metric.update(preds, target)
assert output == metric.compute()
|
467425
|
import copy
import itertools
from enum import IntEnum
from .config import Config
class Urgency(IntEnum):
LOW = 0
MEDIUM = 1
CRITICAL = 2
class Notification:
__slots__ = (
"id",
"app_name",
"app_icon",
"body",
"summary",
"actions",
"created_at",
"expires_at",
"urgency",
"config",
"timer",
)
def __init__(
self,
id,
app_name,
app_icon,
summary,
body,
actions,
created_at,
expires_at=None,
urgency=0,
):
self.id = id
self.app_name = app_name
self.app_icon = app_icon
self.summary = summary
self.body = body
self.actions = actions
self.created_at = created_at
self.expires_at = expires_at
self.urgency = urgency
self.config = Config
self.timer = None
@property
def pre_action_hooks(self):
return self.config.pre_action_hooks
@property
def post_action_hooks(self):
return self.config.post_action_hooks
@property
def pre_close_hooks(self):
return self.config.pre_close_hooks
@property
def post_close_hooks(self):
return self.config.post_close_hooks
@property
def expires(self):
return self.config.expires
def formatted(self):
return self.config.format_notification(self)
def single_line(self):
return self.config.single_line(self)
def keys(self):
return self.config.get_keys(self)
def strip(self):
return Notification(
self.id,
self.app_name,
self.app_icon,
self.summary,
self.body,
self.actions,
self.created_at,
self.expires_at,
self.urgency,
)
def __len__(self):
return 1
@property
def best(self):
return self
def leafs(self):
return [self]
def __repr__(self):
return (
"<Notification: "
f"id:{self.id} "
f'app_name:"{self.app_name}" '
f'app_icon:"{self.app_icon}" '
f'summary:"{self.summary}" '
f'body:"{self.body}" '
f"actions:{self.actions} "
f"urgency:{self.urgency} "
f"created_at:{self.created_at} "
f"expires_at:{self.expires_at}>"
)
def __str__(self):
return self.__repr__()
class NotificationCluster:
__slots__ = "notifications", "_best", "_len", "_urgency"
def __init__(self):
self.notifications = dict()
self._best = None
self._len = 0
self._urgency = None
@property
def urgency(self):
if self._urgency is None and self.notifications:
self._urgency = self.best.urgency
return self._urgency or 0
def formatted(self):
if len(self) == 1:
return self.best.formatted()
dummy = self.best.strip()
dummy.app_name = f"{dummy.app_name} ({len(self)})"
dummy.config = self.best.config
return dummy.formatted()
def reset(self):
self._len = 0
self._urgency = None
self._best = None
def add(self, key, notification):
if self._best is None or notification.urgency >= self.best.urgency:
self._best = notification
self._urgency = self.best.urgency
self._len += 1
if isinstance(key, int):
self.notifications[key] = notification
def remove(self, key):
if self.urgency == self.notifications[key].urgency:
self._urgency = None
if self.notifications[key] == self.best:
self._best = None
self._len -= len(self.notifications[key])
del self.notifications[key]
@property
def best(self):
if self._best is None and self.notifications:
self._best = max(
self.notifications.values(),
key=lambda x: (x.urgency, x.best.created_at),
).best
return self._best
def __len__(self):
self._len = self._len or sum(len(n) for n in self.notifications.values())
return self._len or 0
def leafs(self):
return list(
itertools.chain.from_iterable(
v.leafs() for v in self.notifications.values()
)
)
def __str__(self):
return str(self.notifications)
def __repr__(self):
return repr(self.notifications)
|
467434
|
import unittest
from unittest import mock
from django.core.exceptions import ValidationError
class TestValidationError(unittest.TestCase):
def test_messages_concatenates_error_dict_values(self):
message_dict = {}
exception = ValidationError(message_dict)
self.assertEqual(sorted(exception.messages), [])
message_dict['field1'] = ['E1', 'E2']
exception = ValidationError(message_dict)
self.assertEqual(sorted(exception.messages), ['E1', 'E2'])
message_dict['field2'] = ['E3', 'E4']
exception = ValidationError(message_dict)
self.assertEqual(sorted(exception.messages), ['E1', 'E2', 'E3', 'E4'])
def test_eq(self):
error1 = ValidationError('message')
error2 = ValidationError('message', code='my_code1')
error3 = ValidationError('message', code='my_code2')
error4 = ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm1': 'val1', 'parm2': 'val2'},
)
error5 = ValidationError({'field1': 'message', 'field2': 'other'})
error6 = ValidationError({'field1': 'message'})
error7 = ValidationError([
ValidationError({'field1': 'field error', 'field2': 'other'}),
'message',
])
self.assertEqual(error1, ValidationError('message'))
self.assertNotEqual(error1, ValidationError('message2'))
self.assertNotEqual(error1, error2)
self.assertNotEqual(error1, error4)
self.assertNotEqual(error1, error5)
self.assertNotEqual(error1, error6)
self.assertNotEqual(error1, error7)
self.assertEqual(error1, mock.ANY)
self.assertEqual(error2, ValidationError('message', code='my_code1'))
self.assertNotEqual(error2, ValidationError('other', code='my_code1'))
self.assertNotEqual(error2, error3)
self.assertNotEqual(error2, error4)
self.assertNotEqual(error2, error5)
self.assertNotEqual(error2, error6)
self.assertNotEqual(error2, error7)
self.assertEqual(error4, ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm1': 'val1', 'parm2': 'val2'},
))
self.assertNotEqual(error4, ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code2',
params={'parm1': 'val1', 'parm2': 'val2'},
))
self.assertNotEqual(error4, ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm2': 'val2'},
))
self.assertNotEqual(error4, ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm2': 'val1', 'parm1': 'val2'},
))
self.assertNotEqual(error4, ValidationError(
'error val1 val2',
code='my_code1',
))
# params ordering is ignored.
self.assertEqual(error4, ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm2': 'val2', 'parm1': 'val1'},
))
self.assertEqual(
error5,
ValidationError({'field1': 'message', 'field2': 'other'}),
)
self.assertNotEqual(
error5,
ValidationError({'field1': 'message', 'field2': 'other2'}),
)
self.assertNotEqual(
error5,
ValidationError({'field1': 'message', 'field3': 'other'}),
)
self.assertNotEqual(error5, error6)
# fields ordering is ignored.
self.assertEqual(
error5,
ValidationError({'field2': 'other', 'field1': 'message'}),
)
self.assertNotEqual(error7, ValidationError(error7.error_list[1:]))
self.assertNotEqual(
ValidationError(['message']),
ValidationError([ValidationError('message', code='my_code')]),
)
# messages ordering is ignored.
self.assertEqual(
error7,
ValidationError(list(reversed(error7.error_list))),
)
self.assertNotEqual(error4, ValidationError([error4]))
self.assertNotEqual(ValidationError([error4]), error4)
self.assertNotEqual(error4, ValidationError({'field1': error4}))
self.assertNotEqual(ValidationError({'field1': error4}), error4)
def test_eq_nested(self):
error_dict = {
'field1': ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code',
params={'parm1': 'val1', 'parm2': 'val2'},
),
'field2': 'other',
}
error = ValidationError(error_dict)
self.assertEqual(error, ValidationError(dict(error_dict)))
self.assertEqual(error, ValidationError({
'field1': ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code',
params={'parm2': 'val2', 'parm1': 'val1'},
),
'field2': 'other',
}))
self.assertNotEqual(error, ValidationError(
{**error_dict, 'field2': 'message'},
))
self.assertNotEqual(error, ValidationError({
'field1': ValidationError(
'error %(parm1)s val2',
code='my_code',
params={'parm1': 'val1'},
),
'field2': 'other',
}))
def test_hash(self):
error1 = ValidationError('message')
error2 = ValidationError('message', code='my_code1')
error3 = ValidationError('message', code='my_code2')
error4 = ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm1': 'val1', 'parm2': 'val2'},
)
error5 = ValidationError({'field1': 'message', 'field2': 'other'})
error6 = ValidationError({'field1': 'message'})
error7 = ValidationError([
ValidationError({'field1': 'field error', 'field2': 'other'}),
'message',
])
self.assertEqual(hash(error1), hash(ValidationError('message')))
self.assertNotEqual(hash(error1), hash(ValidationError('message2')))
self.assertNotEqual(hash(error1), hash(error2))
self.assertNotEqual(hash(error1), hash(error4))
self.assertNotEqual(hash(error1), hash(error5))
self.assertNotEqual(hash(error1), hash(error6))
self.assertNotEqual(hash(error1), hash(error7))
self.assertEqual(
hash(error2),
hash(ValidationError('message', code='my_code1')),
)
self.assertNotEqual(
hash(error2),
hash(ValidationError('other', code='my_code1')),
)
self.assertNotEqual(hash(error2), hash(error3))
self.assertNotEqual(hash(error2), hash(error4))
self.assertNotEqual(hash(error2), hash(error5))
self.assertNotEqual(hash(error2), hash(error6))
self.assertNotEqual(hash(error2), hash(error7))
self.assertEqual(hash(error4), hash(ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm1': 'val1', 'parm2': 'val2'},
)))
self.assertNotEqual(hash(error4), hash(ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code2',
params={'parm1': 'val1', 'parm2': 'val2'},
)))
self.assertNotEqual(hash(error4), hash(ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm2': 'val2'},
)))
self.assertNotEqual(hash(error4), hash(ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm2': 'val1', 'parm1': 'val2'},
)))
self.assertNotEqual(hash(error4), hash(ValidationError(
'error val1 val2',
code='my_code1',
)))
# params ordering is ignored.
self.assertEqual(hash(error4), hash(ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code1',
params={'parm2': 'val2', 'parm1': 'val1'},
)))
self.assertEqual(
hash(error5),
hash(ValidationError({'field1': 'message', 'field2': 'other'})),
)
self.assertNotEqual(
hash(error5),
hash(ValidationError({'field1': 'message', 'field2': 'other2'})),
)
self.assertNotEqual(
hash(error5),
hash(ValidationError({'field1': 'message', 'field3': 'other'})),
)
self.assertNotEqual(error5, error6)
# fields ordering is ignored.
self.assertEqual(
hash(error5),
hash(ValidationError({'field2': 'other', 'field1': 'message'})),
)
self.assertNotEqual(
hash(error7),
hash(ValidationError(error7.error_list[1:])),
)
self.assertNotEqual(
hash(ValidationError(['message'])),
hash(ValidationError([ValidationError('message', code='my_code')])),
)
# messages ordering is ignored.
self.assertEqual(
hash(error7),
hash(ValidationError(list(reversed(error7.error_list)))),
)
self.assertNotEqual(hash(error4), hash(ValidationError([error4])))
self.assertNotEqual(hash(ValidationError([error4])), hash(error4))
self.assertNotEqual(
hash(error4),
hash(ValidationError({'field1': error4})),
)
def test_hash_nested(self):
error_dict = {
'field1': ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code',
params={'parm2': 'val2', 'parm1': 'val1'},
),
'field2': 'other',
}
error = ValidationError(error_dict)
self.assertEqual(hash(error), hash(ValidationError(dict(error_dict))))
self.assertEqual(hash(error), hash(ValidationError({
'field1': ValidationError(
'error %(parm1)s %(parm2)s',
code='my_code',
params={'parm1': 'val1', 'parm2': 'val2'},
),
'field2': 'other',
})))
self.assertNotEqual(hash(error), hash(ValidationError(
{**error_dict, 'field2': 'message'},
)))
self.assertNotEqual(hash(error), hash(ValidationError({
'field1': ValidationError(
'error %(parm1)s val2',
code='my_code',
params={'parm1': 'val1'},
),
'field2': 'other',
})))
|
467447
|
import multiprocessing
import tensorflow as tf
from tensorflow.python.ops.signal.fft_ops import ifft2d, fft2d, fft, ifft
def tf_mp_ifft(kspace):
k_shape_x = tf.shape(kspace)[-1]
batched_kspace = tf.reshape(kspace, (-1, k_shape_x))
batched_image = tf.map_fn(
ifft,
batched_kspace,
parallel_iterations=multiprocessing.cpu_count(),
)
image = tf.reshape(batched_image, tf.shape(kspace))
return image
def tf_mp_fft(kspace):
k_shape_x = tf.shape(kspace)[-1]
batched_kspace = tf.reshape(kspace, (-1, k_shape_x))
batched_image = tf.map_fn(
fft,
batched_kspace,
parallel_iterations=multiprocessing.cpu_count(),
)
image = tf.reshape(batched_image, tf.shape(kspace))
return image
def tf_mp_ifft2d(kspace):
k_shape_x = tf.shape(kspace)[-2]
k_shape_y = tf.shape(kspace)[-1]
batched_kspace = tf.reshape(kspace, (-1, k_shape_x, k_shape_y))
batched_image = tf.map_fn(
ifft2d,
batched_kspace,
parallel_iterations=multiprocessing.cpu_count(),
)
image = tf.reshape(batched_image, tf.shape(kspace))
return image
def tf_mp_fft2d(image):
shape_x = tf.shape(image)[-2]
shape_y = tf.shape(image)[-1]
batched_image = tf.reshape(image, (-1, shape_x, shape_y))
batched_kspace = tf.map_fn(
fft2d,
batched_image,
parallel_iterations=multiprocessing.cpu_count(),
)
kspace = tf.reshape(batched_kspace, tf.shape(image))
return kspace
def tf_mp_ifft3d(kspace):
image = tf_mp_fourier3d(kspace, trans_type='inv')
return image
def tf_mp_fft3d(image):
kspace = tf_mp_fourier3d(image, trans_type='forw')
return kspace
def tf_mp_fourier3d(x, trans_type='inv'):
fn_2d, fn_1d = (ifft2d, ifft) if trans_type == 'inv' else (fft2d, fft)
n_slices = tf.shape(x)[0]
n_coils = tf.shape(x)[1]
shape_z = tf.shape(x)[-3]
shape_x = tf.shape(x)[-2]
shape_y = tf.shape(x)[-1]
reshaped_x = tf.reshape(x, (-1, shape_x, shape_y))
batched_incomplete_y = tf.map_fn(
fn_2d,
reshaped_x,
parallel_iterations=multiprocessing.cpu_count(),
)
incomplete_y = tf.reshape(batched_incomplete_y, tf.shape(x))
incomplete_y_reshaped = tf.transpose(incomplete_y, [0, 1, 3, 4, 2])
batched_incomplete_y_reshaped = tf.reshape(incomplete_y_reshaped, (-1, shape_z))
batched_y = tf.map_fn(
fn_1d,
batched_incomplete_y_reshaped,
parallel_iterations=multiprocessing.cpu_count(),
)
y_reshaped = tf.reshape(batched_y, [n_slices, n_coils, shape_x, shape_y, shape_z])
y = tf.transpose(y_reshaped, [0, 1, 4, 2, 3])
return y
# Generate a nested list of Fourier transforms to simplify their use below.
# In the end we index the following list as:
# fourier_list[do_ifft][multiprocessing][rank of image - 1]
fourier_list = [
[
[
tf.signal.fft,
tf.signal.fft2d,
tf.signal.fft3d,
],
[
tf_mp_fft,
tf_mp_fft2d,
tf_mp_fft3d,
]
],
[
[
tf.signal.ifft,
tf.signal.ifft2d,
tf.signal.ifft3d,
],
[
tf_mp_ifft,
tf_mp_ifft2d,
tf_mp_ifft3d,
]
]
]
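# For example, fourier_list[False][False][1] is tf.signal.fft2d (forward 2D FFT,
# single process), while fourier_list[True][True][2] is tf_mp_ifft3d (inverse 3D
# FFT via the map_fn-based implementation above).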
def scale_and_fft_on_image_volume(x, scaling_coef, grid_size, im_size, norm, im_rank=2, multiprocessing=False,
do_ifft=False):
"""Applies the FFT and any relevant scaling factors to x.
Args:
x (tensor): The image to be FFT'd.
scaling_coef (tensor): The NUFFT scaling coefficients to be multiplied
prior to FFT.
grid_size (tensor): The oversampled grid size.
im_size (tensor): The image dimensions for x.
norm (str): Type of normalization factor to use. If 'ortho', uses
orthogonal FFT, otherwise, no normalization is applied.
do_ifft (bool, optional, default False): When true, the IFFT is
carried out on signal rather than FFT. This is needed for gradient.
Returns:
tensor: The oversampled FFT of x.
"""
# zero pad for oversampled nufft
# we don't need permutations since the fft in fourier is done on the
# innermost dimensions and we are handling complex tensors
pad_sizes = [
(0, 0), # batch dimension
(0, 0), # coil dimension
] + [
(0, grid_size[0] - im_size[0]), # nx
]
if im_rank >= 2:
pad_sizes += [(0, grid_size[1] - im_size[1])]
if im_rank == 3:
pad_sizes += [(0, grid_size[2] - im_size[2])] # nz
scaling_coef = tf.cast(scaling_coef, x.dtype)
scaling_coef = scaling_coef[None, None, ...]
# multiply by scaling coefs
if do_ifft:
x = x * tf.math.conj(scaling_coef)
else:
x = x * scaling_coef
# zero pad and fft
x = tf.pad(x, pad_sizes)
x = fourier_list[do_ifft][multiprocessing][im_rank - 1](x)
if norm == 'ortho':
scaling_factor = tf.cast(tf.reduce_prod(grid_size), x.dtype)
if do_ifft:
x = x * tf.sqrt(scaling_factor)
else:
x = x / tf.sqrt(scaling_factor)
return x
def ifft_and_scale_on_gridded_data(x, scaling_coef, grid_size, im_size, norm, im_rank=2, multiprocessing=False):
"""Applies the iFFT and any relevant scaling factors to x.
Args:
x (tensor): The gridded data to be iFFT'd.
scaling_coef (tensor): The NUFFT scaling coefficients to be multiplied
after iFFT.
grid_size (tensor): The oversampled grid size.
im_size (tensor): The image dimensions for x.
norm (str): Type of normalization factor to use. If 'ortho', uses
orthogonal iFFT, otherwise, no normalization is applied.
Returns:
tensor: The iFFT of x.
"""
# we don't need permutations since the fft in fourier is done on the
# innermost dimensions and we are handling complex tensors
# do the inverse fft
x = fourier_list[True][multiprocessing][im_rank - 1](x)
im_size = tf.cast(im_size, tf.int32)
# crop to output size
x = x[:, :, :im_size[0]]
if im_rank >=2:
if im_rank == 3:
x = x[..., :im_size[1], :im_size[2]]
else:
x = x[..., :im_size[1]]
# scaling
scaling_factor = tf.cast(tf.reduce_prod(grid_size), x.dtype)
if norm == 'ortho':
x = x * tf.sqrt(scaling_factor)
else:
x = x * scaling_factor
# scaling coefficient multiply
scaling_coef = tf.cast(scaling_coef, x.dtype)
scaling_coef = scaling_coef[None, None, ...]
x = x * tf.math.conj(scaling_coef)
# this might be nice to try at some point more like an option rather
# than a try except.
# # try to broadcast multiply - batch over coil if not enough memory
# raise_error = False
# try:
# x = x * tf.math.conj(scaling_coef)
# except RuntimeError as e:
# if 'out of memory' in str(e) and not raise_error:
# torch.cuda.empty_cache()
# for coilind in range(x.shape[1]):
# x[:, coilind, ...] = conj_complex_mult(
# x[:, coilind:coilind + 1, ...], scaling_coef, dim=2)
# raise_error = True
# else:
# raise e
# except BaseException:
# raise e
#
return x
# used for toep thing
# def fft_filter(x, kern, norm=None):
# """FFT-based filtering on a 2-size oversampled grid.
# """
# x = x.clone()
#
# im_size = torch.tensor(x.shape).to(torch.long)[3:]
# grid_size = im_size * 2
#
# # set up n-dimensional zero pad
# pad_sizes = []
# permute_dims = [0, 1]
# inv_permute_dims = [0, 1, 2 + grid_size.shape[0]]
# for i in range(grid_size.shape[0]):
# pad_sizes.append(0)
# pad_sizes.append(int(grid_size[-1 - i] - im_size[-1 - i]))
# permute_dims.append(3 + i)
# inv_permute_dims.append(2 + i)
# permute_dims.append(2)
# pad_sizes = tuple(pad_sizes)
# permute_dims = tuple(permute_dims)
# inv_permute_dims = tuple(inv_permute_dims)
#
# # zero pad and fft
# x = F.pad(x, pad_sizes)
# x = x.permute(permute_dims)
# x = torch.fft(x, grid_size.numel())
# if norm == 'ortho':
# x = x / torch.sqrt(torch.prod(grid_size.to(torch.double)))
# x = x.permute(inv_permute_dims)
#
# # apply the filter
# x = complex_mult(x, kern, dim=2)
#
# # inverse fft
# x = x.permute(permute_dims)
# x = torch.ifft(x, grid_size.numel())
# x = x.permute(inv_permute_dims)
#
# # crop to input size
# crop_starts = tuple(np.array(x.shape).astype(np.int) * 0)
# crop_ends = [x.shape[0], x.shape[1], x.shape[2]]
# for dim in im_size:
# crop_ends.append(int(dim))
# x = x[tuple(map(slice, crop_starts, crop_ends))]
#
# # scaling, assume user handled adjoint scaling with their kernel
# if norm == 'ortho':
# x = x / torch.sqrt(torch.prod(grid_size.to(torch.double)))
#
# return x
|
467470
|
from typing import Dict, List, Any
import json
from bech32 import convertbits, bech32_encode
from pytest import raises
import e2e.Libs.Ristretto.Ristretto as Ristretto
from e2e.Classes.Transactions.Transactions import Send, Data, Transactions
from e2e.Classes.Consensus.SpamFilter import SpamFilter
from e2e.Meros.RPC import RPC
from e2e.Meros.Liver import Liver
from e2e.Tests.RPC.Transactions.GetUTXOs.Lib import verify, mineBlock
from e2e.Tests.Errors import TestError, SuccessError
def TGUUnverifyTest(
rpc: RPC
) -> None:
vectors: Dict[str, Any]
with open("e2e/Vectors/RPC/Transactions/GetUTXOs.json", "r") as file:
vectors = json.loads(file.read())
transactions: Transactions = Transactions.fromJSON(vectors["transactions"])
def test() -> None:
recipient: Ristretto.SigningKey = Ristretto.SigningKey(b'\1' * 32)
recipientPub: bytes = recipient.get_verifying_key()
address: str = bech32_encode("mr", convertbits(bytes([0]) + recipientPub, 8, 5))
otherRecipient: bytes = Ristretto.SigningKey(b'\2' * 32).get_verifying_key()
otherAddress: str = bech32_encode("mr", convertbits(bytes([0]) + otherRecipient, 8, 5))
#Create a Send.
send: Send = Send.fromJSON(vectors["send"])
if rpc.meros.liveTransaction(send) != rpc.meros.live.recv():
raise TestError("Meros didn't broadcast back a Send.")
if rpc.call("transactions", "getUTXOs", {"address": address}) != []:
raise TestError("Meros considered an unconfirmed Transaction's outputs as UTXOs.")
verify(rpc, send.hash)
#Finalize the parent.
for _ in range(6):
mineBlock(rpc)
#Spend it.
spendingSend: Send = Send.fromJSON(vectors["spendingSend"])
if rpc.meros.liveTransaction(spendingSend) != rpc.meros.live.recv():
raise TestError("Meros didn't broadcast back a Send.")
verify(rpc, spendingSend.hash)
if rpc.call("transactions", "getUTXOs", {"address": address}) != []:
raise TestError("Meros didn't consider a verified Transaction's inputs as spent.")
if rpc.call("transactions", "getUTXOs", {"address": otherAddress}) != [{"hash": spendingSend.hash.hex().upper(), "nonce": 0}]:
raise TestError("Meros didn't consider a verified Transaction's outputs as UTXOs.")
#Unverify the spending Send. This would also unverify the parent if it wasn't finalized.
#This is done via causing a Merit Removal.
#Uses two competing Datas to not change the Send's status to competing.
datas: List[Data] = [Data(bytes(32), recipientPub)]
for _ in range(2):
datas.append(Data(datas[0].hash, datas[-1].hash))
for data in datas:
data.sign(recipient)
data.beat(SpamFilter(5))
if rpc.meros.liveTransaction(data) != rpc.meros.live.recv():
raise TestError("Meros didn't broadcast back a Data.")
verify(rpc, data.hash, mr=(datas[-1].hash == data.hash))
#Verify the MeritRemoval happened and the spending Send is no longer verified.
#These first two checks are more likely to symbolize a failure in testing methodology than Meros.
if not rpc.call("merit", "getMerit", {"nick": 0})["malicious"]:
raise TestError("Meros didn't create a Merit Removal.")
if not rpc.call("consensus", "getStatus", {"hash": send.hash.hex()})["verified"]:
raise TestError("Finalized Transaction became unverified.")
if rpc.call("consensus", "getStatus", {"hash": spendingSend.hash.hex()})["verified"]:
raise TestError("Meros didn't unverify a Transaction which is currently below the required threshold.")
#Even after unverification, since the Transaction still exists, the input shouldn't be considered a UTXO.
if rpc.call("transactions", "getUTXOs", {"address": address}) != []:
raise TestError("Meros didn't consider a unverified yet existing Transaction's inputs as spent.")
#That said, its outputs should no longer be considered a UTXO.
if rpc.call("transactions", "getUTXOs", {"address": otherAddress}) != []:
raise TestError("Meros considered a unverified Transaction's outputs as UTXOs.")
raise SuccessError()
#Send Blocks so we have a Merit Holder who can instantly verify Transactions, not to mention Mints.
with raises(SuccessError):
Liver(rpc, vectors["blockchain"], transactions, {50: test}).live()
|
467485
|
import json
from typing import List
from model.transaction import Transaction
from model.wallet import Wallet
class Portfolio:
def __init__(self):
self.wallets = {}
def add_transactions(self, transactions: List[Transaction]):
for transaction in transactions:
if transaction.wallet not in self.wallets:
self.wallets[transaction.wallet] = \
Wallet(transaction.wallet, Wallet.wallet_name(transaction.wallet))
wallet = self.wallets[transaction.wallet]
if transaction.in_amount != 0:
wallet.deposit(transaction.in_currency, transaction.in_amount)
if transaction.out_amount != 0:
wallet.deposit(transaction.out_currency, transaction.out_amount)
if transaction.fee_amount != 0:
wallet.deposit(transaction.fee_currency, transaction.fee_amount)
def to_dict(self):
wallets = self.wallets.values()
return {wallet.name: wallet.to_dict() for wallet in wallets}
@staticmethod
def from_json(payload: str):
        data = json.loads(payload)
        portfolio = Portfolio()
        for wallet in data:
            portfolio.wallets[wallet] = Wallet.from_dict(data[wallet])
        return portfolio
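# Usage sketch (assumptions: `transactions` is a list of Transaction objects built
# elsewhere, and Wallet.from_dict() accepts the output of Wallet.to_dict(); both
# live in other modules, so this is only an illustrative outline):
#
#     portfolio = Portfolio()
#     portfolio.add_transactions(transactions)
#     summary = portfolio.to_dict()                 # {wallet name: wallet contents}
#     restored = Portfolio.from_json(json.dumps(summary))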
|
467495
|
import nltk
import spacy
class Lang:
def __init__(self):
self.unk_idx = 0
self.pad_idx = 1
self.sou_idx = 2
self.eou_idx = 3
self.word2index = {'__unk__': self.unk_idx, '__pad__': self.pad_idx, '__sou__': self.sou_idx, '__eou__': self.eou_idx}
self.word2count = {'__unk__': 0, '__pad__': 0, '__sou__': 0, '__eou__': 0}
self.index2word = {self.unk_idx: "__unk__", self.pad_idx: "__pad__", self.sou_idx: "__sou__", self.eou_idx: "__eou__"}
self.n_words = 4 # Count default tokens
self.nlp = spacy.load("en_core_web_sm")
# add special case rule
special_case = [{spacy.symbols.ORTH: u"__eou__"}]
self.nlp.tokenizer.add_special_case(u"__eou__", special_case)
def __len__(self):
return self.n_words
def tokenize(self, s):
# return nltk.word_tokenize(s)
return self.nlp.tokenizer(s)
def addSentence(self, sentence):
for word in self.tokenize(sentence):
self.addWord(word.text)
def addSentences(self, sentences):
for sentence in sentences:
for word in self.tokenize(sentence):
self.addWord(word.text)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def transform(self, sentences):
        # given untokenized sentences (or an iterator), transform each to its index mapping
return [[self.word2index[token.text] for token in self.tokenize(sentence) if not token.is_space] for sentence in sentences]
def transform_one(self, sentence):
try:
            # given an untokenized sentence, transform it to its index mapping
return [self.word2index[token.text] for token in self.tokenize(sentence) if not token.is_space]
except KeyError as e:
print(e)
print(sentence)
for token in self.tokenize(sentence):
if not token.is_space:
print(token.text, token.text in self.word2index)
exit(1)
def transform_unk(self, sentence):
# transform with unk
ret = []
for token in self.tokenize(sentence):
if token.text in self.word2index:
ret.append(self.word2index[token.text])
else:
ret.append(self.unk_idx)
return ret
def reverse(self, sentences):
# given transformed sentences, reverse it
return [[self.index2word[idx] for idx in sentence] for sentence in sentences]
def reverse_one(self, sentence):
# given transformed sentence, reverse it
return [self.index2word[idx] for idx in sentence]
# def trim(self, min_freq=100):
# print('vocab size before trimming: ', len(self))
# self.word2count[self.unk_idx] = min_freq
# self.word2count[self.pad_idx] = min_freq
# self.word2count[self.sou_idx] = min_freq
# self.word2count[self.eou_idx] = min_freq
# self.word2count = {k: v for k, v in self.word2count if v >= 100}
# trimmed_word2index = {'__unk__': self.unk_idx, '__pad__': self.pad_idx, '__sou__': self.sou_idx, '__eou__': self.eou_idx}
# trimmed_index2word = {self.unk_idx: "__unk__", self.pad_idx: "__pad__", self.sou_idx: "__sou__", self.eou_idx: "__eou__"}
# self.word2index = trimmed_word2index
# print('vocab size after trimming: ', len(self))
# return self
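# Minimal usage sketch (requires the spaCy model "en_core_web_sm" loaded in __init__):
if __name__ == '__main__':
    lang = Lang()
    lang.addSentence("hello world __eou__")
    ids = lang.transform_one("hello world __eou__")
    print(ids)                    # e.g. [4, 5, 3] given the four default tokens
    print(lang.reverse_one(ids))  # ['hello', 'world', '__eou__']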
|
467521
|
def extractQxbluishWordpressCom(item):
'''
Parser for 'qxbluish.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('gtvftbv', 'Guide The Villain Father To Be Virtuous', 'translated'),
('tyrants', 'Golden Age of Phoenix: Tyrant\'s First Class Virtuous Imperial Concubine', 'translated'),
('tfwoiwm', 'The Former Wife of Invisible Wealthy Man', 'translated'),
('fanshu', 'Crossing Into The Emperor’s Body At Night', 'translated'),
('mdwmseipl', 'My Daughter Was My Sworn Enemy In Past Life', 'translated'),
('tpmofbs', 'Transmigration: Petite Mother Of Four Big Shots', 'translated'),
('bhwatp', 'Become Husband and Wife According To Pleasure', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
titlemap = [
('Golden Age of Phoenix: TFCVIC', 'Golden Age of Phoenix: Tyrant’s First Class Virtuous Imperial Concubine', 'translated'),
('MDWMSEIPL: Chapter ', 'My Daughter Was My Sworn Enemy In Past Life', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
467547
|
from decimal import Decimal
def ensure_type(value, types):
if isinstance(value, types):
return value
else:
raise TypeError('Value {value} is {value_type}, but should be {types}!'.format(
value=value, value_type=type(value), types=types))
class Token:
def __init__(self, weight: Decimal, denorm_weight: Decimal, balance: Decimal, bound: bool):
self.weight = weight
self.denorm_weight = denorm_weight
self.balance = balance
self.bound = bound
def __repr__(self):
return "Token weight: {}, denorm_weight: {}, balance: {}, bound: {}".format(self.weight, self.denorm_weight, self.balance, self.bound)
def __eq__(self, other):
if isinstance(other, Token):
return (self.weight == other.weight) and (self.denorm_weight == other.denorm_weight) and (self.balance == other.balance) and (self.bound == other.bound)
return NotImplemented
    def add(self, num):
        # `Token` stores its quantity in `balance`; there is no `amount` attribute.
        self.balance = self.balance + num
        return self.balance
@property
def balance(self):
return self.__dict__['balance']
@balance.setter
def balance(self, value):
self.__dict__['balance'] = ensure_type(value, Decimal)
|
467589
|
import pytest
from markdown_it import MarkdownIt
@pytest.mark.parametrize(
"input,expected",
[
("#", "<h1></h1>\n"),
("###", "<h3></h3>\n"),
("` `", "<p><code> </code></p>\n"),
("``````", "<pre><code></code></pre>\n"),
("-", "<ul>\n<li></li>\n</ul>\n"),
("1.", "<ol>\n<li></li>\n</ol>\n"),
(">", "<blockquote></blockquote>\n"),
("---", "<hr />\n"),
("<h1></h1>", "<h1></h1>"),
("p", "<p>p</p>\n"),
("[reference]: /url", ""),
],
)
def test_no_end_newline(input, expected):
md = MarkdownIt()
text = md.render(input)
assert text == expected
|
467595
|
from seldon_e2e_utils import (
initial_rest_request,
retry_run,
to_resources_path,
wait_for_rollout,
wait_for_status,
)
def test_xss_escaping(namespace):
sdep_name = "mymodel"
sdep_path = to_resources_path("graph-echo.json")
retry_run(f"kubectl apply -f {sdep_path} -n {namespace}")
wait_for_status(sdep_name, namespace)
wait_for_rollout(sdep_name, namespace)
payload = '<div class="div-class"></div>'
# There is a small difference between the engine and the executor, where
# the engine will escape the `=` symbol as its unicode equivalent, so we
# need to consider both.
expected = '\\u003cdiv class=\\"div-class\\"\\u003e\\u003c/div\\u003e'
res = initial_rest_request(sdep_name, namespace, data=payload, dtype="strData")
# We need to compare raw text (instead of `.json()`). Otherwise, Python
# interprets the escaped sequences.
assert expected in res.text
def test_xss_header(namespace):
sdep_name = "mymodel"
sdep_path = to_resources_path("graph-echo.json")
retry_run(f"kubectl apply -f {sdep_path} -n {namespace}")
wait_for_status(sdep_name, namespace)
wait_for_rollout(sdep_name, namespace)
res = initial_rest_request(sdep_name, namespace)
assert "X-Content-Type-Options" in res.headers
assert res.headers["X-Content-Type-Options"] == "nosniff"
|
467597
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core.files import File
from django.core.files.storage import default_storage
import errno
import os
import subprocess
from celery import shared_task
from videokit.apps import VideokitConfig
@shared_task
def generate_video(file_name, source_file_name, options=None):
    options = options or []
    base = getattr(settings, 'BASE_DIR', '')
media_root = getattr(settings, 'MEDIA_ROOT', '')
source_file = os.path.join(media_root, source_file_name)
if not os.path.exists(source_file):
raise IOError('%s does not exist.' % source_file)
temp_file_dir = os.path.join(base, getattr(settings, 'VIDEOKIT_TEMP_DIR', VideokitConfig.VIDEOKIT_TEMP_DIR))
temp_file = os.path.join(temp_file_dir, os.path.basename(file_name))
if not os.path.exists(temp_file_dir):
try:
os.makedirs(temp_file_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(temp_file_dir):
raise IOError('%s exists and is not a directory.' % temp_file_dir)
process = subprocess.Popen(
['ffmpeg', '-i', source_file, '-y'] + options + [temp_file])
process.wait()
processed_file = os.path.join(base, media_root, file_name)
    f = File(open(temp_file, 'rb'))
default_storage.save(processed_file, f)
f.close()
os.remove(temp_file)
|
467621
|
import os
import time
import atexit
from django.contrib.auth.models import User
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver import Remote
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import LiveServerTestCase
from selenium.common.exceptions import NoSuchElementException
from session_security.settings import WARN_AFTER, EXPIRE_AFTER
WAIT_TIME = 5 if not os.environ.get('CI', False) else 30
class SettingsMixin(object):
def setUp(self):
# Give some time for selenium lag
self.min_warn_after = WARN_AFTER
self.max_warn_after = EXPIRE_AFTER * 0.9
self.min_expire_after = EXPIRE_AFTER
self.max_expire_after = EXPIRE_AFTER * 1.5
super(SettingsMixin, self).setUp()
class BaseLiveServerTestCase(SettingsMixin, StaticLiveServerTestCase,
LiveServerTestCase):
fixtures = ['session_security_test_user']
def setUp(self):
SettingsMixin.setUp(self)
from selenium.webdriver.firefox.options import Options as FirefoxOptions
options = FirefoxOptions()
options.add_argument("--headless")
super(LiveServerTestCase, self).setUp()
        self.sel = webdriver.Firefox(options=options)
self.sel.get('%s%s' % (self.live_server_url, '/admin/'))
self.sel.find_element_by_name('username').send_keys('test')
self.sel.find_element_by_name('password').send_keys('<PASSWORD>')
self.sel.find_element_by_xpath('//input[@value="Log in"]').click()
self.sel.execute_script('window.open("/admin/", "other")')
def press_space(self):
body = self.sel.find_element_by_tag_name("body")
body.send_keys(Keys.SPACE)
def tearDown(self):
self.sel.quit()
@classmethod
def tearDownClass(cls):
super(BaseLiveServerTestCase, cls).tearDownClass()
|
467630
|
from context import arkouda as ak
import numpy as np
import pandas as pd
from time import time
from base_test import ArkoudaTest
def compare_strategies(length, ncat, op, dtype):
keys = ak.randint(0, ncat, length)
if dtype == 'int64':
vals = ak.randint(0, length//ncat, length)
elif dtype == 'bool':
vals = ak.zeros(length, dtype='bool')
for i in np.random.randint(0, length, ncat//2):
vals[i] = True
else:
vals = ak.linspace(-1, 1, length)
print("Global groupby", end=' ')
start = time()
gg = ak.GroupBy(keys, False)
ggtime = time() - start
print(ggtime)
print("Global reduce", end=' ')
start = time()
gk, gv = gg.aggregate(vals, op)
grtime = time() - start
print(grtime)
print("Local groupby", end=' ')
start = time()
lg = ak.GroupBy(keys, True)
lgtime = time() - start
print(lgtime)
print("Local reduce", end=' ')
start = time()
lk, lv = lg.aggregate(vals, op)
lrtime = time() - start
print(lrtime)
print(f"Keys match? {(gk == lk).all()}")
print(f"Absolute diff of vals = {ak.abs(gv - lv).sum()}")
return ggtime, grtime, lgtime, lrtime
class GroupByCompareStrategiesTest(ArkoudaTest):
    pass
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 5:
        print(f"Usage: {sys.argv[0]} <server> <port> <length> <num_categories> [op [dtype]]")
        sys.exit(1)
if len(sys.argv) < 6:
op = 'sum'
else:
op = sys.argv[5]
if len(sys.argv) < 7:
dtype = 'float64'
else:
dtype = sys.argv[6]
ak.connect(sys.argv[1], int(sys.argv[2]))
compare_strategies(int(sys.argv[3]), int(sys.argv[4]), op, dtype)
sys.exit()
|
467668
|
from __future__ import unicode_literals, division, print_function, absolute_import
from builtins import zip
import os
import random
import numpy as np
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression as logreg
import sklearn.metrics as skmet
from nlputils.features import FeatureTransform, features2mat
from nlputils.dict_utils import invert_dict0, combine_dicts
from .vis_utils import create_wordcloud, scores2html
from .distinctive_words import get_distinctive_words
def select_subset(textdict, doccats, visids=[]):
"""
select a random subset of the dataset if it contains more than 10000 examples
Input and Returns:
textdict: dict with {doc_id: text}
doccats: dict with {doc_id: category}
visids: a subset of docids for which the html visualization should be created
"""
docids = sorted(textdict.keys()) # sort for consistency across OS
random.seed(42)
random.shuffle(docids)
# visualize up to 1000 documents
if not len(visids):
visids = docids[:1000]
elif len(visids) > 1000:
print("WARNING: creating visualizations for %i, i.e. more than 1000 documents can be slow!" % len(visids))
if len(visids) > 10000:
print("You don't know what you're doing....Truncating visids to 5000 examples.")
visids = visids[:5000]
# select subsets of examples to speed up the computations
if len(docids) > 10000:
# always make sure you end up with exactly 10k random examples (incl visids) but also don't shuffle a lot more than 10k ids
docids = list(set(docids[:10000+len(visids)]).difference(set(visids)))
random.shuffle(docids)
docids = docids[:10000-len(visids)] + visids
textdict = {d: textdict[d] for d in docids}
doccats = {d: doccats[d] for d in docids}
return textdict, doccats, visids
def visualize_tfidf(textdict, doccats, create_html=True, visids=[], subdir_html='', subdir_wc='', maskfiles={}):
"""
visualize a text categorization dataset w.r.t. tf-idf features (create htmls with highlighted words and word clouds)
Input:
textdict: dict with {doc_id: text}
doccats: dict with {doc_id: category}
create_html: whether to create the html files with scores highlighted for individual documents (default: True)
visids: a subset of docids for which the html visualization should be created (optional)
(if create_html=True but visids=[], select up to 1000 random ids)
subdir_html: subdirectory to save the created html files in (has to exist)
subdir_wc: subdirectory to save the created word cloud images in (has to exist)
maskfiles: dict with {category: path_to_maskfile} for creating the word clouds in a specific form
Returns:
relevant_words: dict with {category: {word: relevancy score}}
"""
print("possibly selecting subset of 10000 examples")
textdict, doccats, visids = select_subset(textdict, doccats, visids)
print("transforming text into features")
# we can identify bigrams if we don't have to create htmls
ft = FeatureTransform(norm='max', weight=True, renorm='max', identify_bigrams=not create_html, norm_num=False)
docfeats = ft.texts2features(textdict)
# maybe highlight the tf-idf scores in the documents
if create_html:
print("creating htmls for %i of %i documents" % (len(visids), len(docfeats)))
for i, did in enumerate(visids):
if not i % 100:
print("progress: at %i of %i documents" % (i, len(visids)))
metainf = did + '\n' + 'True Class: %s\n' % doccats[did]
name = did + '_' + doccats[did]
scores2html(textdict[did], docfeats[did], os.path.join(subdir_html, name.replace(' ', '_').replace('/', '_')), metainf)
# get a map for each category to the documents belonging to it
catdocs = invert_dict0(doccats)
# create word clouds for each category by summing up tfidf scores
scores_collected = {}
for cat in catdocs:
print("creating word cloud for category %r with %i samples" % (cat, len(catdocs[cat])))
scores_collected[cat] = {}
for did in catdocs[cat]:
scores_collected[cat] = combine_dicts(scores_collected[cat], docfeats[did], sum)
# create word cloud
create_wordcloud(scores_collected[cat], os.path.join(subdir_wc, "%s.png" % cat), maskfiles[cat] if cat in maskfiles else None)
return scores_collected
def visualize_clf(textdict, doccats, create_html=True, visids=[], subdir_html='', subdir_wc='', maskfiles={}, use_logreg=False):
"""
visualize a text categorization dataset w.r.t. classification scores (create htmls with highlighted words and word clouds)
Input:
textdict: dict with {doc_id: text}
doccats: dict with {doc_id: category}
create_html: whether to create the html files with scores highlighted for individual documents (default: True)
visids: a subset of docids for which the html visualization should be created (optional)
(if create_html=True but visids=[], select up to 1000 random ids)
subdir_html: subdirectory to save the created html files in (has to exist)
subdir_wc: subdirectory to save the created word cloud images in (has to exist)
maskfiles: dict with {category: path_to_maskfile} for creating the word clouds in a specific form
use_logreg: default False; whether to use logistic regression instead of linear SVM
Returns:
relevant_words: dict with {category: {word: relevancy score}}
"""
print("possibly selecting subset of 10000 examples")
textdict, doccats, visids = select_subset(textdict, doccats, visids)
# training examples are all but visids
trainids = list(set(textdict.keys()).difference(set(visids)))
# train a classifier and predict
if use_logreg:
renorm = 'max'
clf = logreg(class_weight='balanced', random_state=1)
else:
renorm = 'length'
clf = LinearSVC(C=10., class_weight='balanced', random_state=1)
print("transforming text into features")
# make features (we can use bigrams if we don't have to create htmls)
ft = FeatureTransform(norm='max', weight=True, renorm=renorm, identify_bigrams=not create_html, norm_num=False)
docfeats = ft.texts2features(textdict, fit_ids=trainids)
# convert training data to feature matrix
featmat_train, featurenames = features2mat(docfeats, trainids)
y_train = [doccats[tid] for tid in trainids]
# fit classifier
print("training classifier")
clf.fit(featmat_train, y_train)
del featmat_train
# make test featmat and label vector
print("making predictions")
featmat_test, featurenames = features2mat(docfeats, visids, featurenames)
# get actual classification results for all test samples
predictions = clf.decision_function(featmat_test)
predictions_labels = clf.predict(featmat_test)
y_true, y_pred = [doccats[tid] for tid in visids], list(predictions_labels)
# report classification accuracy
if len(clf.classes_) > 2:
f1_micro, f1_macro = skmet.f1_score(y_true, y_pred, average='micro'), skmet.f1_score(y_true, y_pred, average='macro')
print("F1 micro-avg: %.3f, F1 macro-avg: %.3f" % (f1_micro, f1_macro))
print("Accuracy: %.3f" % skmet.accuracy_score(y_true, y_pred))
# create the visualizations
print("creating the visualization for %i test examples" % len(visids))
# collect all the accumulated scores to later create a wordcloud
scores_collected = np.zeros((len(featurenames), len(clf.classes_)))
# run through all test documents
for i, tid in enumerate(visids):
if not i % 100:
print("progress: at %i of %i test examples" % (i, len(visids)))
# transform the feature vector into a diagonal matrix
feat_vec = lil_matrix((len(featurenames), len(featurenames)), dtype=float)
feat_vec.setdiag(featmat_test[i, :].toarray().flatten())
feat_vec = csr_matrix(feat_vec)
# get the scores (i.e. before summing up)
scores = clf.decision_function(feat_vec)
# adapt for the intercept
scores -= (1. - 1./len(featurenames)) * clf.intercept_
# when creating the html visualization we want the words speaking for the prediction
# but when creating the word cloud, we want the words speaking for the actual class
metainf = tid + '\n'
# binary or multi class?
if len(scores.shape) == 1:
if clf.classes_[0] == predictions_labels[i]:
# we want the scores which speak for the class - for the negative class,
# the sign needs to be reversed
scores *= -1.
scores_dict = dict(zip(featurenames, scores))
metainf += 'True Class: %s\n' % doccats[tid]
metainf += 'Predicted Class: %s (Score: %.4f)' % (predictions_labels[i], predictions[i])
scores_collected[:, clf.classes_ == doccats[tid]] += np.array([scores]).T
else:
scores_dict = dict(zip(featurenames, scores[:, clf.classes_ == predictions_labels[i]][:, 0]))
metainf += 'True Class: %s (Score: %.4f)\n' % (doccats[tid], predictions[i, clf.classes_ == doccats[tid]][0])
metainf += 'Predicted Class: %s (Score: %.4f)' % (predictions_labels[i], predictions[i, clf.classes_ == predictions_labels[i]][0])
scores_collected[:, clf.classes_ == doccats[tid]] += scores[:, clf.classes_ == doccats[tid]]
# use the vector with scores together with the corresponding feature names and the original text
# to create the pretty visualization
if create_html:
if y_true[i] == y_pred[i]:
name = 'correct_'
else:
name = 'error_'
name += tid + '_' + doccats[tid]
scores2html(textdict[tid], scores_dict, os.path.join(subdir_html, name.replace(' ', '_').replace('/', '_')), metainf)
print("creating word clouds")
# normalize the scores for each class
scores_collected /= np.max(np.abs(scores_collected), axis=0)
# transform the collected scores into a dictionary and create word clouds
scores_collected_dict = {cat: dict(zip(featurenames, scores_collected[:, clf.classes_ == cat][:, 0])) for cat in clf.classes_}
for cat in scores_collected_dict:
create_wordcloud(scores_collected_dict[cat], os.path.join(subdir_wc, "%s.png" % cat), maskfiles[cat] if cat in maskfiles else None)
return scores_collected_dict
def visualize_distinctive(textdict, doccats, subdir_wc='', maskfiles={}):
"""
visualize a text categorization dataset by creating word clouds of `distinctive' words
Input:
textdict: dict with {doc_id: text}
doccats: dict with {doc_id: category}
subdir_wc: subdirectory to save the created word cloud images in (has to exist)
maskfiles: dict with {category: path_to_maskfile} for creating the word clouds in a specific form
Returns:
relevant_words: dict with {category: {word: relevancy score}}
"""
print("possibly selecting subset of 10000 examples")
textdict, doccats, _ = select_subset(textdict, doccats, {})
print("get 'distinctive' words")
# this contains a dict for every category with {word: trend_score_for_this_category}
distinctive_words = get_distinctive_words(textdict, doccats)
# create the corresponding word clouds
print("creating word clouds")
for cat in distinctive_words:
create_wordcloud(distinctive_words[cat], os.path.join(subdir_wc, "%s.png" % cat), maskfiles[cat] if cat in maskfiles else None)
return distinctive_words
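# Hedged usage sketch (not part of the original module): select_subset on a
# tiny toy corpus; the doc ids and categories below are made up.
if __name__ == '__main__':
    toy_texts = {'d1': 'spam spam spam', 'd2': 'ham and eggs', 'd3': 'more spam'}
    toy_cats = {'d1': 'spam', 'd2': 'ham', 'd3': 'spam'}
    texts, cats, vis = select_subset(toy_texts, toy_cats)
    print(sorted(vis))  # with <= 1000 docs, all ids are kept for visualization
    # visualize_tfidf(texts, cats, subdir_html='html', subdir_wc='wc')  # needs existing output dirs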
|
467688
|
import math
import numpy as np
def sum_circle(data, x, y, r):
"""Sum array values that fall within the given circle.
Parameters
----------
data : numpy.ndarray
The array to sum.
x, y, r : float
The center and radius of circle, in array coordinates.
"""
imin = math.floor((x - r) + 0.5)
imax = math.floor((x + r) + 0.5)
jmin = math.floor((y - r) + 0.5)
jmax = math.floor((y + r) + 0.5)
irange = range(imin, imax+1)
jrange = range(jmin, jmax+1)
data_stamp = data[jmin:jmax+1, imin:imax+1]
X, Y = np.meshgrid(irange, jrange)
mask = (X - x)**2 + (Y - y)**2 < r**2
return np.sum(data_stamp[mask])
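# Hedged usage sketch (not part of the original module): summing a circle of
# radius 2 centered in a 7x7 array of ones counts the grid cells whose centers
# fall inside the circle.
if __name__ == '__main__':
    grid = np.ones((7, 7))
    print(sum_circle(grid, x=3.0, y=3.0, r=2.0))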
|
467785
|
import morepath
import {{ cookiecutter.package_name }}
from webtest import TestApp as Client
def test_root():
morepath.scan({{ cookiecutter.package_name }})
morepath.commit({{ cookiecutter.package_name }}.App)
client = Client({{ cookiecutter.package_name }}.App())
root = client.get('/')
assert root.status_code == 200
{%- if 'traditional' in cookiecutter.goal %}
assert '/greeting/world' in root
assert '/greeting/mundo' in root
{%- else %}
assert len(root.json['greetings']) == 2
{%- endif %}
|
467796
|
from netforce.model import Model,fields,get_model
class Settings(Model):
_name="ecom2.settings"
_string="Settings"
_fields={
"delivery_slot_discount": fields.Decimal("Same Delivery Slot Discount"),
"delivery_max_days": fields.Integer("Delivery Max Days"),
"delivery_min_hours": fields.Integer("Delivery Min Hours"),
"ecom_num_lots": fields.Integer("Number Of Lots To Show On Website"),
"sale_lead_time_nostock": fields.Integer("Sale Lead Time When Out Of Stock (Days)"),
"ecom_return_url": fields.Char("Return URL of ecommerce frontend"),
"extra_ship_addresses": fields.One2Many("address","related_id","Extra Shipping Addresses"),
"work_time_start": fields.Char("Work Time Start"),
"work_time_end": fields.Char("Work Time End"),
}
Settings.register()
|
467806
|
from __future__ import annotations
from typing import Any
from dependency_injector.wiring import Provide, inject
from rich.style import Style
from rich.text import Text
from textual.layouts.grid import GridLayout
from textual.views import GridView
from textual.widget import Widget
from textual.widgets import ButtonPressed
from .. import styles
from ..containers import Container
from ..jenkins import Jenkins
from ..widgets import (
ButtonWidget,
FlashMessageType,
ShowFlashNotification,
TextInputFieldWidget,
)
from .base import BaseView
class BuildWithParametersView(BaseView):
"""Used to display the build with parameters options."""
@inject
def __init__(
self, job: dict[str, Any], client: Jenkins = Provide[Container.client]
) -> None:
"""Used to display the build with parameters options.
Args:
job (dict[str, Any]): A job dictionary.
# noqa: DAR101 client
"""
super().__init__()
self.layout = GridLayout()
self.job = job
self.client = client
self.fields: list[Widget] = []
self.current_button: ButtonWidget | None = None
async def on_show(self) -> None:
await self.app.set_focus(self.fields[0])
async def on_mount(self) -> None:
"""Actions that are executed when the widget is mounted."""
self.layout.add_column("col1", size=60)
self.layout.set_align("center")
self.layout.set_gutter(0, 0)
# Fields
for parameter in self.job["property"][0]["parameterDefinitions"]:
name = parameter["name"]
title = name.lower().replace("_", " ").capitalize()
placeholder = Text(
parameter["description"] or "", style=Style(italic=True, dim=True)
)
default_parameter = (
parameter["defaultParameterValue"].get("value", "")
if parameter["defaultParameterValue"]
else ""
)
field = TextInputFieldWidget(
name=name,
title=title,
placeholder=placeholder,
default_value=default_parameter,
choices=parameter.get("choices", []),
border_style=styles.PURPLE
# validation_regex=parameter.get("validationRegex", None),
)
self.layout.add_row(f"row_{name}", size=3)
self.fields.append(field)
self.layout.place(*self.fields)
self.layout.add_row("row_buttons", size=5)
await self.add_button(text="ok")
await self.add_button(text="cancel")
buttons_view = GridView()
buttons_view.grid.add_column("ok", size=15)
buttons_view.grid.add_column("cancel", size=15)
buttons_view.grid.add_row("row", size=3)
buttons_view.grid.set_align("center", "center")
buttons_view.grid.place(*list(self.buttons.values()))
self.layout.place(*[buttons_view])
# set focus on first text input field
await self.refresh_layout()
async def handle_button_pressed(self, message: ButtonPressed) -> None:
"""Handles button pressed messages.
Args:
message (ButtonPressed): The button pressed message.
"""
# reset previous current button toggle value
self.log(f"Handling button press: {message.name}")
message.stop()
if self.current_button:
self.current_button.toggle = False
# set new current button toggle value
self.current_button = message.sender
assert isinstance(self.current_button, ButtonWidget)
self.current_button.toggle = True
if message.sender.name == "cancel":
await self.parent.action("history")
if message.sender.name == "ok":
parameters = {}
validation_failed = False
for field in self.fields:
valid = await field.validate()
parameters[field.name] = field.value
if not valid:
validation_failed = True
self.current_button.toggle = False
if not validation_failed:
field.value = field.default_value or ""
try:
await self.client.build(
path=self.parent.path, parameters=parameters
)
await self.post_message_from_child(
ShowFlashNotification(
self, value="Build scheduled", type=FlashMessageType.SUCCESS
)
)
except Exception as e:
await self.post_message_from_child(
ShowFlashNotification(
self, type=FlashMessageType.ERROR, value=str(e)
)
)
await self.parent.action("history")
self.refresh(layout=True)
|
467819
|
import base64
from collections import namedtuple
import pytest
KubeResult = namedtuple("KubeResult", ["data"])
# ===========
# base secret
# ===========
def test_secret_get(gsecret):
with pytest.raises(NotImplementedError) as exc:
gsecret.get("foo")
assert "" in str(exc.value)
def test_secret_set(gsecret):
with pytest.raises(NotImplementedError) as exc:
gsecret.set("foo", "bar")
assert "" in str(exc.value)
def test_secret_get_all(gsecret):
with pytest.raises(NotImplementedError) as exc:
gsecret.get_all()
assert "" in str(exc.value)
def test_secret_set_all(gsecret):
with pytest.raises(NotImplementedError) as exc:
gsecret.set_all({})
assert "" in str(exc.value)
# ============
# vault secret
# ============
def test_vault_secret_verify_cert(gvault_secret, tmpdir):
cacert_file = tmpdir.join("cacert.pem")
cacert_file.write("cacert")
cert_file = tmpdir.join("cert.pem")
cert_file.write("cert")
key_file = tmpdir.join("key.pem")
key_file.write("key")
cert, verify = gvault_secret._verify_cert(
"https", True, str(cacert_file), str(cert_file), str(key_file),
)
assert cert == (str(cert_file), str(key_file))
assert verify == str(cacert_file)
def test_vault_secret_role_id(gvault_secret, tmpdir):
file_ = tmpdir.join("vault_role_id.txt")
file_.write("role-id")
gvault_secret.settings["CN_SECRET_VAULT_ROLE_ID_FILE"] = str(file_)
assert gvault_secret.role_id == "role-id"
def test_vault_secret_role_id_missing(gvault_secret):
assert gvault_secret.role_id == ""
def test_vault_secret_secret_id(gvault_secret, tmpdir):
file_ = tmpdir.join("vault_secret_id.txt")
file_.write("secret-id")
gvault_secret.settings["CN_SECRET_VAULT_SECRET_ID_FILE"] = str(file_)
assert gvault_secret.secret_id == "secret-id"
def test_vault_secret_secret_id_missing(gvault_secret):
assert gvault_secret.secret_id == ""
def test_vault_secret_authenticate_authenticated(gvault_secret, monkeypatch):
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: True,
)
assert gvault_secret._authenticate() is None
def test_vault_secret_authenticate_not_authenticated(gvault_secret, monkeypatch):
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: False,
)
monkeypatch.setattr(
"hvac.api.auth_methods.approle.AppRole.login",
lambda cls, role_id, secret_id, use_token: {"auth": {"client_token": "token"}}
)
gvault_secret._authenticate()
assert gvault_secret.client.token == "token"
def test_vault_secret_get(gvault_secret, monkeypatch):
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: True,
)
monkeypatch.setattr(
"hvac.Client.read",
lambda cls, key: {"data": {"value": "bar"}},
)
assert gvault_secret.get("foo") == "bar"
def test_vault_secret_get_default(gvault_secret, monkeypatch):
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: True,
)
monkeypatch.setattr(
"hvac.Client.read",
lambda cls, key: {},
)
assert gvault_secret.get("foo", "default") == "default"
def test_vault_secret_set(gvault_secret, monkeypatch):
from collections import namedtuple
Response = namedtuple("Response", ["status_code"])
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: True,
)
monkeypatch.setattr(
"hvac.adapters.Request.post",
lambda cls, url, json: Response(204),
)
assert gvault_secret.set("foo", "bar") is True
def test_vault_secret_all(gvault_secret, monkeypatch):
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: True,
)
monkeypatch.setattr(
"hvac.Client.list",
lambda cls, key: {"data": {"keys": ["foo"]}},
)
monkeypatch.setattr(
"hvac.Client.read",
lambda cls, key: {"data": {"value": "bar"}},
)
assert gvault_secret.all() == {"foo": "bar"}
def test_vault_secret_all_empty(gvault_secret, monkeypatch):
monkeypatch.setattr(
"hvac.Client.is_authenticated",
lambda cls: True,
)
monkeypatch.setattr(
"hvac.Client.list",
lambda cls, key: None,
)
assert gvault_secret.all() == {}
def test_vault_secret_request_warning(gvault_secret, caplog):
gvault_secret._request_warning("https", False)
assert "All requests to Vault will be unverified" in caplog.records[0].message
# =================
# kubernetes secret
# =================
def test_k8s_secret_prepare_secret_read(gk8s_secret, monkeypatch):
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.read_namespaced_secret",
lambda cls, n, ns: KubeResult(data={"foo": base64.b64encode(b"bar")}),
)
gk8s_secret._prepare_secret()
assert gk8s_secret.name_exists is True
def test_k8s_secret_prepare_secret_create(gk8s_secret, monkeypatch):
import kubernetes.client.rest
def _raise_exc(status):
raise kubernetes.client.rest.ApiException(status=status)
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.read_namespaced_secret",
lambda cls, n, ns: _raise_exc(404),
)
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.create_namespaced_secret",
lambda cls, n, ns: KubeResult(data={"foo": base64.b64encode(b"bar")}),
)
gk8s_secret._prepare_secret()
assert gk8s_secret.name_exists is True
def test_k8s_secret_prepare_secret_not_created(gk8s_secret, monkeypatch):
import kubernetes.client.rest
def _raise_exc(status):
raise kubernetes.client.rest.ApiException(status=status)
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.read_namespaced_secret",
lambda cls, n, ns: _raise_exc(500),
)
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.create_namespaced_secret",
lambda cls, n, ns: KubeResult(data={"foo": base64.b64encode(b"bar")}),
)
with pytest.raises(kubernetes.client.rest.ApiException):
gk8s_secret._prepare_secret()
assert gk8s_secret.name_exists is False
def test_k8s_secret_get(gk8s_secret, monkeypatch):
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.read_namespaced_secret",
lambda cls, n, ns: KubeResult(data={"foo": base64.b64encode(b"bar")}),
)
assert gk8s_secret.get("foo") == "bar"
def test_k8s_secret_get_default(gk8s_secret, monkeypatch):
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.read_namespaced_secret",
lambda cls, n, ns: KubeResult(data={}),
)
assert gk8s_secret.get("foo", "default") == "default"
def test_k8s_secret_set(gk8s_secret, monkeypatch):
gk8s_secret.name_exists = True
monkeypatch.setattr(
"kubernetes.client.CoreV1Api.patch_namespaced_secret",
lambda cls, n, ns, body: KubeResult(data={})
)
assert gk8s_secret.set("foo", "bar") is True
def test_k8s_secret_incluster():
import kubernetes.config.config_exception
from jans.pycloudlib.secret import KubernetesSecret
secret = KubernetesSecret()
with pytest.raises(kubernetes.config.config_exception.ConfigException):
secret.client
|
467842
|
from ciscoconfparse import CiscoConfParse
from pprint import pprint
bgp_config = """
router bgp 44
bgp router-id 10.220.88.38
address-family ipv4 unicast
!
neighbor 10.220.88.20
remote-as 42
description pynet-rtr1
address-family ipv4 unicast
route-policy ALLOW in
route-policy ALLOW out
!
!
neighbor 10.220.88.32
remote-as 43
address-family ipv4 unicast
route-policy ALLOW in
route-policy ALLOW out
"""
# When feeding config directly - CiscoConfParse requires a list
bgp_obj = CiscoConfParse(bgp_config.splitlines())
# Result of find_objects_w_parents will be the child objects
bgp_peers = []
neighbors = bgp_obj.find_objects_w_parents(
parentspec=r"router bgp", childspec=r"neighbor"
)
for neighbor in neighbors:
_, neighbor_ip = neighbor.text.split()
for child in neighbor.children:
if "remote-as" in child.text:
_, remote_as = child.text.split()
bgp_peers.append((neighbor_ip, remote_as))
print()
print("BGP Peers: ")
pprint(bgp_peers)
print()
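# For the sample config above, the collected peers should be
# [('10.220.88.20', '42'), ('10.220.88.32', '43')] (derived from the
# remote-as lines; shown here only as a reference for the reader).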
|
467911
|
import os
import torch
class BasenjiDataset(torch.utils.data.Dataset):
def __init__(self, human_file, mouse_file):
self._human_file = human_file
self._mouse_file = mouse_file
self._human_data = torch.load(self._human_file)
self._mouse_data = torch.load(self._mouse_file)
@property
def human_data(self):
return self._human_data
@property
def mouse_data(self):
return self._mouse_data
    def __len__(self):
        # the dataset length is the number of examples (sequences), not the
        # number of keys in the loaded data dict
        return len(self.human_data["sequence"])
def __getitem__(self, idx):
return {"human": {"sequence": self.human_data["sequence"][idx],
"target": self.human_data["target"][idx]},
"mouse": {"sequence": self.mouse_data["sequence"][idx],
"target": self.mouse_data["target"][idx]}}
|
467916
|
import logging as log
import shutil
import sys
import time
from pathlib import Path
from halo import Halo
from . import errors, utils
from .stages import Source, SourceType
def discover_implied_stage(filename, config, possible_dests=None):
"""
Use the mapping from filename extensions to stages to figure out which
stage was implied.
"""
if filename is None:
raise errors.NoInputFile(possible_dests)
suffix = Path(filename).suffix
for (name, stage) in config["stages"].items():
if "file_extensions" in stage:
for ext in stage["file_extensions"]:
if suffix == ext:
return name
    # no stages corresponding to this file extension were found
raise errors.UnknownExtension(filename)
def construct_path(args, config, through):
"""
Construct the path of stages implied by the passed arguments.
"""
# find source
source = args.source
if source is None:
source = discover_implied_stage(args.input_file, config)
# find target
target = args.dest
if target is None:
target = discover_implied_stage(args.output_file, config)
path = config.REGISTRY.make_path(source, target, through)
if path is None:
raise errors.NoPathFound(source, target, through)
# If the path doesn't execute anything, it is probably an error.
if len(path) == 0:
raise errors.TrivialPath(source)
return path
def run_fud(args, config):
"""
Execute all the stages implied by the passed `args`
"""
# check if input_file exists
input_file = None
if args.input_file is not None:
input_file = Path(args.input_file)
if not input_file.exists():
raise FileNotFoundError(input_file)
path = construct_path(args, config, args.through)
# check if we need `-o` specified
if path[-1].output_type == SourceType.Directory and args.output_file is None:
raise errors.NeedOutputSpecified(path[-1])
# if we are doing a dry run, print out stages and exit
if args.dry_run:
print("fud will perform the following steps:")
for ed in path:
print(f"Stage: {ed.name}")
ed.dry_run()
return
# spinner is disabled if we are in debug mode, doing a dry_run, or are in quiet mode
spinner_enabled = not (utils.is_debug() or args.dry_run or args.quiet)
# Execute the path transformation specification.
with Halo(
spinner="dots", color="cyan", stream=sys.stderr, enabled=spinner_enabled
) as sp:
sp = utils.SpinnerWrapper(sp, save=log.getLogger().level <= log.INFO)
# construct a source object for the input
data = None
if input_file is None:
data = Source(None, SourceType.UnTyped)
else:
data = Source(Path(str(input_file)), SourceType.Path)
profiled_stages = utils.parse_profiling_input(args)
# tracks profiling information requested by the flag (if set).
collected_for_profiling = {}
# tracks the approximate time elapsed to run each stage.
overall_durations = []
# run all the stages
for ed in path:
txt = f"{ed.src_stage} → {ed.target_stage}" + (
f" ({ed.name})" if ed.name != ed.src_stage else ""
)
sp.start_stage(txt)
try:
if ed._no_spinner:
sp.stop()
begin = time.time()
data = ed.run(data, sp=sp if ed._no_spinner else None)
overall_durations.append(time.time() - begin)
sp.end_stage()
except errors.StepFailure as e:
sp.fail()
print(e)
exit(-1)
# Collect profiling information for this stage.
if ed.name in profiled_stages:
collected_for_profiling[ed.name] = ed
sp.stop()
if args.profiled_stages is not None:
if data is None:
data = Source("", SourceType.String)
else:
# Overwrite previous data type.
data.typ = SourceType.String
if args.profiled_stages == []:
# No stages provided; collect overall stage durations.
data.data = utils.profile_stages(
"stage", [ed for ed in path], overall_durations, args.csv
)
else:
# Otherwise, gather profiling data for each stage and steps provided.
def gather_profiling_data(stage, steps):
data = collected_for_profiling.get(stage)
# Verify this is a valid stage.
if data is None:
raise errors.UndefinedStage(stage)
# Verify the steps are valid.
valid_steps = [s.name for s in data.steps]
invalid_steps = [s for s in steps if s not in valid_steps]
if invalid_steps:
raise errors.UndefinedSteps(stage, invalid_steps)
# If no specific steps provided for this stage, append all of them.
profiled_steps = [
s for s in data.steps if steps == [] or s.name in steps
]
# Gather all the step names that are being profiled.
profiled_names = [s.name for s in profiled_steps]
profiled_durations = [data.durations[s] for s in profiled_names]
return utils.profile_stages(
stage,
profiled_steps,
profiled_durations,
args.csv,
)
data.data = "\n".join(
gather_profiling_data(stage, steps)
for stage, steps in profiled_stages.items()
)
# output the data or profiling information.
if args.output_file is not None:
if data.typ == SourceType.Directory:
shutil.move(data.data.name, args.output_file)
else:
with Path(args.output_file).open("wb") as f:
f.write(data.convert_to(SourceType.Bytes).data)
elif data:
print(data.convert_to(SourceType.String).data)
|
467922
|
import psnr
import ssim
import os
import sys
import cv2
import scipy.misc
import uqim_utils
import numpy as np
#import matlab.engine
import imgqual_utils
from PIL import Image
#author:yetian
#time:2020/12/7
# ref_file = r'sample1.jpg'
# dist_file = r'sample1_tmp.jpg'
#ref_path = r'D:\underwaterImageDateset\reference-890'  # reference image directory
ref_path = r'D:\github\Image-quality-measure-method\groundtruth_test'
#ref_path = r'D:\underwaterImageDateset\underwater_imagenet_UGAN\underwater_imagenet\trainB'
#dist_path = r'D:\python_code\Single-Underwater-Image-Enhancement-and-Color-Restoration-master\Underwater Image Color Restoration\UDCP\OutputImages'  # test image directory
#dist_path = r'D:\python_code\Single-Underwater-Image-Enhancement-and-Color-Restoration-master\Underwater-Image-Enhancement-based-on-Fusion-Python-main\OutputImages'
#dist_path = r'D:\github\cv-paper-reproduction\UDCP-RAW890results'
#dist_path = r'D:\underwaterImageDateset\newtest90_FullA'
#dist_path =r'D:\github\Underwater-ColorRestoration-study\RGB_CC2_results'
dist_path = r'D:\github\MSR-D-enhance-underwater-image\test90_FullA'
dist_path = r'D:\github\PyTorch-Image-Dehazing\results'
#dist_path = r'D:\underwaterImageDateset\water-net-test90\sample'
#dist_path = r'D:\github\MSR-D-enhance-underwater-image\OUR-RAW890dataset_results'
#dist_path = r'D:\underwaterImageDateset\underwater_imagenet_UGAN\underwater_imagenet\OURS'
#dist_path = r'D:\github\cv-paper-reproduction\water-net\sample1-90'
#dist_path = r'D:\underwaterImageDateset\test-90Fusion'
#dist_path =r'D:\github\Over-all-New-underwater-enhancement\Cc_test90'
#dist_path = r'D:\github\cv-paper-reproduction\fusion-optimization\Underwater-Image-Enhancement-based-on-Fusion-Python-main\test90_results'
#dist_path =r'D:\python_code\Single-Underwater-Image-Enhancement-and-Color-Restoration-master\Underwater Image Color Restoration\UDCP\OutputImages'
#dist_path = r'D:\python_code\Single-Underwater-Image-Enhancement-and-Color-Restoration-master\Underwater Image Color Restoration\DCP\OutputImages'
#dist_path =r'D:\python_code\Single-Underwater-Image-Enhancement-and-Color-Restoration-master\Underwater Image Enhancement\CLAHE\OutputImages'
#dist_path = r'D:\python_code\Single-Underwater-Image-Enhancement-and-Color-Restoration-master\Underwater Image Enhancement\HE\OutputImages'
def cv_show(img, name):
    # cv2.imshow expects (window_name, image), so pass the name first
    cv2.imshow(name, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
ref_filelist = os.listdir(ref_path)  # list of reference image files
dist_filelist = os.listdir(dist_path)  # list of test image files
save_file = 'OURS_test90_2021_3_6.txt'
#save_file = r'water-net_2021_1_2_11.txt'
psnr_list=[]
ssim_list=[]
uiqm_list=[]
uciqe_list = []
mse_list = []
#eng = matlab.engine.start_matlab()
for dist_file in dist_filelist:  # iterate over the test images
    dist_file_dir = os.path.join(dist_path, dist_file)  # absolute path of the file
    if os.path.isdir(dist_file_dir):  # skip directories
continue
dist_img = Image.open(dist_file_dir)
#dist_img = cv2.imread(dist_file_dir)
dist_img = np.float32(dist_img)
filename = os.path.splitext(dist_file)[0] + '.jpg' #ref filename
ref_img = Image.open(ref_path + '\\' + filename)
#ref_img = cv2.imread(ref_path+'\\'+filename)
ref_img = np.float32(ref_img)
psnr_data = imgqual_utils.getPSNR(np.uint8(dist_img),np.uint8(ref_img))
ssim_data = imgqual_utils.getSSIM(dist_img,ref_img)
    #psnr_data = psnr.psnr(ref_img,dist_img)  # PSNR metric
    #ssim_data = ssim.ssim_exact(ref_img/255,dist_img/255)  # SSIM metric
    #uciqe_data = eng.test_UCIQE2py(dist_file_dir)
    uciqe_data = 0
    #mse_data = psnr.MSE(ref_img,dist_img)  # MSE metric
print("img:" + str(filename)+" psnr:" + str(psnr_data) + " ssim:"+str(ssim_data)+" UCIQE:"+str(uciqe_data))
data = str(filename)+" psnr:" + str(psnr_data) + " ssim:"+str(ssim_data) + " UCIQE:"+str(uciqe_data)
psnr_list.append(psnr_data)
ssim_list.append(ssim_data)
#uiqm_list.append(uiqm_data)
uciqe_list.append(uciqe_data)
#mse_list.append(mse_data/1000)
average = "psnr_average:" + str(sum(psnr_list) / len(psnr_list)) + " ssim_average:" + str(sum(ssim_list)/len(ssim_list))+" UCIQE:"+str(sum(uciqe_list)/len(uciqe_list)) # " MSE:"+str(sum(mse_list)/len(mse_list))
print(average)
with open(save_file,"a") as file:
file.write(data + " "+average +'\n')
|
467924
|
import jax.numpy as jnp
from jax import jit
from jax.lax import scan
@jit
def erfcx(x):
"""erfcx (float) based on Shepherd and Laframboise (1981)
Scaled complementary error function exp(-x*x) erfc(x)
Args:
x: should be larger than -9.3
Returns:
jnp.array: erfcx(x)
Note:
We acknowledge the post in stack overflow (https://stackoverflow.com/questions/39777360/accurate-computation-of-scaled-complementary-error-function-erfcx).
"""
a=jnp.abs(x)
q = (-a*(a-2.0)/(a+2.0)-2.0*((a-2.0)/(a+2.0)+1.0)+a)/(a+2.0) + (a-2.0)/(a+2.0)
p=(((((((((((5.92470169e-5*q+1.61224554e-4)*q-3.46481771e-4)*q-1.39681227e-3)*q+1.20588380e-3)*q+8.69014394e-3)*q-8.01387429e-3)*q-5.42122945e-2)*q+1.64048523e-1)*q-1.66031078e-1)*q-9.27637145e-2)*q+2.76978403e-1)
q = (p+1.0)/(1.0+2.0*a)
d = (p+1.0)-q*(1.0+2.0*a)
f = 0.5*d/(a+0.5) + q
f = jnp.where(x>=0.0, f, 2.0*jnp.exp(x**2) - f)
return f
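# Hedged usage sketch (not part of the original module): erfcx(0) should be 1,
# and for large x the result approaches 1 / (sqrt(pi) * x).
if __name__ == '__main__':
    x = jnp.array([0.0, 1.0, 5.0])
    print(erfcx(x))
    print(1.0 / (jnp.sqrt(jnp.pi) * 5.0))  # rough check for the last entry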
|
467943
|
from os.path import abspath, dirname
from typing import Any, Sequence, Union
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from matplotlib.gridspec import GridSpec
from matplotlib.offsetbox import AnchoredText
from numpy.typing import NDArray
from sklearn.metrics import r2_score
ROOT = dirname(dirname(abspath(__file__)))
NumArray = NDArray[Union[np.float64, np.int_]]
def with_hist(
xs: NDArray[np.float64],
ys: NDArray[np.float64],
cell: GridSpec = None,
bins: int = 100,
) -> Axes:
"""Call before creating a plot and use the returned `ax_main` for all
subsequent plotting ops to create a grid of plots with the main plot in
the lower left and narrow histograms along its x- and/or y-axes displayed
above and near the right edge.
Args:
xs (array): x values.
ys (array): y values.
cell (GridSpec, optional): Cell of a plt GridSpec at which to add the
grid of plots. Defaults to None.
bins (int, optional): Resolution/bin count of the histograms. Defaults to 100.
Returns:
ax: The matplotlib Axes to be used for the main plot.
"""
fig = plt.gcf()
gs = (cell.subgridspec if cell else fig.add_gridspec)(
2, 2, width_ratios=(6, 1), height_ratios=(1, 5), wspace=0, hspace=0
)
ax_main = fig.add_subplot(gs[1, 0])
ax_histx = fig.add_subplot(gs[0, 0], sharex=ax_main)
ax_histy = fig.add_subplot(gs[1, 1], sharey=ax_main)
# x_hist
ax_histx.hist(xs, bins=bins, rwidth=0.8)
ax_histx.axis("off")
# y_hist
ax_histy.hist(ys, bins=bins, rwidth=0.8, orientation="horizontal")
ax_histy.axis("off")
return ax_main
def annotate_bar_heights(
ax: Axes = None,
voffset: int = 10,
hoffset: int = 0,
labels: Sequence[Union[str, int, float]] = None,
fontsize: int = 14,
) -> None:
"""Annotate histograms with a label indicating the height/count of each bar.
Args:
ax (matplotlib.axes.Axes): The axes to annotate.
voffset (int): Vertical offset between the labels and the bars.
hoffset (int): Horizontal offset between the labels and the bars.
labels (list[str]): Labels used for annotating bars. Falls back to the
y-value of each bar if None.
fontsize (int): Annotated text size in pts. Defaults to 14.
"""
if ax is None:
ax = plt.gca()
if labels is None:
labels = [int(patch.get_height()) for patch in ax.patches]
for rect, label in zip(ax.patches, labels):
y_pos = rect.get_height()
x_pos = rect.get_x() + rect.get_width() / 2 + hoffset
if ax.get_yscale() == "log":
y_pos = y_pos + np.log(voffset)
else:
y_pos = y_pos + voffset
# place label at end of the bar and center horizontally
ax.annotate(label, (x_pos, y_pos), ha="center", fontsize=fontsize)
# ensure enough vertical space to display label above highest bar
ax.margins(y=0.1)
def add_mae_r2_box(
xs: NDArray[np.float64],
ys: NDArray[np.float64],
ax: Axes = None,
loc: str = "lower right",
prec: int = 3,
**kwargs: Any,
) -> None:
"""Provide a set of x and y values of equal length and an optional Axes object
on which to print the values' mean absolute error and R^2 coefficient of
determination.
Args:
xs (array, optional): x values.
ys (array, optional): y values.
ax (Axes, optional): matplotlib Axes on which to add the box. Defaults to None.
loc (str, optional): Where on the plot to place the AnchoredText object.
Defaults to "lower right".
prec (int, optional): # of decimal places in printed metrics. Defaults to 3.
"""
if ax is None:
ax = plt.gca()
mask = ~np.isnan(xs) & ~np.isnan(ys)
xs, ys = xs[mask], ys[mask]
mae_str = f"$\\mathrm{{MAE}} = {np.abs(xs - ys).mean():.{prec}f}$\n"
r2_str = f"$R^2 = {r2_score(xs, ys):.{prec}f}$"
frameon: bool = kwargs.pop("frameon", False)
text_box = AnchoredText(mae_str + r2_str, loc=loc, frameon=frameon, **kwargs)
ax.add_artist(text_box)
def get_crystal_system(spg: int) -> str:
"""Get the crystal system for an international space group number."""
if 0 < spg < 3:
return "triclinic"
if spg < 16:
return "monoclinic"
if spg < 75:
return "orthorhombic"
if spg < 143:
return "tetragonal"
if spg < 168:
return "trigonal"
if spg < 195:
return "hexagonal"
if spg < 231:
return "cubic"
else:
raise ValueError(f"Received invalid space group {spg}")
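# Hedged usage sketch (not part of the original module): get_crystal_system on
# a few space group numbers, and an MAE/R^2 box on a toy parity plot with
# randomly generated data.
if __name__ == '__main__':
    print([get_crystal_system(spg) for spg in (1, 14, 225)])  # triclinic, monoclinic, cubic
    rng = np.random.default_rng(0)
    y_true = rng.normal(size=100)
    y_pred = y_true + rng.normal(scale=0.1, size=100)
    ax = with_hist(y_true, y_pred)
    ax.scatter(y_true, y_pred, s=10)
    add_mae_r2_box(y_true, y_pred, ax=ax)
    # plt.show()  # uncomment to display the figure interactively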
|
467949
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .attention import Attention
from .residual_mlp import ResidualMLP
from .shifted_softplus import ShiftedSoftplus
from .swish import Swish
from typing import Optional
class NonlinearElectronicEmbedding(nn.Module):
"""
Block for updating atomic features through nonlocal interactions with the
electrons.
    Arguments:
        num_features (int):
            Dimensions of feature space.
        num_residual (int):
            Number of residual blocks applied to the key and value branches.
        activation (str):
            Kind of activation function. Possible values:
            'swish': Swish activation function.
            'ssp': Shifted softplus activation function.
    """
def __init__(
self, num_features: int, num_residual: int, activation: str = "swish"
) -> None:
""" Initializes the NonlinearElectronicEmbedding class. """
super(NonlinearElectronicEmbedding, self).__init__()
self.linear_q = nn.Linear(num_features, num_features, bias=False)
self.featurize_k = nn.Linear(1, num_features)
self.resblock_k = ResidualMLP(
num_features, num_residual, activation=activation, zero_init=True
)
self.featurize_v = nn.Linear(1, num_features, bias=False)
self.resblock_v = ResidualMLP(
num_features,
num_residual,
activation=activation,
zero_init=True,
bias=False,
)
self.reset_parameters()
def reset_parameters(self) -> None:
""" Initialize parameters. """
nn.init.orthogonal_(self.linear_q.weight)
nn.init.orthogonal_(self.featurize_k.weight)
nn.init.zeros_(self.featurize_k.bias)
nn.init.orthogonal_(self.featurize_v.weight)
def forward(
self,
x: torch.Tensor,
E: torch.Tensor,
num_batch: int,
batch_seg: torch.Tensor,
mask: Optional[torch.Tensor] = None,
eps: float = 1e-8,
) -> torch.Tensor:
"""
Evaluate interaction block.
N: Number of atoms.
x (FloatTensor [N, num_features]):
Atomic feature vectors.
"""
e = E.unsqueeze(-1)
q = self.linear_q(x) # queries
k = self.resblock_k(self.featurize_k(e))[batch_seg] # keys
v = self.resblock_v(self.featurize_v(e))[batch_seg] # values
# dot product
dot = torch.sum(k * q, dim=-1)
# determine maximum dot product (for numerics)
if num_batch > 1:
if mask is None:
mask = (
nn.functional.one_hot(batch_seg)
.to(dtype=x.dtype, device=x.device)
.transpose(-1, -2)
)
tmp = dot.view(1, -1).expand(num_batch, -1)
tmp, _ = torch.max(mask * tmp, dim=-1)
if tmp.device.type == "cpu": # indexing is faster on CPUs
maximum = tmp[batch_seg]
else: # gathering is faster on GPUs
maximum = torch.gather(tmp, 0, batch_seg)
else:
maximum = torch.max(dot)
# attention
d = k.shape[-1]
a = torch.exp((dot - maximum) / d ** 0.5)
anorm = a.new_zeros(num_batch).index_add_(0, batch_seg, a)
if a.device.type == "cpu": # indexing is faster on CPUs
anorm = anorm[batch_seg]
else: # gathering is faster on GPUs
anorm = torch.gather(anorm, 0, batch_seg)
return (a / (anorm + eps)).unsqueeze(-1) * v
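# Hedged usage sketch (not part of the original module): a forward pass on two
# small molecules batched together. The feature size, charges, and batch
# assignment below are made up for illustration.
if __name__ == '__main__':
    emb = NonlinearElectronicEmbedding(num_features=8, num_residual=1)
    x = torch.randn(5, 8)                      # 5 atoms, 8 features each
    E = torch.tensor([0.0, -1.0])              # one total charge per molecule
    batch_seg = torch.tensor([0, 0, 0, 1, 1])  # atom -> molecule assignment
    out = emb(x, E, num_batch=2, batch_seg=batch_seg)
    print(out.shape)  # torch.Size([5, 8])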
|
467950
|
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras import backend as K
from keras.preprocessing import sequence
import Levenshtein
import pickle
# The custom accuracy metric used for this task
def accuracy(y_true, y_pred):
    y = tf.argmax(y_true, axis=-1)
    y_ = tf.argmax(y_pred, axis=-1)
mask = tf.greater(y, 0)
return K.cast(K.equal(tf.boolean_mask(y, mask), tf.boolean_mask(y_, mask)), K.floatx())
# Maps a one-hot encoded prediction back to a sequence string using the given index
def onehot_to_seq(oh_seq, index, length=None):
s = ''
if length is None:
for idx, o in enumerate(oh_seq):
i = np.argmax(o)
if i != 0:
s += index[i]
else:
break
else:
for idx, o in enumerate(oh_seq):
i = np.argmax(o[1:])
if idx < length:
s += index[i+1]
else:
break
return s
# prints the results
def print_results(x, y_, revsere_decoder_index):
# print("input : " + str(x))
# print("prediction: " + str(onehot_to_seq(y_, revsere_decoder_index).upper()))
print(str(onehot_to_seq(y_, revsere_decoder_index).upper()))
def decode_predictions(y_, revsere_decoder_index, length=None):
return str(onehot_to_seq(y_, revsere_decoder_index, length=length).upper())
def predict_all(model, test_df, tokenizer_encoder, tokenizer_decoder, n_gram, augmented_input=None, max_len=None, filepath="submission.csv"):
test_input_ids = test_df['id'].values
test_input_seqs = test_df['input'].values.T
test_input_grams = seq2ngrams(test_input_seqs, n=n_gram)
revsere_decoder_index = {value:key for key,value in tokenizer_decoder.word_index.items()}
if max_len is None:
max_test_length = max([len(x) for x in test_input_grams])
else:
max_test_length = max_len
test_input_data_full = tokenizer_encoder.texts_to_sequences(test_input_grams)
test_input_data_full = sequence.pad_sequences(test_input_data_full, maxlen = max_test_length, padding = 'post')
if augmented_input is None:
y_test_pred = model.predict(test_input_data_full[:])
else:
y_test_pred = model.predict([test_input_data_full[:], augmented_input])
np.save(filepath.replace(".csv", "_raw_pred.npy"), y_test_pred)
y_test_pred_decoded = []
for i in range(len(y_test_pred)):
decoded = decode_predictions(y_test_pred[i], revsere_decoder_index, length=len(test_input_grams[i]))
y_test_pred_decoded.append(decoded)
test_pred_df = pd.DataFrame({'id':test_input_ids, "expected": y_test_pred_decoded},
columns = ['id', 'expected'])
if np.all(np.array([len(x) for x in test_pred_df['expected']]) == np.array([len(x) for x in test_df['input']])):
print("All length match")
else:
print("Some lengths do not match!")
test_pred_df.to_csv(filepath, index=False)
return test_pred_df
def ham_distance(x, y):
return np.sum([a != b for a, b in zip(x, y)])
def edit_score(input_df, pred_df, filepath="edit_score.csv", plot=True):
assert np.all(input_df['id'].values == pred_df['id'].values)
if not np.all(np.array([len(x) for x in pred_df['expected']]) == np.array([len(x) for x in input_df['input']])):
print("Some lengths do not match!")
return None, None
output_df = input_df.copy().reset_index(drop=True)
lev_dist = [Levenshtein.distance(x, y) for x, y in zip(input_df['expected'], pred_df['expected'])]
ham_dist = [ham_distance(x, y) for x, y in zip(input_df['expected'], pred_df['expected'])]
lev_score = np.mean(lev_dist)
ham_score = np.mean(ham_dist)
total_ham = np.sum(ham_dist)
total_len = input_df['expected'].map(len).sum()
accuracy = 1 - total_ham / total_len
output_df['predicted'] = pred_df['expected'].values
output_df['levdist'] = np.array(lev_dist)
output_df['hamdist'] = np.array(ham_dist)
output_df['levpercent'] = output_df['levdist'] / output_df['len']
output_df['hampercent'] = output_df['hamdist'] / output_df['len']
output_df['accuracy'] = 1 - output_df['hampercent']
ham_percent = np.mean(output_df['hampercent'])
mean_acc = np.mean(output_df['accuracy'])
output_df.to_csv(filepath, index=False)
print_str = "total acc: {:.4f}, mean acc: {:.4f}, lev: {:.1f}, ham: {:.1f}".format(accuracy, mean_acc, lev_score, ham_score)
print(print_str)
output_df.plot("len", "accuracy", kind="scatter")
plt.hlines(y=accuracy, xmin=0, xmax=output_df['len'].max())
plt.title(print_str)
plt.savefig(filepath.replace(".csv", "_plot.png"))
if plot:
plt.show()
plt.close()
return accuracy, output_df
# Computes and returns the n-grams of a particular sequence, defaults to trigrams
def seq2ngrams(seqs, n = 3):
return np.array([[seq[i : i + n] for i in range(len(seq))] for seq in seqs])
def load_augmented_data(npy_path, max_len, centered=False):
data = np.load(npy_path)
residue_list = ['A', 'C', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M', 'L', 'N', 'Q', 'P', 'S', 'R', 'T', 'W', 'V', 'Y', 'X','NoSeq']
q8_list = ['L', 'B', 'E', 'G', 'I', 'H', 'S', 'T','NoSeq']
data_reshape = data.reshape(data.shape[0], 700, -1)
residue_onehot = data_reshape[:,:,0:22]
residue_q8_onehot = data_reshape[:,:,22:31]
profile = data_reshape[:,:,35:57]
# if centered:
# profile = profile - 0.5 # range [0,1]
if max_len > profile.shape[1]:
zero_arr = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
zero_arr[:,:,-1] = 1.0
profile_padded = np.concatenate([profile, zero_arr], axis=1)
else:
profile_padded = profile
residue_array = np.array(residue_list)[residue_onehot.argmax(2)]
q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
residue_str_list = []
q8_str_list = []
for vec in residue_array:
x = ''.join(vec[vec != 'NoSeq'])
residue_str_list.append(x)
for vec in q8_array:
x = ''.join(vec[vec != 'NoSeq'])
q8_str_list.append(x)
id_list = np.arange(1, len(residue_array) + 1)
len_list = np.array([len(x) for x in residue_str_list])
train_df = pd.DataFrame({'id': id_list, 'len': len_list, 'input': residue_str_list, 'expected': q8_str_list})
return train_df, profile_padded
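# Hedged usage sketch (not part of the original module): seq2ngrams and
# onehot_to_seq on tiny hand-made inputs.
if __name__ == '__main__':
    print(seq2ngrams(['ACDEF'], n=3))  # trigrams; the tail grams are shorter
    toy_index = {1: 'H', 2: 'E', 3: 'C'}
    toy_onehot = [[0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0]]  # decoding stops at class 0
    print(onehot_to_seq(toy_onehot, toy_index))  # -> 'HC'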
|
467956
|
import csv
import tempfile
import pytest
from datarobot_batch_scoring.batch_scoring import run_batch_predictions
from utils import PickableMock
from datarobot_batch_scoring.reader import DETECT_SAMPLE_SIZE_SLOW
def test_gzipped_csv(live_server, ui):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=None,
dataset='tests/fixtures/temperatura_predict.csv.gz',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False,
max_batch_size=1000
)
assert ret is None
def test_explicit_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/temperatura_predict.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_explicit_delimiter_gzip(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/temperatura_predict.csv.gz',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_tab_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter='\t',
dataset='tests/fixtures/temperatura_predict_tab.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_empty_file(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(csv.Error) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/empty.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert "The csv module failed to detect the CSV dialect." in str(ctx.value)
def test_no_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(csv.Error) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=';',
dataset='tests/fixtures/temperatura_predict.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert str(ctx.value) == ("Could not determine delimiter")
def test_bad_newline(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/diabetes_bad_newline.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
lines = len(open('out.csv', 'rb').readlines())
assert lines == 5
ui.warning.assert_any_call('Detected empty rows in the CSV file. '
'These rows will be discarded.')
def test_header_only(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(ValueError) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/header_only.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert str(ctx.value) == ("Input file 'tests/fixtures/header_only.csv' "
"is empty.")
def test_quotechar_in_keep_cols(live_server):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ui = PickableMock()
with tempfile.NamedTemporaryFile(prefix='test_',
suffix='.csv',
delete=False) as fd:
head = open("tests/fixtures/quotes_input_head.csv",
"rb").read()
body_1 = open("tests/fixtures/quotes_input_first_part.csv",
"rb").read()
body_2 = open("tests/fixtures/quotes_input_bad_part.csv",
"rb").read()
fd.file.write(head)
size = 0
while size < DETECT_SAMPLE_SIZE_SLOW:
fd.file.write(body_1)
size += len(body_1)
fd.file.write(body_2)
fd.close()
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=["b", "c"],
delimiter=None,
dataset=fd.name,
pred_name=None,
timeout=None,
ui=ui,
auto_sample=True,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
last_line = open("out.csv", "rb").readlines()[-1]
expected_last_line = b'1044,2,"eeeeeeee ""eeeeee"" eeeeeeeeeeee'
assert last_line[:len(expected_last_line)] == expected_last_line
def test_quoted_newline_in_keep_cols_in_fast_mode_fails(live_server):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ui = PickableMock()
with tempfile.NamedTemporaryFile(prefix='test_',
suffix='.csv',
delete=False) as fd:
head = open("tests/fixtures/quotes_input_head.csv",
"rb").read()
body_1 = open("tests/fixtures/quotes_input_first_part.csv",
"rb").read()
body_2 = open("tests/fixtures/quotes_input_bad_part_with_newline.csv",
"rb").read()
fd.file.write(head)
size = 0
while size < DETECT_SAMPLE_SIZE_SLOW:
fd.file.write(body_1)
size += len(body_1)
fd.file.write(body_2)
fd.close()
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=["b", "c"],
delimiter=None,
dataset=fd.name,
pred_name=None,
timeout=None,
ui=ui,
auto_sample=True,
fast_mode=True,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret == 1
|
468006
|
import numpy as np
import robot_sim.robots.ur3_dual.ur3_dual as ur3ds
import robot_con.ur.ur3_dual_x as ur3dx
import motion.probabilistic.rrt_connect as rrtc
import motion.optimization_based.incremental_nik as inik
import manipulation.pick_place_planner as ppp
import visualization.panda.world as wd
class UR3DualHelper(object):
def __init__(self,
pos=np.zeros(3),
rotmat=np.eye(3),
use_real=False,
create_sim_world=True,
lft_robot_ip='10.2.0.50',
rgt_robot_ip='10.2.0.51',
pc_ip='10.2.0.100',
cam_pos=np.array([2, 1, 3]),
lookat_pos=np.array([0, 0, 1.1]),
auto_cam_rotate=False):
self.robot_s = ur3ds.UR3Dual(pos=pos, rotmat=rotmat)
self.rrt_planner = rrtc.RRTConnect(self.robot_s)
self.inik_solver = inik.IncrementalNIK(self.robot_s)
self.pp_planner = ppp.PickPlacePlanner(self.robot_s)
if use_real:
self.robot_x = ur3dx.UR3DualX(lft_robot_ip=lft_robot_ip,
rgt_robot_ip=rgt_robot_ip,
pc_ip=pc_ip)
if create_sim_world:
self.sim_world = wd.World(cam_pos=cam_pos,
lookat_pos=lookat_pos,
auto_cam_rotate=auto_cam_rotate)
def plan_motion(self,
component_name,
start_conf,
goal_conf,
obstacle_list=[],
otherrobot_list=[],
ext_dist=2,
maxiter=1000,
maxtime=15.0,
animation=False):
path = self.rrt_planner.plan(component_name=component_name,
start_conf=start_conf,
goal_conf=goal_conf,
obstacle_list=obstacle_list,
otherrobot_list=otherrobot_list,
ext_dist=ext_dist,
max_iter=maxiter,
max_time=maxtime,
animation=animation)
return path
def plan_pick_and_place(self,
manipulator_name,
hand_name,
objcm,
grasp_info_list,
start_conf,
goal_homomat_list):
"""
:param manipulator_name:
:param hand_name:
:param objcm:
:param grasp_info_list:
:param start_conf:
:param goal_homomat_list:
:return:
author: weiwei
date: 20210409
"""
self.pp_planner.gen_pick_and_place_motion(manipulator_name,
hand_name,
objcm,
grasp_info_list,
start_conf,
goal_homomat_list)
|
468018
|
import sys
import vim
from powerline.bindings.vim import vim_get_func, vim_getoption, environ, current_tabpage, get_vim_encoding
from powerline.renderer import Renderer
from powerline.colorscheme import ATTR_BOLD, ATTR_ITALIC, ATTR_UNDERLINE
from powerline.theme import Theme
from powerline.lib.unicode import unichr, register_strwidth_error
vim_mode = vim_get_func('mode', rettype='unicode')
if int(vim.eval('v:version')) >= 702:
_vim_mode = vim_mode
vim_mode = lambda: _vim_mode(1)
mode_translations = {
unichr(ord('V') - 0x40): '^V',
unichr(ord('S') - 0x40): '^S',
}
class VimRenderer(Renderer):
'''Powerline vim segment renderer.'''
character_translations = Renderer.character_translations.copy()
character_translations[ord('%')] = '%%'
segment_info = Renderer.segment_info.copy()
segment_info.update(environ=environ)
def __init__(self, *args, **kwargs):
if not hasattr(vim, 'strwidth'):
# Hope nobody wants to change this at runtime
if vim.eval('&ambiwidth') == 'double':
kwargs = dict(**kwargs)
kwargs['ambigious'] = 2
super(VimRenderer, self).__init__(*args, **kwargs)
self.hl_groups = {}
self.prev_highlight = None
self.strwidth_error_name = register_strwidth_error(self.strwidth)
self.encoding = get_vim_encoding()
def shutdown(self):
self.theme.shutdown()
for match in self.local_themes.values():
if 'theme' in match:
match['theme'].shutdown()
def add_local_theme(self, matcher, theme):
if matcher in self.local_themes:
raise KeyError('There is already a local theme with given matcher')
self.local_themes[matcher] = theme
def get_matched_theme(self, match):
try:
return match['theme']
except KeyError:
match['theme'] = Theme(theme_config=match['config'], main_theme_config=self.theme_config, **self.theme_kwargs)
return match['theme']
def get_theme(self, matcher_info):
if matcher_info is None:
return self.get_matched_theme(self.local_themes[None])
for matcher in self.local_themes.keys():
if matcher and matcher(matcher_info):
return self.get_matched_theme(self.local_themes[matcher])
else:
return self.theme
if hasattr(vim, 'strwidth'):
if sys.version_info < (3,):
def strwidth(self, string):
# Does not work with tabs, but neither is strwidth from default
# renderer
return vim.strwidth(string.encode(self.encoding, 'replace'))
else:
@staticmethod
def strwidth(string):
return vim.strwidth(string)
def get_segment_info(self, segment_info, mode):
return segment_info or self.segment_info
def render(self, window=None, window_id=None, winnr=None, is_tabline=False):
'''Render all segments.'''
segment_info = self.segment_info.copy()
if window is vim.current.window:
mode = vim_mode()
mode = mode_translations.get(mode, mode)
else:
mode = 'nc'
segment_info.update(
window=window,
mode=mode,
window_id=window_id,
winnr=winnr,
buffer=window.buffer,
tabpage=current_tabpage(),
encoding=self.encoding,
)
segment_info['tabnr'] = segment_info['tabpage'].number
segment_info['bufnr'] = segment_info['buffer'].number
if is_tabline:
winwidth = int(vim_getoption('columns'))
else:
winwidth = segment_info['window'].width
statusline = super(VimRenderer, self).render(
mode=mode,
width=winwidth,
segment_info=segment_info,
matcher_info=(None if is_tabline else segment_info),
)
statusline = statusline.encode(self.encoding, self.strwidth_error_name)
return statusline
def reset_highlight(self):
self.hl_groups.clear()
def hlstyle(self, fg=None, bg=None, attrs=None, *args, **kwargs):
'''Highlight a segment.
If an argument is None, the argument is ignored. If an argument is
False, the argument is reset to the terminal defaults. If an argument
is a valid color or attribute, it’s added to the vim highlight group.
'''
# In order not to hit E541, two consecutive identical highlighting
# specifiers may be squashed into one.
attrs = attrs or 0 # Normalize `attrs`
if (fg, bg, attrs) == self.prev_highlight:
return ''
self.prev_highlight = (fg, bg, attrs)
# We don’t need to explicitly reset attributes in vim, so skip those
# calls
if not attrs and not bg and not fg:
return ''
if (fg, bg, attrs) not in self.hl_groups:
hl_group = {
'ctermfg': 'NONE',
'guifg': None,
'ctermbg': 'NONE',
'guibg': None,
'attrs': ['NONE'],
'name': '',
}
if fg is not None and fg is not False:
hl_group['ctermfg'] = fg[0]
hl_group['guifg'] = fg[1]
if bg is not None and bg is not False:
hl_group['ctermbg'] = bg[0]
hl_group['guibg'] = bg[1]
if attrs:
hl_group['attrs'] = []
if attrs & ATTR_BOLD:
hl_group['attrs'].append('bold')
if attrs & ATTR_ITALIC:
hl_group['attrs'].append('italic')
if attrs & ATTR_UNDERLINE:
hl_group['attrs'].append('underline')
hl_group['name'] = (
'Pl_'
+ str(hl_group['ctermfg']) + '_'
+ str(hl_group['guifg']) + '_'
+ str(hl_group['ctermbg']) + '_'
+ str(hl_group['guibg']) + '_'
+ ''.join(hl_group['attrs'])
)
self.hl_groups[(fg, bg, attrs)] = hl_group
vim.command('hi {group} ctermfg={ctermfg} guifg={guifg} guibg={guibg} ctermbg={ctermbg} cterm={attrs} gui={attrs}'.format(
group=hl_group['name'],
ctermfg=hl_group['ctermfg'],
guifg='#{0:06x}'.format(hl_group['guifg']) if hl_group['guifg'] is not None else 'NONE',
ctermbg=hl_group['ctermbg'],
guibg='#{0:06x}'.format(hl_group['guibg']) if hl_group['guibg'] is not None else 'NONE',
attrs=','.join(hl_group['attrs']),
))
return '%#' + self.hl_groups[(fg, bg, attrs)]['name'] + '#'
renderer = VimRenderer
|
468023
|
from __future__ import print_function
class Sampler(object):
def pretrain_begin(self, begin, end):
pass
def pretrain_end(self):
pass
def pretrain_begin_iteration(self):
pass
def pretrain_end_iteration(self):
pass
def online_begin(self, begin, end):
pass
def online_end(self):
pass
def online_begin_iteration(self):
pass
def online_end_iteration(self):
pass
def make_pretrain_input(self, batch):
pass
def make_online_input(self, batch):
pass
def shuffle_sample(self):
pass
def batches(self, batchsize):
raise NotImplementedError()
def sample_size(self):
raise NotImplementedError()
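# Illustrative sketch (not part of the original module): a concrete Sampler only
# needs to implement ``batches`` and ``sample_size``; the pre-train/online hooks
# above are optional no-ops that subclasses may override as needed.
class ListSampler(Sampler):
    def __init__(self, samples):
        self.samples = list(samples)

    def batches(self, batchsize):
        # Yield consecutive slices of at most ``batchsize`` samples.
        for i in range(0, len(self.samples), batchsize):
            yield self.samples[i:i + batchsize]

    def sample_size(self):
        return len(self.samples)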
|
468030
|
from .Scheduler import *
import numpy as np
from copy import deepcopy
class RLScheduler(Scheduler):
def __init__(self):
super().__init__()
def selection(self):
return self.RandomContainerSelection()
def placement(self, containerIDs):
return self.MaxFullPlacement(containerIDs)
|
468036
|
import unittest
from quickbooks import QuickBooks
from quickbooks.objects.estimate import Estimate
class EstimateTests(unittest.TestCase):
def test_unicode(self):
estimate = Estimate()
estimate.TotalAmt = 10
self.assertEqual(str(estimate), "10")
def test_valid_object_name(self):
obj = Estimate()
client = QuickBooks()
result = client.isvalid_object_name(obj.qbo_object_name)
self.assertTrue(result)
|
468059
|
import unittest
from mal import Anime
class TestAnime(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.anime = Anime(1)
def test_anime(self):
self.assertEqual(self.anime.mal_id, 1)
self.assertEqual(self.anime.title, "Cowboy Bebop")
self.assertEqual(self.anime.title_english, "Cowboy Bebop")
self.assertEqual(self.anime.title_japanese, "カウボーイビバップ")
self.assertEqual(self.anime.title_synonyms, [])
self.assertEqual(self.anime.url, "https://myanimelist.net/anime/1/Cowboy_Bebop")
self.assertEqual(
self.anime.image_url, "https://cdn.myanimelist.net/images/anime/4/19644.jpg"
)
self.assertEqual(self.anime.type, "TV")
self.assertEqual(self.anime.episodes, 26)
self.assertEqual(self.anime.status, "Finished Airing")
self.assertEqual(self.anime.aired, "Apr 3, 1998 to Apr 24, 1999")
self.assertEqual(self.anime.premiered, "Spring 1998")
self.assertEqual(self.anime.broadcast, "Saturdays at 01:00 (JST)")
self.assertEqual(self.anime.producers, ["Bandai Visual"])
self.assertEqual(self.anime.licensors, ["Funimation", "Bandai Entertainment"])
self.assertEqual(self.anime.studios, ["Sunrise"])
self.assertEqual(self.anime.source, "Original")
self.assertEqual(
self.anime.genres,
["Action", "Adventure", "Comedy", "Drama", "Sci-Fi"],
)
self.assertEqual(self.anime.themes, ["Space"])
self.assertEqual(self.anime.duration, "24 min. per ep.")
self.assertEqual(self.anime.rating, "R - 17+ (violence & profanity)")
self.assertIsInstance(self.anime.score, float)
self.assertIsInstance(self.anime.scored_by, int)
self.assertIsInstance(self.anime.rank, int)
self.assertIsInstance(self.anime.popularity, int)
self.assertIsInstance(self.anime.members, int)
self.assertIsInstance(self.anime.favorites, int)
self.assertEqual(
self.anime.synopsis,
"In the year 2071, humanity has colonized several of the planets and moons "
"of the solar system leaving the now uninhabitable surface of planet Earth "
"behind. The Inter Solar System Police attempts to keep peace in the "
'galaxy, aided in part by outlaw bounty hunters, referred to as "Cowboys." '
"The ragtag team aboard the spaceship Bebop are two such individuals. "
"Mellow and carefree S<NAME> is balanced by his boisterous, "
"pragmatic partner <NAME> as the pair makes a living chasing bounties "
"and collecting rewards. Thrown off course by the addition of new members "
"that they meet in their travels—Ein, a genetically engineered, highly "
"intelligent Welsh Corgi; femme fatale <NAME>, an enigmatic "
"trickster with memory loss; and the strange computer whiz kid Edward "
"Wong—the crew embarks on thrilling adventures that unravel each member's "
"dark and mysterious past little by little. Well-balanced with high "
"density action and light-hearted comedy, Cowboy Bebop is a space Western "
"classic and an homage to the smooth and improvised music it is named "
"after. [Written by MAL Rewrite]",
)
self.assertEqual(
self.anime.background,
"When Cowboy Bebop first aired in spring of 1998 on TV Tokyo, only "
"episodes 2, 3, 7-15, and 18 were broadcast, it was concluded with a recap "
"special known as Yose Atsume Blues. This was due to anime censorship "
"having increased following the big controversies over Evangelion, as a "
"result most of the series was pulled from the air due to violent content. "
"Satellite channel WOWOW picked up the series in the fall of that year and "
"aired it in its entirety uncensored. Cowboy Bebop was not a ratings hit "
"in Japan, but sold over 19,000 DVD units in the initial release run, and "
"81,000 overall. Protagonist Spike Spiegel won Best Male Character, and "
"<NAME> won Best Voice Actor for her role as <NAME> in "
"the 1999 and 2000 Anime Grand Prix, respectively. Cowboy Bebop's biggest "
"influence has been in the United States, where it premiered on Adult Swim "
"in 2001 with many reruns since. The show's heavy Western influence struck "
'a chord with American viewers, where it became a "gateway drug" to '
"anime aimed at adult audiences.",
)
self.assertEqual(
self.anime.related_anime,
{
"Adaptation": [
"Cowboy Bebop",
"Shooting Star Bebop: Cowboy Bebop",
],
"Side story": [
"Cowboy Bebop: Tengoku no Tobira",
"Cowboy Bebop: Ein no Natsuyasumi",
],
"Summary": ["Cowboy Bebop: Yose Atsume Blues"],
},
)
self.assertEqual(
self.anime.opening_themes, ['"Tank!" by The Seatbelts (eps 1-25)']
)
self.assertEqual(
self.anime.ending_themes,
[
'"The Real Folk Blues" by The Seatbelts feat. Mai Yamane (eps 1-12, 14-25)',
'"Space Lion" by The Seatbelts (eps 13)',
'"Blue" by The Seatbelts feat. Mai Yamane (eps 26)',
]
)
self.assertEqual(self.anime.characters[0].name, "<NAME>")
self.assertEqual(self.anime.characters[0].role, "Main")
self.assertEqual(self.anime.characters[0].voice_actor, "Yamadera, Kouichi")
self.assertEqual(self.anime.staff[0].name, "<NAME>")
self.assertEqual(self.anime.staff[0].role, "Producer")
if __name__ == "__main__":
unittest.main()
|
468076
|
import unittest
import numpy as np
from arrus.utils.tests.utils import ArrusImagingTestCase
from arrus.utils.imaging import (
FirFilter,
BandpassFilter
)
class FirFilterTestCase(ArrusImagingTestCase):
def setUp(self) -> None:
self.op = FirFilter
self.context = self.get_default_context()
def run_op(self, **kwargs):
# Currently FirFilter supports only 3D arrays,
# the filtering is done along the last axis.
# So we need to reshape the data to proper sizes.
# Currently only np.int16 data is supported.
# Currently only np.float32 taps are supported.
data = kwargs['data']
data = np.array(data)
if len(data.shape) > 3:
raise ValueError("Currently data supports at most 3 dimensions.")
if len(data.shape) < 3:
dim_diff = 3-len(data.shape)
data = np.expand_dims(data, axis=tuple(np.arange(dim_diff)))
data = data.astype(np.int16)
kwargs["data"] = data
kwargs["taps"] = np.asarray(kwargs["taps"]).astype(np.float32)
result = super().run_op(**kwargs)
return np.squeeze(result)
# Test cases:
# Corner cases:
def test_no_input_signal(self):
"""Empty input array should not be accepted. """
with self.assertRaisesRegex(ValueError, "Empty array") as ctx:
self.run_op(data=[], taps=[])
def test_simple_1d_convolution_single_coeff(self):
"""A simple and easy to analyse test case."""
# Given
data = np.arange(5).astype(np.int16)
filter_taps = np.array([1]).astype(np.float32)
# Run
result = self.run_op(data=data, taps=filter_taps)
print(result.shape)
# Expect
np.testing.assert_equal(result, data)
def test_simple_1d_convolution(self):
"""A simple and easy to analyse test case, example 2."""
data = np.arange(5)
filter_taps = np.array([1, 1, 1])
# Run
result = self.run_op(data=data, taps=filter_taps)
# Expect
np.testing.assert_equal(result, [1, 3, 6, 9, 7])
def test_convolution_properly_inversed(self):
""" Test if the data is properly reversed in the convolution,
i.e. sum(data[j-i] * coeffs[i]) """
data = np.arange(5)
filter_taps = np.array([3, 2, 1])
# Run
result = self.run_op(data=data, taps=filter_taps)
# Expect: 'same'-mode convolution of [0, 1, 2, 3, 4] with taps [3, 2, 1]
# (the coefficients are applied reversed and the borders are zero-padded):
# out[0] = 2*0 + 3*1        = 3
# out[1] = 1*0 + 2*1 + 3*2  = 8
# out[2] = 1*1 + 2*2 + 3*3  = 14
# out[3] = 1*2 + 2*3 + 3*4  = 20
# out[4] = 1*3 + 2*4        = 11
np.testing.assert_equal(result, [3, 8, 14, 20, 11])
if __name__ == "__main__":
unittest.main()
|
468134
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import binary_sensor, esp32_ble_tracker
from esphome.const import (
CONF_MAC_ADDRESS,
CONF_SERVICE_UUID,
CONF_IBEACON_MAJOR,
CONF_IBEACON_MINOR,
CONF_IBEACON_UUID,
)
DEPENDENCIES = ["esp32_ble_tracker"]
ble_presence_ns = cg.esphome_ns.namespace("ble_presence")
BLEPresenceDevice = ble_presence_ns.class_(
"BLEPresenceDevice",
binary_sensor.BinarySensor,
cg.Component,
esp32_ble_tracker.ESPBTDeviceListener,
)
def _validate(config):
if CONF_IBEACON_MAJOR in config and CONF_IBEACON_UUID not in config:
raise cv.Invalid("iBeacon major identifier requires iBeacon UUID")
if CONF_IBEACON_MINOR in config and CONF_IBEACON_UUID not in config:
raise cv.Invalid("iBeacon minor identifier requires iBeacon UUID")
return config
CONFIG_SCHEMA = cv.All(
binary_sensor.binary_sensor_schema(BLEPresenceDevice)
.extend(
{
cv.Optional(CONF_MAC_ADDRESS): cv.mac_address,
cv.Optional(CONF_SERVICE_UUID): esp32_ble_tracker.bt_uuid,
cv.Optional(CONF_IBEACON_MAJOR): cv.uint16_t,
cv.Optional(CONF_IBEACON_MINOR): cv.uint16_t,
cv.Optional(CONF_IBEACON_UUID): cv.uuid,
}
)
.extend(esp32_ble_tracker.ESP_BLE_DEVICE_SCHEMA)
.extend(cv.COMPONENT_SCHEMA),
cv.has_exactly_one_key(CONF_MAC_ADDRESS, CONF_SERVICE_UUID, CONF_IBEACON_UUID),
_validate,
)
async def to_code(config):
var = await binary_sensor.new_binary_sensor(config)
await cg.register_component(var, config)
await esp32_ble_tracker.register_ble_device(var, config)
if CONF_MAC_ADDRESS in config:
cg.add(var.set_address(config[CONF_MAC_ADDRESS].as_hex))
if CONF_SERVICE_UUID in config:
if len(config[CONF_SERVICE_UUID]) == len(esp32_ble_tracker.bt_uuid16_format):
cg.add(
var.set_service_uuid16(
esp32_ble_tracker.as_hex(config[CONF_SERVICE_UUID])
)
)
elif len(config[CONF_SERVICE_UUID]) == len(esp32_ble_tracker.bt_uuid32_format):
cg.add(
var.set_service_uuid32(
esp32_ble_tracker.as_hex(config[CONF_SERVICE_UUID])
)
)
elif len(config[CONF_SERVICE_UUID]) == len(esp32_ble_tracker.bt_uuid128_format):
uuid128 = esp32_ble_tracker.as_reversed_hex_array(config[CONF_SERVICE_UUID])
cg.add(var.set_service_uuid128(uuid128))
if CONF_IBEACON_UUID in config:
ibeacon_uuid = esp32_ble_tracker.as_hex_array(str(config[CONF_IBEACON_UUID]))
cg.add(var.set_ibeacon_uuid(ibeacon_uuid))
if CONF_IBEACON_MAJOR in config:
cg.add(var.set_ibeacon_major(config[CONF_IBEACON_MAJOR]))
if CONF_IBEACON_MINOR in config:
cg.add(var.set_ibeacon_minor(config[CONF_IBEACON_MINOR]))
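# Illustrative YAML usage (an assumption drawn from the schema above, not copied
# from documentation): exactly one of mac_address, service_uuid or ibeacon_uuid
# must be set, and ibeacon_major/ibeacon_minor are only valid together with
# ibeacon_uuid, e.g.
#
#   binary_sensor:
#     - platform: ble_presence
#       ibeacon_uuid: '68586f1e-89c2-11eb-8dcd-0242ac130003'
#       ibeacon_major: 100
#       name: "iBeacon Presence"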
|
468141
|
import torch
from overrides import overrides
from allennlp.training.metrics.metric import Metric
import torch.distributed as dist
@Metric.register("vqa")
class VqaMeasure(Metric):
"""Compute the VQA metric, as described in
https://www.semanticscholar.org/paper/VQA%3A-Visual-Question-Answering-Agrawal-Lu/97ad70a9fa3f99adf18030e5e38ebe3d90daa2db
In VQA, we take the answer with the highest score, and then we find out how often
humans decided this was the right answer. The accuracy score for an answer is
`min(1.0, human_count / 3)`.
This metric takes the logits from the models, i.e., a score for each possible answer,
and the labels for the question, together with their weights.
"""
def __init__(self) -> None:
self._sum_of_scores = 0.0
self._score_count = 0
@overrides
def __call__(self, logits: torch.Tensor, labels: torch.Tensor, label_weights: torch.Tensor):
"""
# Parameters
logits : `torch.Tensor`, required.
A tensor of predictions of shape (batch_size, num_classes).
labels : `torch.Tensor`, required.
A tensor of integer class label of shape (batch_size, num_labels).
label_weights : `torch.Tensor`, required.
A tensor of floats of shape (batch_size, num_labels), giving a weight or score to
every one of the labels.
"""
logits, labels, label_weights = self.detach_tensors(logits, labels, label_weights)
predictions = logits.argmax(dim=1)
# Sum over dimension 1 gives the score per question. We care about the overall sum though,
# so we sum over all dimensions.
local_sum_of_scores = (
(label_weights * (labels == predictions.unsqueeze(-1))).sum().to(torch.float32)
)
local_score_count = torch.tensor(labels.size(0), dtype=torch.int32, device=labels.device)
from allennlp.common.util import is_distributed
if is_distributed():
dist.all_reduce(local_sum_of_scores, op=dist.ReduceOp.SUM)
dist.all_reduce(local_score_count, op=dist.ReduceOp.SUM)
self._sum_of_scores += local_sum_of_scores.item()
self._score_count += local_score_count.item()
@overrides
def get_metric(self, reset: bool = False):
if self._score_count > 0:
result = self._sum_of_scores / self._score_count
else:
result = 0.0
result_dict = {"score": result}
if reset:
self.reset()
return result_dict
@overrides
def reset(self) -> None:
self._sum_of_scores = 0.0
self._score_count = 0
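# Worked example (illustrative sketch, not part of the original module; assumes
# allennlp and torch are installed): the ``label_weights`` passed to ``__call__``
# are expected to encode min(1.0, human_count / 3) for each candidate answer.
if __name__ == "__main__":
    metric = VqaMeasure()
    logits = torch.tensor([[0.1, 2.0, 0.3]])      # argmax -> predicted answer index 1
    labels = torch.tensor([[1, 2]])               # candidate answer indices
    label_weights = torch.tensor([[1.0, 2 / 3]])  # i.e. min(1.0, human_count / 3)
    metric(logits, labels, label_weights)
    print(metric.get_metric())                    # {'score': 1.0}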
|
468163
|
from typing import NewType
from web3._utils.compat import TypedDict
Drip = NewType('Drip', int)
class SponsorInfo(TypedDict):
sponsorBalanceForCollateral: int
sponsorBalanceForGas: int
sponsorGasBound: int
sponsorForCollateral: str
sponsorForGas: str
|
468168
|
import os
# These imports are here so that when we evaluate parameters we have access to the functions
from math import *
import numpy as n
class EvaluatedParams(dict):
"""
Stores parameters that have been evaluated and ensures consistency of default parameters
if defaults are used in multiple places or are needed in visualization as well as in
computation.
"""
def __init__(self,basedict = None):
dict.__init__(self)
self.defaults = {}
if basedict is not None:
self.update(basedict)
def get(self, k, default):
if k not in self.defaults:
self.defaults[k] = default
else:
assert self.defaults[k] == default
return dict.get(self,k,self.defaults[k])
def __getitem__(self, k):
if k not in self and k in self.defaults:
return self.defaults[k]
else:
return dict.__getitem__(self,k)
class Params():
"""
Class for parameters of an experiment.
Loads parameters from multiple files, using dictionary syntax.
The parameters are loaded in order of the file specified, so the first file can be defaults,
the second can be task specific, the third can be experiment specific, etc.
Each entry in the globalparams dict is eval'ed when evaluate is called, and the arguments passed to evaluate
can be used in the evaluation.
"""
def __init__(self, fnames = []):
self.globalparams = {}
self.fnames = fnames
self.load()
def load(self):
"Load all fnames in order. Skip ones that don't exist."
for fname in self.fnames:
if os.path.isfile(fname):
with open(fname) as f:
indict = eval(f.read()) # read entire file
self.globalparams.update(indict)
def partial_evaluate(self, fields__, **kwargs):
locals().update(kwargs)
cparams = EvaluatedParams(kwargs)
for k in fields__:
if k in self.globalparams:
v = self.globalparams[k]
if isinstance(v, str):
cparams[k] = eval(v)
else:
cparams[k] = v
return cparams
def evaluate(self, skipfields = set(), **kwargs):
locals().update(kwargs)
cparams = EvaluatedParams(kwargs)
for k, v in self.globalparams.items():
if isinstance(v, str) and k not in skipfields:
cparams[k] = eval(v)
else:
cparams[k] = v
return cparams
def __setitem__(self, k, v):
self.globalparams[k] = v
def get(self, k, default=None):
return self.globalparams.get(k,default)
def __getitem__(self, k):
return self.globalparams[k]
def __delitem__(self, k):
del self.globalparams[k]
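# Illustrative usage sketch (hypothetical file name and keys): parameter files are
# plain-text Python dict literals; string-valued entries are eval'ed again by
# ``evaluate()``, with the functions imported from ``math`` above available.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".params", delete=False) as f:
        f.write("{'n_epochs': 10, 'learning_rate': '1e-3 * 2', 'angle': 'pi / 2'}")
        fname = f.name
    params = Params([fname])
    evaluated = params.evaluate()
    print(evaluated['n_epochs'], evaluated['learning_rate'], evaluated['angle'])
    os.remove(fname)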
|
468205
|
from contextlib import contextmanager
class SQLite3DriverAdapter(object):
@staticmethod
def process_sql(_query_name, _op_type, sql):
"""Pass through function because the ``sqlite3`` driver already handles the :var_name
"named style" syntax used by anosql variables. Note, it will also accept "qmark style"
variables.
Args:
_query_name (str): The name of the sql query. Unused.
_op_type (anosql.SQLOperationType): The type of SQL operation performed by the sql.
sql (str): The sql as written before processing.
Returns:
str: Original SQL text unchanged.
"""
return sql
@staticmethod
def select(conn, _query_name, sql, parameters):
cur = conn.cursor()
cur.execute(sql, parameters)
results = cur.fetchall()
cur.close()
return results
@staticmethod
@contextmanager
def select_cursor(conn, _query_name, sql, parameters):
cur = conn.cursor()
cur.execute(sql, parameters)
try:
yield cur
finally:
cur.close()
@staticmethod
def insert_update_delete(conn, _query_name, sql, parameters):
conn.execute(sql, parameters)
@staticmethod
def insert_update_delete_many(conn, _query_name, sql, parameters):
conn.executemany(sql, parameters)
@staticmethod
def insert_returning(conn, _query_name, sql, parameters):
cur = conn.cursor()
cur.execute(sql, parameters)
results = cur.lastrowid
cur.close()
return results
@staticmethod
def execute_script(conn, sql):
conn.executescript(sql)
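# Illustrative sketch (uses only the stdlib ``sqlite3`` module): both parameter
# styles mentioned in ``process_sql`` are handled natively by the driver, so the
# SQL text can be passed through unchanged.
if __name__ == "__main__":
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE users (id INTEGER, name TEXT)")
    # "named style" placeholders with a dict of parameters
    conn.execute("INSERT INTO users VALUES (:id, :name)", {"id": 1, "name": "ada"})
    # "qmark style" placeholders with a tuple of parameters
    conn.execute("INSERT INTO users VALUES (?, ?)", (2, "grace"))
    print(SQLite3DriverAdapter.select(conn, "get_users",
                                      "SELECT name FROM users ORDER BY id", ()))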
|
468220
|
import os
import subprocess
from rubicon_ml.exceptions import RubiconException
from rubicon_ml.repository import LocalRepository, MemoryRepository, S3Repository
class Config:
"""Used to configure `rubicon` client objects.
Configuration can be specified (in order of precedence) by:
1. environment variables 'PERSISTENCE' and 'ROOT_DIR'
2. arguments to `__init__`
Parameters
----------
persistence : str, optional
The persistence type. Can be one of ["filesystem", "memory"].
root_dir : str, optional
Absolute or relative filepath. Defaults to using the local
filesystem. Prefix with s3:// to use s3 instead.
auto_git_enabled : bool, optional
True to use the `git` command to automatically log relevant repository
information to projects and experiments logged with this client instance,
False otherwise. Defaults to False.
storage_options : dict, optional
Additional keyword arguments specific to the protocol being chosen. They
are passed directly to the underlying filesystem class.
"""
PERSISTENCE_TYPES = ["filesystem", "memory"]
REPOSITORIES = {
"memory-memory": MemoryRepository,
"filesystem-local": LocalRepository,
"filesystem-s3": S3Repository,
}
def __init__(
self, persistence=None, root_dir=None, is_auto_git_enabled=False, **storage_options
):
self.persistence, self.root_dir, self.is_auto_git_enabled = self._load_config(
persistence, root_dir, is_auto_git_enabled
)
self.storage_options = storage_options
self.repository = self._get_repository()
def _check_is_in_git_repo(self):
"""Raise a `RubiconException` if not called from within a `git` repository."""
if subprocess.run(["git", "rev-parse", "--git-dir"], capture_output=True).returncode != 0:
raise RubiconException(
"Not a `git` repo: Failed to locate the '.git' directory in this or any parent directories."
)
def _load_config(self, persistence, root_dir, is_auto_git_enabled):
"""Get the configuration values."""
persistence = os.environ.get("PERSISTENCE", persistence)
if persistence not in self.PERSISTENCE_TYPES:
raise ValueError(f"PERSISTENCE must be one of {self.PERSISTENCE_TYPES}.")
root_dir = os.environ.get("ROOT_DIR", root_dir)
if root_dir is None and persistence != "memory":
raise ValueError("root_dir cannot be None.")
if is_auto_git_enabled:
self._check_is_in_git_repo()
return (persistence, root_dir, is_auto_git_enabled)
def _get_protocol(self):
"""Get the file protocol of the configured root directory."""
if self.persistence == "memory":
return "memory"
elif self.persistence == "filesystem":
if self.root_dir.startswith("s3://"):
return "s3"
else:
return "local"
def _get_repository(self):
"""Get the repository for the configured persistence type."""
protocol = self._get_protocol()
repository_key = f"{self.persistence}-{protocol}"
repository = self.REPOSITORIES.get(repository_key)
if repository is None:
raise RubiconException(
f"{self.__class__.__module__}.{self.__class__.__name__} has no persistence "
+ f"layer for the provided configuration: `persistence`: {self.persistence}, "
+ f"`protocol` (from `root_dir`): {protocol}"
)
return repository(self.root_dir, **self.storage_options)
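# Illustrative sketch of the documented precedence (hypothetical values; assumes
# rubicon_ml is installed): the PERSISTENCE/ROOT_DIR environment variables take
# precedence over the constructor arguments.
if __name__ == "__main__":
    os.environ["PERSISTENCE"] = "memory"
    config = Config(persistence="filesystem", root_dir="./rubicon-root")
    print(config.persistence)                 # "memory" -- the environment variable wins
    print(type(config.repository).__name__)   # MemoryRepository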
|
468247
|
import multiprocessing
import time
import cotyledon
class Manager(cotyledon.ServiceManager):
def __init__(self):
super(Manager, self).__init__()
queue = multiprocessing.Manager().Queue()
self.add(ProducerService, args=(queue,))
self.printer = self.add(PrinterService, args=(queue,), workers=2)
self.register_hooks(on_reload=self.reload)
def reload(self):
print("Reloading")
self.reconfigure(self.printer, 5)
class ProducerService(cotyledon.Service):
def __init__(self, worker_id, queue):
super(ProducerService, self).__init__(worker_id)
self.queue = queue
def run(self):
i = 0
while True:
self.queue.put(i)
i += 1
time.sleep(1)
class PrinterService(cotyledon.Service):
name = "printer"
def __init__(self, worker_id, queue):
super(PrinterService, self).__init__(worker_id)
self.queue = queue
def run(self):
while True:
job = self.queue.get(block=True)
print(f"I am Worker: {self.worker_id} PID: {self.pid} and I print {job}")
Manager().run()
|
468286
|
import argparse
import json
import os
from multiprocessing.sharedctypes import Value
from pathlib import Path
from shutil import copyfile
from web3.main import Web3
from web3.middleware import geth_poa_middleware
from moonworm.crawler.ethereum_state_provider import Web3StateProvider
from moonworm.watch import watch_contract
from .contracts import CU, ERC20, ERC721, CULands
from .crawler.networks import Network
from .deployment import find_deployment_block
from .generators.basic import (
generate_contract_cli_content,
generate_contract_interface_content,
)
from .generators.brownie import generate_brownie_interface
from .version import MOONWORM_VERSION
def write_file(content: str, path: str):
with open(path, "w") as ofp:
ofp.write(content)
def copy_web3_util(dest_dir: str, force: bool = False) -> None:
dest_filepath = os.path.join(dest_dir, "web3_util.py")
if os.path.isfile(dest_filepath) and not force:
print(f"{dest_filepath} file already exists. Use -f to rewrite")
web3_util_path = os.path.join(os.path.dirname(__file__), "web3_util.py")
copyfile(web3_util_path, dest_filepath)
def create_init_py(dest_dir: str, force: bool = False) -> None:
dest_filepath = os.path.join(dest_dir, "__init__.py")
if os.path.isfile(dest_filepath) and not force:
print(f"{dest_filepath} file already exists. Use -f to rewrite")
with open(dest_filepath, "w") as ofp:
ofp.write("")
def handle_generate(args: argparse.Namespace) -> None:
if not args.interface and not args.cli:
print("Please specify what you want to generate:")
print("--interface for smart contract interface")
print("--cli for smart contract cli")
return
Path(args.outdir).mkdir(exist_ok=True)
args.name = args.name + "_"
if args.abi == "erc20":
contract_abi = ERC20.abi()
write_file(
ERC20.bytecode(), os.path.join(args.outdir, args.name + "bytecode.bin")
)
elif args.abi == "erc721":
contract_abi = ERC721.abi()
write_file(
ERC721.bytecode(), os.path.join(args.outdir, args.name + "bytecode.bin")
)
else:
with open(args.abi, "r") as ifp:
contract_abi = json.load(ifp)
abi_file_name = args.name + "abi.json"
write_file(json.dumps(contract_abi), os.path.join(args.outdir, abi_file_name))
copy_web3_util(args.outdir, args.force)
create_init_py(args.outdir, args.force)
if args.interface:
interface_content = generate_contract_interface_content(
contract_abi, abi_file_name
)
interface_name = args.name + "interface.py"
write_file(interface_content, os.path.join(args.outdir, interface_name))
if args.cli:
cli_content = generate_contract_cli_content(contract_abi, abi_file_name)
cli_name = args.name + "cli.py"
write_file(cli_content, os.path.join(args.outdir, cli_name))
print(f"Files are successfully generated to:{args.outdir}")
def handle_brownie_generate(args: argparse.Namespace):
Path(args.outdir).mkdir(exist_ok=True)
project_directory = args.project
build_directory = os.path.join(project_directory, "build", "contracts")
build_file_path = os.path.join(build_directory, f"{args.name}.json")
if not os.path.isfile(build_file_path):
raise IOError(
f"File does not exist: {build_file_path}. Maybe you have to compile the smart contracts?"
)
with open(build_file_path, "r") as ifp:
build = json.load(ifp)
abi = build["abi"]
interface = generate_brownie_interface(abi, args.name)
write_file(interface, os.path.join(args.outdir, args.name + ".py"))
def handle_watch(args: argparse.Namespace) -> None:
if args.abi == "erc20":
contract_abi = ERC20.abi()
elif args.abi == "erc721":
contract_abi = ERC721.abi()
elif args.abi == "cu":
contract_abi = CU.abi()
else:
with open(args.abi, "r") as ifp:
contract_abi = json.load(ifp)
web3 = Web3(Web3.HTTPProvider(args.web3))
if args.poa:
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
if args.db:
if args.network is None:
raise ValueError("Please specify --network")
network = Network.__members__[args.network]
from moonstreamdb.db import yield_db_session_ctx
from .crawler.moonstream_ethereum_state_provider import (
MoonstreamEthereumStateProvider,
)
state_provider = MoonstreamEthereumStateProvider(web3, network)
with yield_db_session_ctx() as db_session:
try:
state_provider.set_db_session(db_session)
watch_contract(
web3=web3,
state_provider=state_provider,
contract_address=web3.toChecksumAddress(args.contract),
contract_abi=contract_abi,
num_confirmations=args.confirmations,
start_block=args.start,
end_block=args.end,
outfile=args.outfile,
)
finally:
state_provider.clear_db_session()
else:
watch_contract(
web3=web3,
state_provider=Web3StateProvider(web3),
contract_address=web3.toChecksumAddress(args.contract),
contract_abi=contract_abi,
num_confirmations=args.confirmations,
start_block=args.start,
end_block=args.end,
min_blocks_batch=args.min_blocks_batch,
max_blocks_batch=args.max_blocks_batch,
batch_size_update_threshold=args.batch_size_update_threshold,
only_events=args.only_events,
outfile=args.outfile,
)
def handle_watch_cu(args: argparse.Namespace) -> None:
from moonworm.cu_watch import watch_cu_contract
MOONSTREAM_DB_URI = os.environ.get("MOONSTREAM_DB_URI")
if not MOONSTREAM_DB_URI:
print("Please set MOONSTREAM_DB_URI environment variable")
return
if args.abi is not None:
with open(args.abi, "r") as ifp:
contract_abi = json.load(ifp)
else:
print("Using CUContract abi since no abi is specified")
contract_abi = CU.abi()
web3 = Web3(Web3.HTTPProvider(args.web3))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
watch_cu_contract(
web3,
web3.toChecksumAddress(args.contract),
contract_abi,
args.confirmations,
start_block=args.deployment_block,
force_start=args.force,
)
def handle_find_deployment(args: argparse.Namespace) -> None:
web3_client = Web3(Web3.HTTPProvider(args.web3))
result = find_deployment_block(web3_client, args.contract, args.interval)
if result is None:
raise ValueError(
f"Address does not represent a smart contract: {args.contract}"
)
print(result)
def generate_argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Moonworm: Manage your smart contract")
parser.add_argument(
"-v",
"--version",
action="version",
version=f"moonworm {MOONWORM_VERSION}",
help="Show version",
)
parser.set_defaults(func=lambda _: parser.print_help())
subcommands = parser.add_subparsers(dest="subcommands")
watch_parser = subcommands.add_parser("watch", help="Watch a contract")
watch_parser.add_argument(
"-i",
"--abi",
required=True,
help="ABI file path, or one of 'erc20', 'erc721', 'cu'",
)
watch_parser.add_argument(
"-c",
"--contract",
required=True,
help="Contract address",
)
watch_parser.add_argument(
"-w",
"--web3",
required=True,
help="Web3 provider",
)
watch_parser.add_argument(
"--db",
action="store_true",
help="Use the Moonstream database specified by 'MOONSTREAM_DB_URI' to get blocks/transactions. If set, --network must also be provided",
)
watch_parser.add_argument(
"--network",
choices=Network.__members__,
default=None,
help="Network name identifying the database models to use. Required if --db is set",
)
watch_parser.add_argument(
"--start",
"-s",
type=int,
default=None,
help="Block number to start watching from",
)
watch_parser.add_argument(
"--end",
"-e",
type=int,
default=None,
help="Block number at which to end watching",
)
watch_parser.add_argument(
"--poa",
action="store_true",
help="Pass this flag if you are using a PoA network",
)
watch_parser.add_argument(
"--confirmations",
default=15,
type=int,
help="Number of confirmations to wait for. Default=15",
)
watch_parser.add_argument(
"--min-blocks-batch",
default=100,
type=int,
help="Minimum number of blocks to batch together. Default=100",
)
watch_parser.add_argument(
"--max-blocks-batch",
default=1000,
type=int,
help="Maximum number of blocks to batch together. Default=1000",
)
watch_parser.add_argument(
"--batch-size-update-threshold",
default=100,
type=int,
help="Minimum number of events before updating the batch size (only for --only-events mode). Default=100",
)
watch_parser.add_argument(
"--only-events",
action="store_true",
help="Only watch events. Default=False",
)
watch_parser.add_argument(
"-o",
"--outfile",
default=None,
help="Optional JSONL (JSON Lines) file into which to write events and method calls",
)
watch_parser.set_defaults(func=handle_watch)
watch_cu_parser = subcommands.add_parser(
"watch-cu", help="Watch a Crypto Unicorns contract"
)
watch_cu_parser.add_argument(
"-i",
"--abi",
default=None,
help="ABI file path, default is abi in fixtures/abis/CU.json",
)
watch_cu_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Force start from given block",
)
watch_cu_parser.add_argument(
"-c",
"--contract",
required=True,
help="Contract address",
)
watch_cu_parser.add_argument(
"-w",
"--web3",
required=True,
help="Web3 provider",
)
watch_cu_parser.add_argument(
"--confirmations",
default=10,
type=int,
help="Number of confirmations to wait for. Default=10",
)
watch_cu_parser.add_argument(
"--deployment-block",
"-d",
type=int,
help="Block number of the deployment",
)
watch_cu_parser.set_defaults(func=handle_watch_cu)
generate_brownie_parser = subcommands.add_parser(
"generate-brownie", description="Moonworm code generator for brownie projects"
)
generate_brownie_parser.add_argument(
"-o",
"--outdir",
required=True,
help=f"Output directory where files will be generated.",
)
generate_brownie_parser.add_argument(
"--name",
"-n",
required=True,
help="Prefix name for generated files",
)
generate_brownie_parser.add_argument(
"-p",
"--project",
required=True,
help=f"Path to brownie project directory",
)
generate_brownie_parser.set_defaults(func=handle_brownie_generate)
generate_parser = subcommands.add_parser(
"generate", description="Moonworm code generator"
)
generate_parser.add_argument(
"-i",
"--abi",
required=True,
help=f"Path to contract abi JSON file or (erc20|erc721)",
)
generate_parser.add_argument(
"-o",
"--outdir",
required=True,
help=f"Output directory where files will be generated.",
)
generate_parser.add_argument(
"--interface",
action="store_true",
help="Generate python interface for given smart contract abi",
)
generate_parser.add_argument(
"--cli",
action="store_true",
help="Generate cli for given smart contract abi",
)
generate_parser.add_argument(
"--name",
"-n",
required=True,
help="Prefix name for generated files",
)
generate_parser.add_argument(
"--force",
"-f",
action="store_true",
help="Force rewrite generated files",
)
generate_parser.set_defaults(func=handle_generate)
find_deployment_parser = subcommands.add_parser(
"find-deployment",
description="Find the block where a smart contract was deployed",
)
find_deployment_parser.add_argument(
"-w",
"--web3",
required=True,
help="Web3 provider",
)
find_deployment_parser.add_argument(
"-c",
"--contract",
type=Web3.toChecksumAddress,
required=True,
help="Contract address",
)
find_deployment_parser.add_argument(
"-t",
"--interval",
type=float,
default=1.0,
help="Number of seconds (float) to wait between web3 calls",
)
find_deployment_parser.set_defaults(func=handle_find_deployment)
return parser
def main() -> None:
parser = generate_argument_parser()
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
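# Example invocations (illustrative; contract addresses and provider URLs are
# hypothetical placeholders, and the console script is assumed to be installed
# under the name ``moonworm``):
#   moonworm generate --abi erc20 --name MyToken --outdir generated/ --interface
#   moonworm watch --abi erc721 --contract 0x... --web3 https://... --confirmations 15
#   moonworm find-deployment --web3 https://... --contract 0x...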
|
468315
|
import random
import numpy as np
import torch
import torch.nn.functional as F
def floyed(r):
"""
Compute the transitive closure of an adjacency matrix (Floyd-Warshall-style reachability).
:param r: a numpy NxN adjacency matrix with 0/1 float entries
:return: a numpy NxN matrix with 0/1 float entries, where entry [i, j] is 1 iff node j is reachable from node i
"""
r = np.array(r)
N = r.shape[0]
for k in range(N):
for i in range(N):
for j in range(N):
if r[i, k] > 0 and r[k, j] > 0:
r[i, j] = 1
return r
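# Worked example (illustrative): for the adjacency matrix
#     r = [[0, 1, 0],
#          [0, 0, 1],
#          [0, 0, 0]]
# floyed(r) adds the transitive edge 0 -> 2 and returns
#     [[0, 1, 1],
#      [0, 0, 1],
#      [0, 0, 0]]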
def prepare_graph(graph, config):
Xs, Rs, Ls = zip(*graph)
ls = [len(it) for it in Xs]
maxL = max(ls)
inputs = []
masks = []
labels = []
for x, r, L, l in zip(Xs, Rs, Ls, ls):
input_i = torch.LongTensor(x)
label_i = torch.LongTensor(L)
mask_i = torch.from_numpy(floyed(r)).float()
#mask_i = torch.from_numpy(np.asarray(r)).float() # no floyed
padded_input_i = F.pad(input_i, (0, maxL - l), "constant", config.PAD)
padded_label_i = F.pad(label_i, (0, maxL - l), "constant", config.PAD)
padded_mask_i = F.pad(mask_i, (0, maxL - mask_i.shape[1], 0, maxL - mask_i.shape[1]), "constant", config.PAD)
inputs.append(padded_input_i)
masks.append(padded_mask_i)
labels.append(padded_label_i)
return torch.stack(inputs), torch.stack(masks), torch.stack(labels)
def createCrossMask(n, m, N, M):
mask = torch.zeros(N+M, N+M)
mask[:n, :n] = torch.ones(n, n)
mask[N:N+m, N:N+m] = torch.ones(m, m)
mask[:n, N:N+m] = torch.ones(n, m)
mask[N:N+m, :n] = torch.ones(m, n)
return mask
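# Worked example (illustrative): createCrossMask(2, 1, 3, 2) returns a 5x5 mask
# where the 2 real tokens of the first graph (padded to 3) and the 1 real token
# of the second graph (padded to 2) may attend to each other, padding excluded:
#     [[1, 1, 0, 1, 0],
#      [1, 1, 0, 1, 0],
#      [0, 0, 0, 0, 0],
#      [1, 1, 0, 1, 0],
#      [0, 0, 0, 0, 0]]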
def prepareCrossAttention(g1, g2):
Xs, _, _ = zip(*g1)
Ys, _, _ = zip(*g2)
lXs = [len(it) for it in Xs]
lYs = [len(it) for it in Ys]
maxLx = max(lXs)
maxLy = max(lYs)
masks = []
for lx, ly in zip(lXs, lYs):
mask = createCrossMask(lx, ly, maxLx, maxLy)
masks.append(mask)
return torch.stack(masks)
def sequence_corruption(seq, config):
label = []
masked_seq = []
for it in seq:
r = random.random()
if r < config.corruption_rate:
label.append(it)
rr = random.random()
if rr < 0.8:
masked_seq.append(config.MASK)
else:
masked_seq.append(random.choice(list(range(config.n_vocab))))
else:
label.append(config.PAD)
masked_seq.append(it)
return masked_seq, label
def apply_mask(graph_pairs, config):
g1s = []
g2s = []
for g1, g2 in graph_pairs:
X1, R1 = g1
X2, R2 = g2
X1_, L1 = sequence_corruption(X1, config)
X2_, L2 = sequence_corruption(X2, config)
g1s.append([X1_, R1, L1])
g2s.append([X2_, R2, L2])
return g1s, g2s
def prepare_train(graph_pairs, config):
# Applying Mask on Labels
masked_g1, masked_g2 = apply_mask(graph_pairs, config)
X, maskX, labelX = prepare_graph(masked_g1, config)
maskX_ = maskX.transpose(-2, -1)
Y, maskY, labelY = prepare_graph(masked_g2, config)
maskY_ = maskY.transpose(-2, -1)
maskXY = prepareCrossAttention(masked_g1, masked_g2)
return X, maskX, maskX_, Y, maskY, maskY_, maskXY, torch.cat([labelX, labelY], dim=-1)
def analytic():
import matplotlib.pyplot as plt
fig, axes = plt.subplots(1,1)
data_1 = torch.load('data/nasbench101/train_data.pt')
data_2 = torch.load('data/nasbench101/test_data.pt')
params = []
for data in [data_1, data_2]:
for i in range(len(data)):
params.append(data[i]['params'])
axes.hist(params, bins=50, label=['params'])
axes.set_xlabel('number of trainable model parameters', fontsize=12)
axes.set_ylabel('frequency', fontsize=12)
axes.set_title('Histogram for model parameters on NASBench-101', fontsize=13)
plt.show()
if __name__ == '__main__':
analytic()
|
468316
|
import torch
from torch import nn
from torch import Tensor
class Convolution(nn.Module):
def __init__(self, _in: int, _out: int, kernel_size: int, residual: bool):
"""
A basic Convolution - BatchNorm - ReLU block.
:param _in: number of input channels
:param _out: number of output channels
:param kernel_size: convolution kernel size
:param residual: whether to add a skip (residual) connection
"""
super().__init__()
self.conv = nn.Conv1d(in_channels=_in,
out_channels=_out,
kernel_size=kernel_size,
padding=kernel_size // 2)
self.norm = nn.BatchNorm1d(_out)
self.relu = nn.ReLU()
self.residual = residual
def forward(self, x: Tensor) -> Tensor:
_x = x
x = self.conv(x) # convolution
x = self.norm(x) # batch normalization
x = self.relu(x) # relu activation
# residual connection
return x + _x \
if x.size() == _x.size() and self.residual \
else x
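# Illustrative usage sketch (hypothetical sizes): with _in == _out and
# residual=True the input is added back to the output, and the sequence length
# is preserved because padding is kernel_size // 2 for an odd kernel.
if __name__ == "__main__":
    block = Convolution(_in=8, _out=8, kernel_size=3, residual=True)
    x = torch.randn(4, 8, 100)   # (batch, channels, length)
    print(block(x).shape)        # torch.Size([4, 8, 100])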
|
468327
|
from . import any_drivers
from . import io_tam
from . import tam_surface
from . import tam_colors
from . import tam_drivers
from . import tam_identifier
from . import tam_keys
from . import uni_drivers
from . import win_drivers
from . import ansi_256_drivers
from . import ansi_true_color_drivers
from . import tcp_io
from . import lin_drivers
from . import mac_drivers
__all__ = ("any_drivers",
"io_tam",
"tam_surface",
"tam_colors",
"tam_drivers",
"tam_identifier",
"tam_keys",
"uni_drivers",
"win_drivers",
"ansi_256_drivers",
"ansi_true_color_drivers",
"tcp_io",
"lin_drivers",
"mac_drivers")
|
468337
|
import torch
from collections import OrderedDict
from score_following_game.reinforcement_learning.algorithms.a2c import A2CAgent
from score_following_game.reinforcement_learning.algorithms.agent import Agent
from score_following_game.reinforcement_learning.torch_extentions.distributions.adapted_categorical import AdaptedCategorical
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
class PPOAgent(A2CAgent):
def __init__(self, observation_space, model, n_actions=1, t_max=5, n_worker=1, gamma=0.99, gae_lambda=0.95, ppo_epoch=4,
epsilon=0.2, clip_value=False, batch_size=32, distribution=AdaptedCategorical, use_cuda=torch.cuda.is_available(),
log_writer=None, log_interval=10, evaluator=None, eval_interval=5000, lr_scheduler=None,
score_name=None, high_is_better=False, dump_interval=100000, dump_dir=None, buffer=None):
A2CAgent.__init__(self, observation_space=observation_space, model=model, n_actions=n_actions, t_max=t_max, n_worker=n_worker,
gamma=gamma, gae_lambda=gae_lambda, gae=True, distribution=distribution, use_cuda=use_cuda,
log_writer=log_writer, log_interval=log_interval, evaluator=evaluator, eval_interval=eval_interval,
lr_scheduler=lr_scheduler, score_name=score_name, high_is_better=high_is_better,
dump_interval=dump_interval, dump_dir=dump_dir, buffer=buffer)
self.ppo_epoch = ppo_epoch
self.epsilon = epsilon
self.batch_size = batch_size
self.clip_value = clip_value
self.alpha = 1
def perform_update(self):
Agent.perform_update(self)
with torch.no_grad():
self.value_predictions[-1] = self.model.forward_value(self.prepare_model_input(-1))
gae = 0
for step in reversed(range(self.t_max)):
delta = self.rewards[step] + self.gamma * self.value_predictions[step + 1] * self.masks[step] \
- self.value_predictions[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step] * gae
self.returns[step] = gae + self.value_predictions[step]
advantages = self.returns[:-1] - self.value_predictions[:-1]
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-5)
value_loss_epoch = 0
policy_loss_epoch = 0
dist_entropy_epoch = 0
explained_variance_epoch = 0
n_updates = 0
clip = self.epsilon * self.alpha
for _ in range(self.ppo_epoch):
sampler = BatchSampler(SubsetRandomSampler(list(range(self.n_worker * self.t_max))),
self.batch_size, drop_last=False)
for indices in sampler:
actions_batch = self.actions.view(self.n_worker * self.t_max, -1)[indices]
return_batch = self.returns[:-1].view(-1, 1)[indices]
old_log_probs_batch = self.old_log_probs.view(-1, *self.old_log_probs.size()[2:])[indices]
model_returns = self.model(self.prepare_batch_input(indices))
policy = model_returns['policy']
values = model_returns['value']
action_log_probabilities = self.model.get_log_probs(policy, actions_batch)
ratio = torch.exp(action_log_probabilities - old_log_probs_batch)
advantage_target = advantages.view(-1, 1)[indices]
surr1 = ratio * advantage_target
surr2 = ratio.clamp(1.0 - clip, 1.0 + clip) * advantage_target
policy_loss = -torch.min(surr1, surr2).mean(dim=0)
dist_entropy = self.model.calc_entropy(policy)
# clip value loss according to
# https://github.com/openai/baselines/tree/master/baselines/ppo2
if self.clip_value:
value_preds_batch = self.value_predictions[:-1].view(-1, 1)[indices]
value_pred_clipped = value_preds_batch + \
(values - value_preds_batch).clamp(-clip, clip)
value_losses = (return_batch - values).pow(2)
value_losses_clipped = (return_batch - value_pred_clipped).pow(2)
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped).mean(dim=0)
else:
value_loss = 0.5*(return_batch - values).pow(2).mean(dim=0)
losses = dict(policy_loss=policy_loss, value_loss=value_loss,
dist_entropy=dist_entropy)
self.model.update(losses)
value_loss_epoch += value_loss.item()
policy_loss_epoch += policy_loss.item()
dist_entropy_epoch += dist_entropy.item()
explained_variance_epoch += ((1 - (return_batch-values.detach()).var())
/ return_batch.var()).item()
n_updates += 1
value_loss_epoch /= n_updates
policy_loss_epoch /= n_updates
dist_entropy_epoch /= n_updates
explained_variance_epoch /= n_updates
# logging
self.log_dict = {
'policy_loss': policy_loss_epoch,
'value_loss': value_loss_epoch,
'entropy': dist_entropy_epoch,
'explained_var': explained_variance_epoch,
'avg_reward': self.final_rewards.mean(),
'median_reward': self.final_rewards.median(),
'ppo_epsilon': clip
}
def prepare_batch_input(self, indices):
states_batch = OrderedDict()
for obs_key in self.observations:
obs = self.observations[obs_key]
states_batch[obs_key] = obs[:-1].view(-1, *obs.size()[2:])[indices]
return states_batch
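# Note on the update above (the standard PPO clipped surrogate, for reference):
# with probability ratio r_t = exp(log pi_new(a_t|s_t) - log pi_old(a_t|s_t)) and
# advantage estimate A_t, the policy loss minimized in perform_update is
#     L_clip = -E_t[ min(r_t * A_t, clamp(r_t, 1 - eps, 1 + eps) * A_t) ]
# which is what ``surr1``/``surr2``/``policy_loss`` compute, with eps additionally
# scaled by ``self.alpha``.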
|
468425
|
import typing as tp
from abc import ABC, abstractmethod
import httpx
class BaseCache(ABC):
@abstractmethod
def get(self, request: httpx.Request) -> tp.Optional[httpx.Response]:
"""Get cached response from Cache.
We use the httpx.Request.url as key.
Args:
request: httpx.Request
Returns:
tp.Optional[httpx.Response]
"""
@abstractmethod
async def aget(self, request: httpx.Request) -> tp.Optional[httpx.Response]:
"""(Async) Get cached response from Cache.
We use the httpx.Request.url as key.
Args:
request: httpx.Request
Returns:
tp.Optional[httpx.Response]
"""
@abstractmethod
def set(
self,
*,
request: httpx.Request,
response: httpx.Response,
content: tp.Optional[bytes] = None
) -> None:
"""Set new response entry in cache.
In case the response does not yet have a '_content' property, content should
be provided in the optional 'content' kwarg (usually using a callback)
Args:
request: httpx.Request
response: httpx.Response, to cache
content (bytes, optional): Defaults to None; should be provided when the
response does not yet have content.
"""
@abstractmethod
async def aset(
self,
*,
request: httpx.Request,
response: httpx.Response,
content: tp.Optional[bytes] = None
) -> None:
"""(Async) Set new response entry in cache.
In case the response does not yet have a '_content' property, content should
be provided in the optional 'content' kwarg (usually using a callback)
Args:
request: httpx.Request
response: httpx.Response, to cache
content (bytes, optional): Defaults to None; should be provided when the
response does not yet have content.
"""
@abstractmethod
def delete(self, request: httpx.Request) -> None:
"""Delete an entry from cache.
Args:
request: httpx.Request
"""
@abstractmethod
async def adelete(self, request: httpx.Request) -> None:
"""(Async) Delete an entry from cache.
Args:
request: httpx.Request
"""
def close(self) -> None:
"""Close cache."""
async def aclose(self) -> None:
"""(Async) Close cache."""
|
468436
|
from contextlib import contextmanager
from typing import NamedTuple
import pytest
import trio
from libp2p.exceptions import ValidationError
from libp2p.pubsub.pb import rpc_pb2
from libp2p.pubsub.pubsub import PUBSUB_SIGNING_PREFIX, SUBSCRIPTION_CHANNEL_SIZE
from libp2p.tools.constants import MAX_READ_LEN
from libp2p.tools.factories import IDFactory, PubsubFactory, net_stream_pair_factory
from libp2p.tools.pubsub.utils import make_pubsub_msg
from libp2p.tools.utils import connect
from libp2p.utils import encode_varint_prefixed
TESTING_TOPIC = "TEST_SUBSCRIBE"
TESTING_DATA = b"data"
@pytest.mark.trio
async def test_subscribe_and_unsubscribe():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids
await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
assert TESTING_TOPIC not in pubsubs_fsub[0].topic_ids
@pytest.mark.trio
async def test_re_subscribe():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids
@pytest.mark.trio
async def test_re_unsubscribe():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
# Unsubscribe from topic we didn't even subscribe to
assert "NOT_MY_TOPIC" not in pubsubs_fsub[0].topic_ids
await pubsubs_fsub[0].unsubscribe("NOT_MY_TOPIC")
assert "NOT_MY_TOPIC" not in pubsubs_fsub[0].topic_ids
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
assert TESTING_TOPIC in pubsubs_fsub[0].topic_ids
await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
assert TESTING_TOPIC not in pubsubs_fsub[0].topic_ids
await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
assert TESTING_TOPIC not in pubsubs_fsub[0].topic_ids
@pytest.mark.trio
async def test_peers_subscribe():
async with PubsubFactory.create_batch_with_floodsub(2) as pubsubs_fsub:
await connect(pubsubs_fsub[0].host, pubsubs_fsub[1].host)
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
# Yield to let 0 notify 1
await trio.sleep(1)
assert pubsubs_fsub[0].my_id in pubsubs_fsub[1].peer_topics[TESTING_TOPIC]
await pubsubs_fsub[0].unsubscribe(TESTING_TOPIC)
# Yield to let 0 notify 1
await trio.sleep(1)
assert pubsubs_fsub[0].my_id not in pubsubs_fsub[1].peer_topics[TESTING_TOPIC]
@pytest.mark.trio
async def test_get_hello_packet():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
def _get_hello_packet_topic_ids():
packet = pubsubs_fsub[0].get_hello_packet()
return tuple(sub.topicid for sub in packet.subscriptions)
# Test: No subscription, so there should not be any topic ids in the hello packet.
assert len(_get_hello_packet_topic_ids()) == 0
# Test: After subscriptions, topic ids should be in the hello packet.
topic_ids = ["t", "o", "p", "i", "c"]
for topic in topic_ids:
await pubsubs_fsub[0].subscribe(topic)
topic_ids_in_hello = _get_hello_packet_topic_ids()
for topic in topic_ids:
assert topic in topic_ids_in_hello
@pytest.mark.trio
async def test_set_and_remove_topic_validator():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
is_sync_validator_called = False
def sync_validator(peer_id, msg):
nonlocal is_sync_validator_called
is_sync_validator_called = True
is_async_validator_called = False
async def async_validator(peer_id, msg):
nonlocal is_async_validator_called
is_async_validator_called = True
await trio.lowlevel.checkpoint()
topic = "TEST_VALIDATOR"
assert topic not in pubsubs_fsub[0].topic_validators
# Register sync validator
pubsubs_fsub[0].set_topic_validator(topic, sync_validator, False)
assert topic in pubsubs_fsub[0].topic_validators
topic_validator = pubsubs_fsub[0].topic_validators[topic]
assert not topic_validator.is_async
# Validate with sync validator
topic_validator.validator(peer_id=IDFactory(), msg="msg")
assert is_sync_validator_called
assert not is_async_validator_called
# Register with async validator
pubsubs_fsub[0].set_topic_validator(topic, async_validator, True)
is_sync_validator_called = False
assert topic in pubsubs_fsub[0].topic_validators
topic_validator = pubsubs_fsub[0].topic_validators[topic]
assert topic_validator.is_async
# Validate with async validator
await topic_validator.validator(peer_id=IDFactory(), msg="msg")
assert is_async_validator_called
assert not is_sync_validator_called
# Remove validator
pubsubs_fsub[0].remove_topic_validator(topic)
assert topic not in pubsubs_fsub[0].topic_validators
@pytest.mark.trio
async def test_get_msg_validators():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
times_sync_validator_called = 0
def sync_validator(peer_id, msg):
nonlocal times_sync_validator_called
times_sync_validator_called += 1
times_async_validator_called = 0
async def async_validator(peer_id, msg):
nonlocal times_async_validator_called
times_async_validator_called += 1
await trio.lowlevel.checkpoint()
topic_1 = "TEST_VALIDATOR_1"
topic_2 = "TEST_VALIDATOR_2"
topic_3 = "TEST_VALIDATOR_3"
# Register sync validator for topic 1 and 2
pubsubs_fsub[0].set_topic_validator(topic_1, sync_validator, False)
pubsubs_fsub[0].set_topic_validator(topic_2, sync_validator, False)
# Register async validator for topic 3
pubsubs_fsub[0].set_topic_validator(topic_3, async_validator, True)
msg = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[topic_1, topic_2, topic_3],
data=b"1234",
seqno=b"\x00" * 8,
)
topic_validators = pubsubs_fsub[0].get_msg_validators(msg)
for topic_validator in topic_validators:
if topic_validator.is_async:
await topic_validator.validator(peer_id=IDFactory(), msg="msg")
else:
topic_validator.validator(peer_id=IDFactory(), msg="msg")
assert times_sync_validator_called == 2
assert times_async_validator_called == 1
@pytest.mark.parametrize(
"is_topic_1_val_passed, is_topic_2_val_passed",
((False, True), (True, False), (True, True)),
)
@pytest.mark.trio
async def test_validate_msg(is_topic_1_val_passed, is_topic_2_val_passed):
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
def passed_sync_validator(peer_id, msg):
return True
def failed_sync_validator(peer_id, msg):
return False
async def passed_async_validator(peer_id, msg):
await trio.lowlevel.checkpoint()
return True
async def failed_async_validator(peer_id, msg):
await trio.lowlevel.checkpoint()
return False
topic_1 = "TEST_SYNC_VALIDATOR"
topic_2 = "TEST_ASYNC_VALIDATOR"
if is_topic_1_val_passed:
pubsubs_fsub[0].set_topic_validator(topic_1, passed_sync_validator, False)
else:
pubsubs_fsub[0].set_topic_validator(topic_1, failed_sync_validator, False)
if is_topic_2_val_passed:
pubsubs_fsub[0].set_topic_validator(topic_2, passed_async_validator, True)
else:
pubsubs_fsub[0].set_topic_validator(topic_2, failed_async_validator, True)
msg = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[topic_1, topic_2],
data=b"1234",
seqno=b"\x00" * 8,
)
if is_topic_1_val_passed and is_topic_2_val_passed:
await pubsubs_fsub[0].validate_msg(pubsubs_fsub[0].my_id, msg)
else:
with pytest.raises(ValidationError):
await pubsubs_fsub[0].validate_msg(pubsubs_fsub[0].my_id, msg)
@pytest.mark.trio
async def test_continuously_read_stream(monkeypatch, nursery, security_protocol):
async def wait_for_event_occurring(event):
await trio.lowlevel.checkpoint()
with trio.fail_after(0.1):
await event.wait()
class Events(NamedTuple):
push_msg: trio.Event
handle_subscription: trio.Event
handle_rpc: trio.Event
@contextmanager
def mock_methods():
event_push_msg = trio.Event()
event_handle_subscription = trio.Event()
event_handle_rpc = trio.Event()
async def mock_push_msg(msg_forwarder, msg):
event_push_msg.set()
await trio.lowlevel.checkpoint()
def mock_handle_subscription(origin_id, sub_message):
event_handle_subscription.set()
async def mock_handle_rpc(rpc, sender_peer_id):
event_handle_rpc.set()
await trio.lowlevel.checkpoint()
with monkeypatch.context() as m:
m.setattr(pubsubs_fsub[0], "push_msg", mock_push_msg)
m.setattr(pubsubs_fsub[0], "handle_subscription", mock_handle_subscription)
m.setattr(pubsubs_fsub[0].router, "handle_rpc", mock_handle_rpc)
yield Events(event_push_msg, event_handle_subscription, event_handle_rpc)
async with PubsubFactory.create_batch_with_floodsub(
1, security_protocol=security_protocol
) as pubsubs_fsub, net_stream_pair_factory(
security_protocol=security_protocol
) as stream_pair:
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
# Kick off the task `continuously_read_stream`
nursery.start_soon(pubsubs_fsub[0].continuously_read_stream, stream_pair[0])
# Test: `push_msg` is called when publishing to a subscribed topic.
publish_subscribed_topic = rpc_pb2.RPC(
publish=[rpc_pb2.Message(topicIDs=[TESTING_TOPIC])]
)
with mock_methods() as events:
await stream_pair[1].write(
encode_varint_prefixed(publish_subscribed_topic.SerializeToString())
)
await wait_for_event_occurring(events.push_msg)
# Make sure the other events are not emitted.
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.handle_subscription)
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.handle_rpc)
        # Test: `push_msg` is not called when publishing to a topic we are not subscribed to.
publish_not_subscribed_topic = rpc_pb2.RPC(
publish=[rpc_pb2.Message(topicIDs=["NOT_SUBSCRIBED"])]
)
with mock_methods() as events:
await stream_pair[1].write(
encode_varint_prefixed(publish_not_subscribed_topic.SerializeToString())
)
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.push_msg)
# Test: `handle_subscription` is called when a subscription message is received.
subscription_msg = rpc_pb2.RPC(subscriptions=[rpc_pb2.RPC.SubOpts()])
with mock_methods() as events:
await stream_pair[1].write(
encode_varint_prefixed(subscription_msg.SerializeToString())
)
await wait_for_event_occurring(events.handle_subscription)
# Make sure the other events are not emitted.
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.push_msg)
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.handle_rpc)
# Test: `handle_rpc` is called when a control message is received.
control_msg = rpc_pb2.RPC(control=rpc_pb2.ControlMessage())
with mock_methods() as events:
await stream_pair[1].write(
encode_varint_prefixed(control_msg.SerializeToString())
)
await wait_for_event_occurring(events.handle_rpc)
# Make sure the other events are not emitted.
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.push_msg)
with pytest.raises(trio.TooSlowError):
await wait_for_event_occurring(events.handle_subscription)
# TODO: Add the following tests after they are aligned with Go.
# (Issue #191: https://github.com/libp2p/py-libp2p/issues/191)
# - `test_stream_handler`
# - `test_handle_peer_queue`
@pytest.mark.trio
async def test_handle_subscription():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
assert len(pubsubs_fsub[0].peer_topics) == 0
sub_msg_0 = rpc_pb2.RPC.SubOpts(subscribe=True, topicid=TESTING_TOPIC)
peer_ids = [IDFactory() for _ in range(2)]
# Test: One peer is subscribed
pubsubs_fsub[0].handle_subscription(peer_ids[0], sub_msg_0)
assert (
len(pubsubs_fsub[0].peer_topics) == 1
and TESTING_TOPIC in pubsubs_fsub[0].peer_topics
)
assert len(pubsubs_fsub[0].peer_topics[TESTING_TOPIC]) == 1
assert peer_ids[0] in pubsubs_fsub[0].peer_topics[TESTING_TOPIC]
# Test: Another peer is subscribed
pubsubs_fsub[0].handle_subscription(peer_ids[1], sub_msg_0)
assert len(pubsubs_fsub[0].peer_topics) == 1
assert len(pubsubs_fsub[0].peer_topics[TESTING_TOPIC]) == 2
assert peer_ids[1] in pubsubs_fsub[0].peer_topics[TESTING_TOPIC]
# Test: Subscribe to another topic
another_topic = "ANOTHER_TOPIC"
sub_msg_1 = rpc_pb2.RPC.SubOpts(subscribe=True, topicid=another_topic)
pubsubs_fsub[0].handle_subscription(peer_ids[0], sub_msg_1)
assert len(pubsubs_fsub[0].peer_topics) == 2
assert another_topic in pubsubs_fsub[0].peer_topics
assert peer_ids[0] in pubsubs_fsub[0].peer_topics[another_topic]
# Test: unsubscribe
unsub_msg = rpc_pb2.RPC.SubOpts(subscribe=False, topicid=TESTING_TOPIC)
pubsubs_fsub[0].handle_subscription(peer_ids[0], unsub_msg)
assert peer_ids[0] not in pubsubs_fsub[0].peer_topics[TESTING_TOPIC]
@pytest.mark.trio
async def test_handle_talk():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
sub = await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
msg_0 = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[TESTING_TOPIC],
data=b"1234",
seqno=b"\x00" * 8,
)
pubsubs_fsub[0].notify_subscriptions(msg_0)
msg_1 = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=["NOT_SUBSCRIBED"],
data=b"1234",
seqno=b"\x11" * 8,
)
pubsubs_fsub[0].notify_subscriptions(msg_1)
assert (
len(pubsubs_fsub[0].topic_ids) == 1
and sub == pubsubs_fsub[0].subscribed_topics_receive[TESTING_TOPIC]
)
assert (await sub.get()) == msg_0
@pytest.mark.trio
async def test_message_all_peers(monkeypatch, security_protocol):
async with PubsubFactory.create_batch_with_floodsub(
1, security_protocol=security_protocol
) as pubsubs_fsub, net_stream_pair_factory(
security_protocol=security_protocol
) as stream_pair:
peer_id = IDFactory()
mock_peers = {peer_id: stream_pair[0]}
with monkeypatch.context() as m:
m.setattr(pubsubs_fsub[0], "peers", mock_peers)
empty_rpc = rpc_pb2.RPC()
empty_rpc_bytes = empty_rpc.SerializeToString()
empty_rpc_bytes_len_prefixed = encode_varint_prefixed(empty_rpc_bytes)
await pubsubs_fsub[0].message_all_peers(empty_rpc_bytes)
assert (
await stream_pair[1].read(MAX_READ_LEN)
) == empty_rpc_bytes_len_prefixed
@pytest.mark.trio
async def test_subscribe_and_publish():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
list_data = [b"d0", b"d1"]
event_receive_data_started = trio.Event()
async def publish_data(topic):
await event_receive_data_started.wait()
for data in list_data:
await pubsub.publish(topic, data)
async def receive_data(topic):
i = 0
event_receive_data_started.set()
assert topic not in pubsub.topic_ids
subscription = await pubsub.subscribe(topic)
async with subscription:
assert topic in pubsub.topic_ids
async for msg in subscription:
assert msg.data == list_data[i]
i += 1
if i == len(list_data):
break
assert topic not in pubsub.topic_ids
async with trio.open_nursery() as nursery:
nursery.start_soon(receive_data, TESTING_TOPIC)
nursery.start_soon(publish_data, TESTING_TOPIC)
@pytest.mark.trio
async def test_subscribe_and_publish_full_channel():
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
pubsub = pubsubs_fsub[0]
extra_data_0 = b"extra_data_0"
extra_data_1 = b"extra_data_1"
# Test: Subscription channel is of size `SUBSCRIPTION_CHANNEL_SIZE`.
# When the channel is full, new received messages are dropped.
        # Once the channel has an empty slot, it can receive new messages again.
# Assume `SUBSCRIPTION_CHANNEL_SIZE` is smaller than `2**(4*8)`.
list_data = [i.to_bytes(4, "big") for i in range(SUBSCRIPTION_CHANNEL_SIZE)]
        # Expect `extra_data_0` to be dropped and `extra_data_1` to be appended.
expected_list_data = list_data + [extra_data_1]
subscription = await pubsub.subscribe(TESTING_TOPIC)
for data in list_data:
await pubsub.publish(TESTING_TOPIC, data)
# Publish `extra_data_0` which should be dropped since the channel is already full.
await pubsub.publish(TESTING_TOPIC, extra_data_0)
        # Consume one message so that there is an empty slot in the channel.
assert (await subscription.get()).data == expected_list_data.pop(0)
# Publish `extra_data_1` which should be appended to the channel.
await pubsub.publish(TESTING_TOPIC, extra_data_1)
for expected_data in expected_list_data:
assert (await subscription.get()).data == expected_data
@pytest.mark.trio
async def test_publish_push_msg_is_called(monkeypatch):
msg_forwarders = []
msgs = []
async def push_msg(msg_forwarder, msg):
msg_forwarders.append(msg_forwarder)
msgs.append(msg)
await trio.lowlevel.checkpoint()
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
with monkeypatch.context() as m:
m.setattr(pubsubs_fsub[0], "push_msg", push_msg)
await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
assert (
len(msgs) == 2
), "`push_msg` should be called every time `publish` is called"
assert (msg_forwarders[0] == msg_forwarders[1]) and (
msg_forwarders[1] == pubsubs_fsub[0].my_id
)
assert (
msgs[0].seqno != msgs[1].seqno
), "`seqno` should be different every time"
@pytest.mark.trio
async def test_push_msg(monkeypatch):
async with PubsubFactory.create_batch_with_floodsub(1) as pubsubs_fsub:
msg_0 = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x00" * 8,
)
@contextmanager
def mock_router_publish():
event = trio.Event()
async def router_publish(*args, **kwargs):
event.set()
await trio.lowlevel.checkpoint()
with monkeypatch.context() as m:
m.setattr(pubsubs_fsub[0].router, "publish", router_publish)
yield event
with mock_router_publish() as event:
# Test: `msg` is not seen before `push_msg`, and is seen after `push_msg`.
assert not pubsubs_fsub[0]._is_msg_seen(msg_0)
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg_0)
assert pubsubs_fsub[0]._is_msg_seen(msg_0)
# Test: Ensure `router.publish` is called in `push_msg`
with trio.fail_after(0.1):
await event.wait()
with mock_router_publish() as event:
            # Test: `push_msg` the same message again and it will be rejected,
            # so `router_publish` is not called.
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg_0)
await trio.sleep(0.01)
assert not event.is_set()
sub = await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
# Test: `push_msg` succeeds with another unseen msg.
msg_1 = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x11" * 8,
)
assert not pubsubs_fsub[0]._is_msg_seen(msg_1)
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg_1)
assert pubsubs_fsub[0]._is_msg_seen(msg_1)
with trio.fail_after(0.1):
await event.wait()
            # Test: Subscribers are notified when `push_msg` pushes new messages.
assert (await sub.get()) == msg_1
with mock_router_publish() as event:
            # Test: add a topic validator and `push_msg` a message that does
            # not pass validation; `router_publish` is not called then.
def failed_sync_validator(peer_id, msg):
return False
pubsubs_fsub[0].set_topic_validator(
TESTING_TOPIC, failed_sync_validator, False
)
msg_2 = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x22" * 8,
)
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg_2)
await trio.sleep(0.01)
assert not event.is_set()
@pytest.mark.trio
async def test_strict_signing():
async with PubsubFactory.create_batch_with_floodsub(
2, strict_signing=True
) as pubsubs_fsub:
await connect(pubsubs_fsub[0].host, pubsubs_fsub[1].host)
await pubsubs_fsub[0].subscribe(TESTING_TOPIC)
await pubsubs_fsub[1].subscribe(TESTING_TOPIC)
await trio.sleep(1)
await pubsubs_fsub[0].publish(TESTING_TOPIC, TESTING_DATA)
await trio.sleep(1)
assert len(pubsubs_fsub[0].seen_messages) == 1
assert len(pubsubs_fsub[1].seen_messages) == 1
@pytest.mark.trio
async def test_strict_signing_failed_validation(monkeypatch):
async with PubsubFactory.create_batch_with_floodsub(
2, strict_signing=True
) as pubsubs_fsub:
msg = make_pubsub_msg(
origin_id=pubsubs_fsub[0].my_id,
topic_ids=[TESTING_TOPIC],
data=TESTING_DATA,
seqno=b"\x00" * 8,
)
priv_key = pubsubs_fsub[0].sign_key
signature = priv_key.sign(
PUBSUB_SIGNING_PREFIX.encode() + msg.SerializeToString()
)
event = trio.Event()
def _is_msg_seen(msg):
return False
        # Use router publish to check whether `push_msg` succeeds.
async def router_publish(*args, **kwargs):
await trio.lowlevel.checkpoint()
            # The event will only be set if `push_msg` succeeds.
event.set()
monkeypatch.setattr(pubsubs_fsub[0], "_is_msg_seen", _is_msg_seen)
monkeypatch.setattr(pubsubs_fsub[0].router, "publish", router_publish)
# Test: no signature attached in `msg`
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
await trio.sleep(0.01)
assert not event.is_set()
# Test: `msg.key` does not match `msg.from_id`
msg.key = pubsubs_fsub[1].host.get_public_key().serialize()
msg.signature = signature
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
await trio.sleep(0.01)
assert not event.is_set()
# Test: invalid signature
msg.key = pubsubs_fsub[0].host.get_public_key().serialize()
msg.signature = b"\x12" * 100
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
await trio.sleep(0.01)
assert not event.is_set()
# Finally, assert the signature indeed will pass validation
msg.key = pubsubs_fsub[0].host.get_public_key().serialize()
msg.signature = signature
await pubsubs_fsub[0].push_msg(pubsubs_fsub[0].my_id, msg)
await trio.sleep(0.01)
assert event.is_set()
|
468442
|
import os
import sys
import apache_beam.options.pipeline_options as pipeline_options
class PipelineCLIOptions(pipeline_options.StandardOptions,
pipeline_options.WorkerOptions,
pipeline_options.SetupOptions,
pipeline_options.GoogleCloudOptions):
"""A unified arguments parser.
This parser directly exposes all the underlying Beam
options available to the user (along with some custom
arguments). To use, simply pass the arguments list as
`PipelineCLIOptions(argv)`.
Args:
argv: A list of strings representing CLI options.
"""
@classmethod
def _add_argparse_args(cls, parser):
add_parser_arguments(parser)
def add_parser_arguments(parser):
additional_args_parser = parser.add_argument_group('Custom Arguments')
additional_args_parser.add_argument('--target_dataset', metavar='', type=str,
help='BigQuery dataset for output results')
additional_args_parser.add_argument('--pre_transformed', action='store_true',
help='Use a pre-transformed BigQuery dataset')
additional_args_parser.add_argument('--wait_until_finished', action='store_true',
help='Wait until preprocess job is finished')
additional_args_parser.add_argument('--github_files', default='',
help=('If specified read the GitHub dataset '
'from the specified json files. Each line of text '
'should be a json record with two fields content and '
'repo_path'))
additional_args_parser.add_argument('--github_table', default='',
help=('If specified read the entire GitHub dataset '
'specified as PROJECT:DATASET.TABLE. If not '
'specified we run a query to filter the data.'))
additional_args_parser.add_argument('--failed_tokenize_table', metavar='', type=str,
help='The BigQuery table containing the '
'failed tokenize entry. This should be '
'of the form PROJECT:DATASET.TABLE.')
predict_args_parser = parser.add_argument_group('Batch Prediction Arguments')
predict_args_parser.add_argument('--token_pairs_table', metavar='', type=str,
help='The BigQuery table containing the '
'token pairs. This should be '
'of the form PROJECT:DATASET.TABLE.')
predict_args_parser.add_argument('--function_embeddings_table', metavar='', type=str,
help='The BigQuery table to write the '
                                          'function embeddings to. This should be '
'of the form PROJECT:DATASET.TABLE.')
predict_args_parser.add_argument('--problem', metavar='', type=str,
help='Name of the T2T problem')
predict_args_parser.add_argument('--data_dir', metavar='', type=str,
help='Path to directory of the T2T problem data')
predict_args_parser.add_argument('--saved_model_dir', metavar='', type=str,
help='Path to directory containing Tensorflow SavedModel')
predict_args_parser.add_argument('--output_dir', metavar='', type=str,
help='Path to directory where the output '
                                          'should be written.')
def prepare_pipeline_opts(argv=None):
"""Prepare pipeline options from CLI arguments.
This uses the unified PipelineCLIOptions parser
and adds modifications on top. It adds a `setup_file`
to allow installation of dependencies on Dataflow workers.
These implicit changes allow ease-of-use.
Use `-h` CLI argument to see the list of all possible
arguments.
Args:
argv: A list of strings representing the CLI arguments.
Returns:
A PipelineCLIOptions object whose `_visible_options`
contains the parsed Namespace object.
"""
argv = argv or sys.argv[1:]
argv.extend([
'--setup_file',
os.path.abspath(os.path.join(__file__, '../../../../setup.py')),
])
pipeline_opts = PipelineCLIOptions(flags=argv)
return pipeline_opts
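# A minimal usage sketch (hypothetical values; assumes a GCP project and the
# referenced Dataflow/BigQuery resources exist). The flags shown are standard
# Beam options plus the custom arguments registered above:
#
#   opts = prepare_pipeline_opts([
#       '--project', 'my-gcp-project',
#       '--runner', 'DataflowRunner',
#       '--target_dataset', 'code_search',
#   ])
#   standard_opts = opts.view_as(pipeline_options.StandardOptions)
#   print(standard_opts.runner)  # -> 'DataflowRunner'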
|
468551
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.abspath(os.path.join(BASE_DIR,os.pardir,'Data'))
ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, os.pardir))
|
468594
|
from . import loss_functional as LF
import torch
def l1(params):
return branched_loss(LF.l1_loss, params)
def l2(params):
return branched_loss(LF.l2_loss, params)
def l1_attention(params):
return branched_loss(LF.l1_attention_loss, params)
def branched_loss(loss_function, params):
"""
Args
loss_function: The loss functional that is actually computing the loss
params: all the parameters, including
branches: The tensor containing all the branches branches output from the network
targets: The ground truth targets that the network should produce
controls: the controls used for each point
branches weights: the weigths that each branch will have on the loss function
speed_gt: the ground truth speed for these data points
variable_weights: The weights for each of the variables used
For other losses it could contain more parameters
Returns
The computed loss function, but also a dictionary with plotable variables for tensorboard
"""
controls_mask = LF.compute_branches_masks(params['controls'],
params['branches'][0].shape[1])
# Update the dictionary to add also the controls mask.
params.update({'controls_mask': controls_mask})
# calculate loss for each branch with specific activation
loss_branches_vec, plotable_params = loss_function(params)
# Apply the variable weights
# This is applied to all branches except the last one, that is the speed branch...
# TODO This is hardcoded to have 4 branches not using speed.
for i in range(4):
loss_branches_vec[i] = loss_branches_vec[i][:, 0] * params['variable_weights']['Steer'] \
+ loss_branches_vec[i][:, 1] * params['variable_weights']['Gas'] \
+ loss_branches_vec[i][:, 2] * params['variable_weights']['Brake']
loss_function = loss_branches_vec[0] + loss_branches_vec[1] + loss_branches_vec[2] + \
loss_branches_vec[3]
speed_loss = loss_branches_vec[4]/(params['branches'][0].shape[0])
return torch.sum(loss_function) / (params['branches'][0].shape[0])\
+ torch.sum(speed_loss) / (params['branches'][0].shape[0]),\
plotable_params
def Loss(loss_name):
""" Factory function
Note: It is defined with the first letter as uppercase even though is a function to contrast
the actual use of this function that is making classes
"""
# TODO: this could be extended to some more arbitrary definition
if loss_name == 'L1':
return l1
elif loss_name == 'L2':
return l2
else:
raise ValueError(" Not found Loss name")
|
468608
|
import csv
import re
from collections import defaultdict
from os.path import join
from ngs_utils import logger
from ngs_utils.file_utils import verify_file, verify_dir, adjust_path
from ngs_utils.utils import OrderedDefaultDict
import vcf_stuff.filtering.ngs_reporting.reference_data as filt_ref_data
from vcf_stuff.filtering.ngs_reporting.utils import parse_gene_blacklists, iter_lines, parse_genes_list, check_gene_in_a_blacklist, get_anno_config
class Rule:
def __init__(self, gene, chrom=None, start=None, end=None, length=None, ref=None,
required_inframe=None, indel_type=None, change=None, action=None, note=None):
self.gene = gene
self.chrom = chrom
self.start = start
self.end = end
self.length = length
self.ref = ref
self.required_inframe = required_inframe
self.indel_type = indel_type
self.change = change
self.action = action
self.note = note
def parse_mut_tp53(mut_fpath):
mut_tp53 = set()
if verify_file(mut_fpath):
with open(mut_fpath) as f:
for l in f:
l = l.strip()
if not l:
continue
line = l.split('\t')
if not line[19] or 'p.' not in line[19]:
continue
prot = line[19].replace('p.', '')
mut_tp53.add(prot)
return mut_tp53
def is_hotspot_nt(chrom, pos, ref, alt, hotspot_nucleotides):
if len(ref) > len(alt) and alt != '-':
ref = ref[1:]
if len(alt) > 1:
alt = alt[1:]
else:
alt = '-'
elif len(alt) > len(ref) and ref != '-':
alt = alt[1:]
if len(ref) > 1:
ref = ref[1:]
else:
ref = '-'
key = '-'.join([chrom, str(pos), ref, alt])
return key in hotspot_nucleotides
def is_hotspot_prot(gene, aa_chg, hotspot_proteins):
aa_chg = aa_chg.replace('p.', '')
if not aa_chg: return False
key = '-'.join([gene, aa_chg])
return key in hotspot_proteins
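# Both hotspot lookups rely on a simple string-key convention (values below are
# illustrative only): nucleotide keys are 'chrom-pos-ref-alt' with the shared
# leading base of an indel trimmed, e.g. ref='AT', alt='A' at chr1:1234 yields
# 'chr1-1234-T--'; protein keys are 'gene-aa_change' with the 'p.' prefix
# stripped, e.g. 'EGFR-L858R'.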
stop_gain_pattern = re.compile(r'^[A-Z]+\d+\*')
fs_pattern = re.compile(r'^[A-Z]+(\d+)fs')
aa_snp_chg_pattern = re.compile(r'^[A-Z]\d+[A-Z]$')
ins_pattern = re.compile(r'ins.*\*[A-Z]+$')
def parse_specific_mutations(specific_mut_fpath):
tier_by_specific_mutations = dict()
tier_by_type_by_region_by_gene = defaultdict(dict)
dependent_mutations_by_gene = defaultdict(set) # when other mutation is required
with open(specific_mut_fpath) as f:
for i, l in enumerate(f):
if i == 0:
continue
l = l.replace('\n', '')
if not l:
continue
line = l.split('\t')
gene = line[0].upper()
regions = re.findall(r'\d+', line[1])
if '-' in line[1]:
for region_num in range(int(regions[0]) + 1, int(regions[1])):
regions.append(str(region_num))
if 'intron' in line[1]:
regions = ['intron' + region for region in regions]
for index in range(2, len(line) - 1):
if line[index]:
mut = line[index]
tier = index - 1
if 'types' in mut:
types = mut.split(':')[1].split(',')
for region in regions:
tier_by_type_by_region_by_gene[gene][region] = dict()
for type in types:
tier_by_type_by_region_by_gene[gene][region][type] = tier
else:
mutations = []
if 'codon' in mut:
codons = re.findall(r'\d+', mut)
if '-' in mut and len(codons) == 2:
codons = range(int(codons[0]), int(codons[1]) + 1)
for region in regions:
for codon in codons:
tier_by_specific_mutations['-'.join([gene, region, str(codon)])] = tier
mutations.append('-'.join([gene, region, str(codon)]))
elif 'sens' in mut or 'res' in mut:
pattern = re.compile('\((\D+)\s+\D+\)')
sensitization = re.findall(pattern, mut)[0] # like TKI
prot_chg = mut.split()[0].strip().replace('p.', '')
mutations = ['-'.join([gene, prot_chg])]
tier_by_specific_mutations['-'.join([gene, prot_chg])] = tier
dependent_mutations_by_gene[gene].add((sensitization, 'sens' if 'sens' in mut else 'res'))
else:
prot_chg = line[index].replace('p.', '').strip()
mutations = ['-'.join([gene, prot_chg])]
tier_by_specific_mutations['-'.join([gene, mut])] = tier
return tier_by_specific_mutations, tier_by_type_by_region_by_gene, dependent_mutations_by_gene
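# The dictionary keys built above take two forms (examples are illustrative,
# not taken from a real specific-mutations file): protein-change keys such as
# 'EGFR-T790M' ('gene-aa_change'), and per-codon keys such as 'KIT-11-557'
# ('gene-region-codon'). Tier 1 entries are treated as actionable downstream,
# while higher tiers are reported as tier2.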
class VarDict2Mut:
def __init__(self, genome, filt_cnf, tricky_regions_dir, transcripts_fpath, reg_exp_sample=None, platform=None):
self.all_reject_counter = OrderedDefaultDict(int)
self.all_counter = OrderedDefaultDict(int)
self.gene_blacklist_counter = OrderedDefaultDict(int)
self.region_blacklist_counter = OrderedDefaultDict(int)
compendia_fpath = verify_file(filt_ref_data.compendia(genome), 'compendia_ms7_hotspot')
actionable_fpath = verify_file(filt_ref_data.actionable(genome), 'actionable')
filter_common_snp_fpath = verify_file(filt_ref_data.common_snp(genome), 'filter_common_snp')
filter_common_arti_fpath = verify_file(filt_ref_data.common_art(genome), 'filter_common_artifacts')
splice_fpath = verify_file(filt_ref_data.splice(genome), 'splice')
suppressors_fpath = verify_file(filt_ref_data.suppressors(), 'suppressors')
oncogenes_fpath = verify_file(filt_ref_data.oncogenes(), 'oncogenes')
ruledir = verify_dir (filt_ref_data.ruledir(), 'ruledir')
snpeffect_polymorph_fpath = verify_file(filt_ref_data.snpeffect_export_polymorphic(), 'snpeffect_export_polymorphic')
actionable_hotspot_fpath = verify_file(filt_ref_data.actionable_hotspot(), 'actionable_hotspot')
specific_mutations_fpath = verify_file(filt_ref_data.specific_mutations(), 'specific_mutations')
last_critical_aa_fpath = verify_file(filt_ref_data.last_critical_aa(), 'last_critical_aa')
incidentalome_dir = verify_dir (filt_ref_data.incidentalome_dir(), 'incidentalome')
comments_fpath = verify_file(filt_ref_data.ngs_reports_comments(), 'ngs_reports_comments')
if not all([compendia_fpath ,
actionable_fpath ,
filter_common_snp_fpath ,
filter_common_arti_fpath ,
splice_fpath ,
suppressors_fpath ,
oncogenes_fpath ,
ruledir ,
snpeffect_polymorph_fpath,
actionable_hotspot_fpath ,
specific_mutations_fpath ,
last_critical_aa_fpath ,
incidentalome_dir ,
comments_fpath ,
]):
logger.err('Error: some of the required files are not found or empty (see above)')
self.suppressors = parse_genes_list(adjust_path(suppressors_fpath))
self.oncogenes = parse_genes_list(adjust_path(oncogenes_fpath))
self.reg_exp_sample = reg_exp_sample
self.platform = platform
transcripts_fpath = verify_file(transcripts_fpath, silent=True)
if transcripts_fpath:
logger.info('Using canonical transcripts from ' + transcripts_fpath)
with open(transcripts_fpath) as f:
self.transcripts = [tr.strip().split('.')[0] for tr in f]
self.max_ratio = filt_cnf['max_ratio']
self.max_sample_cnt = filt_cnf['max_sample_cnt']
self.min_freq = filt_cnf['min_freq'] # for all variants
self.act_min_freq = filt_cnf['act_min_freq']
self.act_min_freq = self.act_min_freq or self.min_freq // 2
self.germline_min_freq = filt_cnf['germline_min_freq']
self.filt_depth = filt_cnf['filt_depth']
self.min_vd = filt_cnf['min_vd']
self.min_gmaf = filt_cnf['min_gmaf']
self.keep_utr_intronic = filt_cnf['keep_utr_intronic']
self.keep_whole_genome = filt_cnf['keep_whole_genome']
self.keep_hla = filt_cnf['keep_hla']
self.damage_p_value = filt_cnf.get('damage_p_value')
logger.info('Parsing filtering data...')
self.tp53_groups = {'Group 1': parse_mut_tp53(join(ruledir, 'DNE.txt')),
'Group 2': parse_mut_tp53(join(ruledir, 'TA0-25.txt')),
'Group 3': parse_mut_tp53(join(ruledir, 'TA25-50_SOM_10x.txt'))}
self.splice_positions_by_gene = defaultdict(set)
for l in iter_lines(splice_fpath):
pos, g = l.split('\t')
self.splice_positions_by_gene[g].add(pos)
self.last_critical_aa_pos_by_gene = dict()
for l in iter_lines(last_critical_aa_fpath):
g, aa_pos, _ = l.split('\t')
self.last_critical_aa_pos_by_gene[g] = int(aa_pos)
self.filter_snp = set()
for l in iter_lines(filter_common_snp_fpath):
fields = l.split('\t')
self.filter_snp.add('-'.join(fields[1:5]))
self.snpeff_snp = set()
self.snpeff_snp_rsids = set()
for l in iter_lines(snpeffect_polymorph_fpath):
fields = l.split('\t')
snpeff_aachg = fields[2]
snpeff_rsid = fields[5]
if len(fields) > 11 and fields[11]:
snpeff_gene = fields[11]
self.snpeff_snp.add('-'.join([snpeff_gene, snpeff_aachg]))
elif snpeff_rsid != '-':
self.snpeff_snp_rsids.add(snpeff_rsid)
self.filter_artifacts = set()
self.filter_rules_by_gene = defaultdict(list)
for l in iter_lines(filter_common_arti_fpath):
fields = l.split('\t')
if fields[5] == 'rule':
gene, chrom, start, end, action, _, _, _, note = fields[:9]
rule = Rule(gene, chrom=chrom, start=int(start), end=int(end), action=action, note=note)
self.filter_rules_by_gene[gene].append(rule)
else:
gene, chrom, start, ref, alt = fields[:5]
self.filter_artifacts.add('-'.join([chrom, start, ref, alt]))
self.actionable_hotspot_by_gene = defaultdict(dict)
self.common_snps_by_gene = defaultdict(set)
with open(actionable_hotspot_fpath) as f:
for l in f:
l = l.replace('\n', '')
if not l or l.startswith('##'):
continue
fields = l.split('\t')
gene = fields[0]
prot_change = fields[1]
if gene.startswith('#'): # VUS, No special treatment for now
gene = gene[1:]
elif gene.startswith('^'):
gene = gene[1:]
self.common_snps_by_gene[gene].add(prot_change)
else:
is_somatic = fields[2] == 'somatic'
self.actionable_hotspot_by_gene[gene][prot_change] = 'somatic' if is_somatic else 'germline'
self.ngs_reports_comments = defaultdict(dict)
with open(comments_fpath) as f:
for r in csv.DictReader((row for row in f if not row.startswith('#')), delimiter='\t'):
gene = r['Gene']
prot_change = r['AA_Change']
if gene.startswith('^'):
gene = gene[1:] # remove leading ^ character, e.g. ^EGFR -> EGFR
is_somatic = 'somatic' in r['Note']
self.actionable_hotspot_by_gene[gene][prot_change] = 'somatic' if is_somatic else 'germline'
else:
self.ngs_reports_comments[gene][prot_change] = r['Note']
self.act_somatic = dict()
self.act_germline = set()
self.rules = defaultdict(list)
for l in iter_lines(actionable_fpath):
fields = l.split('\t')
if fields[7] == 'germline':
key = '-'.join(fields[1:5])
self.act_germline.add(key)
elif fields[7] == 'somatic':
change = fields[8].strip()
if fields[6] == 'rule':
if fields[4] == '*' and len(fields[3]) == 1:
key = '-'.join(fields[1:4])
self.act_somatic[key] = change
else:
indel_type = ''
if 'indel' in fields[5]: indel_type = 'indel'
elif 'ins' in fields[5]: indel_type = 'ins'
elif 'del' in fields[5]: indel_type = 'del'
rule = Rule(gene=fields[0],
chrom=fields[1],
start=int(fields[2]),
end=int(fields[3]),
length=int(fields[4]),
required_inframe='inframe' in fields[5],
indel_type=indel_type,
change=change)
self.rules[rule.gene].append(rule)
# elif fields[5] == inframe_del:
# self.rules[inframe_del].setdefault(fields[0], []).append([fields[1]] + [int (f) for f in fields[2:5]])
# elif fields[5] == inframe_ins:
# self.rules[inframe_ins].setdefault(fields[0], []).append([fields[1]] + [int (f) for f in fields[2:5]])
else:
key = '-'.join(fields[1:5])
self.act_somatic[key] = change
self.hotspot_nucleotides = set()
self.hotspot_proteins = set()
for l in iter_lines(compendia_fpath):
fields = l.split('\t')
if fields[5].startswith('g.'):
continue
self.hotspot_nucleotides.add('-'.join(fields[1:5]))
if not fields[6]:
continue
self.hotspot_proteins.add('-'.join([fields[0], fields[6]]))
logger.info('Parsing gene blacklists...')
anno_cfg = get_anno_config()
self.gene_blacklists_by_reason = parse_gene_blacklists(anno_cfg['blacklist']['genes'], incidentalome_dir)
for r in self.gene_blacklists_by_reason.keys():
self.gene_blacklist_counter[r] = 0
self.gene_blacklist_counter['hardfilter'] = 0
# self.gene_to_soft_filter = list(iter_lines(join(incidentalome_dir, 'soft_filter.txt')))
# self.region_blacklists_by_reason = dict()
# if tricky_regions_dir:
# info('Parsing region blacklists...')
# self.region_blacklists_by_reason = load_tricky_regions(anno_cfg['blacklist']['regions'], tricky_regions_dir)
# for r in self.region_blacklists_by_reason.keys():
# self.region_blacklist_counter[r] = 0
logger.info('Parsing actionable rules and specific mutations...')
self.tier_by_specific_mutations, self.tier_by_type_by_region_by_gene, self.sensitizations_by_gene\
= parse_specific_mutations(specific_mutations_fpath)
if not all([self.rules, self.splice_positions_by_gene, self.act_somatic, self.act_germline, self.actionable_hotspot_by_gene]):
if not self.rules:
logger.err('No rules, cannot proceed')
if not self.splice_positions_by_gene:
logger.err('No tp53_positions, cannot proceed')
if not self.act_somatic:
logger.err('No act_somatic, cannot proceed')
if not self.act_germline:
logger.err('No act_germline, cannot proceed')
if not self.actionable_hotspot_by_gene:
logger.err('No actionable_hotspots, cannot proceed')
self.status = None
self.reason_by_status = None
self.output_f = None
self.fm_output_f = None
self.rejected_output_f = None
    aa_chg_trim_pattern = re.compile(r'^([A-Z]\d+)[A-Z?]$')
def check_actionable(self, chrom, pos, ref, alt, gene, aa_chg, cosm_aa_chg, af, clnsig):
change_len = len(alt) - len(ref)
key = '-'.join([chrom, str(pos), ref, alt])
if key in self.act_somatic:
return 'act_somatic'
if key in self.act_germline and af >= self.germline_min_freq:
return 'act_germline'
if len(ref) == 1 and change_len == 0: # SNP
key = '-'.join([chrom, str(pos), ref])
if key in self.act_somatic:
return 'act_somatic'
if gene in self.actionable_hotspot_by_gene and \
(VarDict2Mut.aa_chg_trim_pattern.match(aa_chg) or re.compile('^(M1)\?$').match(aa_chg)):
act_hotspot_by_aa_chg = self.actionable_hotspot_by_gene[gene]
status = act_hotspot_by_aa_chg.get(aa_chg)
if status is not None:
if status == 'somatic' and af > self.act_min_freq:
return 'act_hotspot_somatic'
elif status == 'germline' and af > self.germline_min_freq:
return 'act_hotspot_germline'
aa_chg_trim = re.findall(VarDict2Mut.aa_chg_trim_pattern, aa_chg)[0]
status = act_hotspot_by_aa_chg.get(aa_chg_trim)
if status is not None:
return 'act_hotspot_' + status
if gene == 'TP53':
tp53_group = self.classify_tp53(aa_chg, pos, ref, alt)
if tp53_group is not None:
return 'act_somatic_tp53_group_' + str(tp53_group)
if gene in self.rules:
for r in self.rules[gene]:
if change_len >= r.length and r.start <= pos + len(ref) - 1 and pos <= r.end:
if r.required_inframe and change_len % 3 != 0:
continue
if any([r.indel_type == 'ins' and change_len > 0,
r.indel_type == 'del' and change_len < 0,
r.indel_type == 'indel' and change_len != 0]):
return 'somatic'
return None
def classify_tp53(self, aa_chg, pos, ref, alt):
aa_chg = aa_chg.replace(' ', '')
if str(pos) in self.splice_positions_by_gene['TP53'] and len(ref) == 1 and len(alt) == 1:
return 6
aa_chg = aa_chg.replace('p.', '')
aa_num = 0
if aa_chg:
aa_num_str = re.sub('[^0-9]', '', aa_chg)
if not aa_num_str:
logger.err('TP53: cannot parse aa num from aa_chg=' + str(aa_chg))
else:
aa_num = int(aa_num_str)
if aa_snp_chg_pattern.match(aa_chg):
for i in [1, 2, 3]:
if aa_chg in self.tp53_groups['Group ' + str(i)]:
return i
elif stop_gain_pattern.match(aa_chg):
if aa_num < 359:
return 4
elif fs_pattern.match(aa_chg):
if aa_num < 359:
return 5
return None
def check_rob_hedley_actionable(self, gene, aa_chg, effect, region):
if aa_chg:
gene_aachg = '-'.join([gene, aa_chg])
if gene_aachg in self.tier_by_specific_mutations:
tier = self.tier_by_specific_mutations[gene_aachg]
return 'actionable' if tier == 1 else 'tier2'
if region and effect in ['HIGH', 'MODERATE']:
codon = re.sub('[^0-9]', '', aa_chg)
gene_codon_chg = '-'.join([gene, region, codon])
if gene_codon_chg in self.tier_by_specific_mutations:
tier = self.tier_by_specific_mutations[gene_codon_chg]
return ('act' if tier == 1 else 'tier2') + '_codon_' + codon + '_in_exon_' + region
def check_by_type_and_region(self, cdna_chg, region, gene):
types_by_region = self.tier_by_type_by_region_by_gene.get(gene)
if types_by_region:
for type_ in types_by_region.get(region, []):
if type_ in cdna_chg:
tier = types_by_region[region][type_]
return ('act' if tier == 1 else 'tier2') + '_' + type_ + '_in_gene_' + gene
return False
def _check_artifacts(self, chrom, pos, id_field, gene, aa_chg):
snps = re.findall(r'rs\d+', id_field)
if any(snp in self.snpeff_snp_rsids for snp in snps):
return 'snp in snpeffect_export_polymorphic'
for r in self.filter_rules_by_gene.get(gene, []):
if r.action == 'ignore' and chrom == r.chrom and r.start <= pos <= r.end:
if r.note:
return 'filter artifacts: ' + r.note
else:
return 'filter artifacts: ' + r.action
if gene in self.common_snps_by_gene and aa_chg in self.common_snps_by_gene[gene]:
return 'common SNP'
if gene in self.ngs_reports_comments and aa_chg in self.ngs_reports_comments[gene]:
return 'ngs report comment: ' + self.ngs_reports_comments[gene][aa_chg]
return None
def print_mutation(self, status, reasons, blacklisted_reasons, fields, fm_data):
self.apply_counter('lines_written')
self.apply_counter(status)
if fm_data and self.fm_output_f:
sample, platform, gene, pos, cosm_aa_chg, aa_chg, cdna_chg, chrom, depth, allele_freq = fm_data
self.fm_output_f.write('\t'.join([sample, platform, 'short-variant', gene, status, aa_chg, cdna_chg,
chrom + ':' + pos, str(depth), str(allele_freq * 100),
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-', '-']) + '\n')
self.output_f.write('\t'.join(fields + [status] + [', '.join(reasons)] + [', '.join(blacklisted_reasons)]) + '\n')
def apply_counter(self, reason):
self.all_counter[reason] += 1
def reject_mutation(self, reason, fields):
self.apply_reject_counter(reason)
if self.rejected_output_f:
self.rejected_output_f.write('\t'.join(fields + ['rejected'] + [reason] + ['']) + '\n')
def apply_reject_counter(self, reason):
self.all_reject_counter[reason] += 1
def apply_gene_blacklist_counter(self, reason):
self.gene_blacklist_counter[reason] += 1
def apply_region_blacklist_counter(self, reason):
self.region_blacklist_counter[reason] += 1
def check_blacklist_genes(self, gene_name, aa_pos=None):
reasons = []
for reason, data in self.gene_blacklists_by_reason.items():
if check_gene_in_a_blacklist(gene_name, data, aa_pos):
reasons.append(reason)
return reasons
def filter(self, rec, tumor_indices):
id_field = rec.ID or ""
chrom = rec.CHROM
pos = rec.POS
gene = rec.INFO.get('PCGR_SYMBOL')
aa_chg = ''
ref = rec.REF
alt = rec.ALT[0]
af = rec.INFO['TUMOR_AF']
clnsig = rec.INFO.get('PCGR_CLINVAR_CLNSIG')
if 'chr' not in chrom: chrom = 'chr' + chrom
nt_chg_key = '-'.join([chrom, str(pos), ref, alt])
fail_reason = self._check_artifacts(chrom, pos, id_field, gene, aa_chg)
if fail_reason:
rec.INFO['AZ_artefact'] = fail_reason
# cosmic_counts = map(int, fields[cosmcnt_col].split()) if cosmcnt_col is not None else None
# cosm_aa_chg = self.check_by_var_class(var_class, cosm_aa_chg, cosmic_counts)
# aa_chg = self.check_by_effect(var_type, aa_chg, cdna_chg, effect)
# aa_pos = None
# if aa_chg_pos_regexp.match(aa_chg):
# aa_pos = int(aa_chg_pos_regexp.findall(aa_chg)[0])
if is_hotspot_nt(chrom, pos, ref, alt, self.hotspot_nucleotides):
print('Setting AZ_hotspot')
rec.INFO['AZ_hotspot'] = 'hotspot_nucl_change'
print('Set AZ_hotspot')
# elif is_hotspot_prot(gene, aa_chg, self.hotspot_proteins):
# self.update_status('likely', 'hotspot_AA_change')
#################################
# Checking actionable mutations #
#################################
actionable_status = \
self.check_actionable(chrom, pos, ref, alt, gene, cosm_aa_chg=None, aa_chg=aa_chg, af=af, clnsig=clnsig) or \
self.check_rob_hedley_actionable(gene, aa_chg, '', '') or \
self.check_by_type_and_region('', '', gene)
if actionable_status:
if 'germline' in actionable_status and af < self.germline_min_freq:
rec.INFO['AZ_artefact'] = 'act germline and AF < ' + str(self.germline_min_freq)
elif af < self.act_min_freq:
rec.INFO['AZ_artefact'] = 'act somatic and AF < ' + str(self.act_min_freq)
else:
rec.INFO['AZ_hotspot'] = actionable_status
else:
if nt_chg_key in self.filter_snp:
rec.INFO['AZ_artefact'] = 'not act and in filter_common_snp'
if nt_chg_key in self.filter_artifacts and af < 0.35:
rec.INFO['AZ_artefact'] = 'not act and in filter_artifacts and AF < 0.35'
# gmaf = fields[headers.index('GMAF')]
# if gmaf and all(not g or float(g) > self.min_gmaf for g in gmaf.split(',')):
# self.reject_mutation('not act and all GMAF > ' + str(self.min_gmaf), fields)
# continue
# if var_class == 'dbSNP':
# self.reject_mutation('clnsig dbSNP', fields)
# continue
# if '-'.join([gene, aa_chg]) in self.snpeff_snp and var_class != 'ClnSNP_known':
# self.reject_mutation('not known and in SnpEff SNP database', fields)
# continue
# if self.min_freq and af < self.min_freq:
# self.reject_mutation('not act and AF < ' + str(self.min_freq) + ' (min_freq)', fields)
# continue
if self.check_blacklist_genes(gene):
rec.INFO['AZ_artefact'] = 'not known and blacklist gene'
# Ignore any variants that occur after last known critical amino acid
# if aa_pos is not None:
# if gene in self.last_critical_aa_pos_by_gene and aa_pos >= self.last_critical_aa_pos_by_gene[gene]:
# self.reject_mutation('variants occurs after last known critical amino acid', fields)
# continue
# blacklisted_reasons = self.check_callability_regions(chrom=chrom, start=pos - 1, end=pos - 1 + len(ref)
return rec
|
468621
|
import unittest
from airmozilla.search.split_search import split_search
class Test(unittest.TestCase):
def shortDescription(self):
return None
def test_basic(self):
""" one free text part, two keywords """
keywords = ('to', 'from')
q = "Peter something to:AAa aa from:Foo bar"
s, params = split_search(q, keywords)
self.assertEqual(s, 'Peter something')
self.assertEqual(params, {'to': 'AAa aa', 'from': 'Foo bar'})
def test_basic_case_insensitive(self):
""" keywords should match case insensivitely """
keywords = ('to', 'from')
q = "something To:Bar From:Foo"
s, params = split_search(q, keywords)
self.assertEqual(s, 'something')
self.assertEqual(params, {'to': 'Bar', 'from': 'Foo'})
def test_unrecognized_keywords(self):
""" free text and keywords we don't support """
keywords = ('something', 'else')
q = "Free text junk: Aaa aaa foo:bar"
s, params = split_search(q, keywords)
self.assertEqual(s, q)
self.assertEqual(params, {})
def test_unrecognized_and_recognized_keywords(self):
""" free text and keywords we don't support """
keywords = ('something', 'else', 'name')
q = "Free text junk: something else name: peter"
s, params = split_search(q, keywords)
self.assertEqual(s, 'Free text junk: something else')
self.assertEqual(params, {'name': 'peter'})
def test_empty_keyword_value(self):
""" free text and an empty keyword """
keywords = ('to',)
q = "Naughty parameter to:"
s, params = split_search(q, keywords)
self.assertEqual(s, "Naughty parameter")
self.assertEqual(params, {'to': ''})
def test_unicode_string(self):
""" test with unicode string input """
keywords = ('from', 'to')
q = u"\xa1 to:\xa2 from:\xa3"
s, params = split_search(q, keywords)
self.assertEqual(s, u'\xa1')
self.assertEqual(params, {u'to': u'\xa2', u'from': u'\xa3'})
def test_invalid_keywords(self):
"""Test to pass invalid keywords"""
keywords = ('to]',)
q = "Peter something to:AAa aa"
self.assertRaises(ValueError, split_search, q, keywords)
def test_with_colon_in_value(self):
"""what if 'tag' is a valid keyword and the value is something
like 'git:foobar'"""
keywords = ['tag']
q = "find this tag: git:foobar"
s, params = split_search(q, keywords)
self.assertEqual(s, 'find this')
self.assertEqual(params, {'tag': 'git:foobar'})
def test_just_a_keyword(self):
"""searching for a term which is the keyword"""
        # this guards against a silly bug found earlier
keywords = ['tag']
q = "tag"
s, params = split_search(q, keywords)
self.assertEqual(s, 'tag')
self.assertEqual(params, {})
|
468622
|
from openmdao.api import Problem,IndepVarComp,ScipyOptimizeDriver
from Classes import Vflutter
from time import ctime, time
import os
name = ctime()
os.makedirs(name)
os.chdir(name)
objectif='mVflutter' # opposite of the critical speed (first unstable mode, either divergence or flutter)
Ori1Dep = 42
Ori2Dep = 42
Start = time()
Prob = Problem()
model = Prob.model
model.add_subsystem('Souplessesyst',Vflutter.Vflutter(),promotes=['*'])
model.add_subsystem('IndepVar1',IndepVarComp('Ori_1',Ori1Dep,units='deg'),promotes=['Ori_1'])
model.add_subsystem('IndepVar2',IndepVarComp('Ori_2',Ori2Dep,units='deg'),promotes=['Ori_2'])
## run optimisation
## COBYLA
Prob.driver = ScipyOptimizeDriver()
Prob.driver.options['optimizer'] = 'COBYLA'
Prob.driver.options['maxiter'] = 1000
Prob.driver.options['tol'] = 1e-9
model.add_design_var('Ori_1', lower=-90, upper=90)
model.add_design_var('Ori_2', lower=-90, upper=90)
model.add_objective(objectif)
Prob.setup()
Prob.run_driver()
print(Prob['Ori_1'])
print(Prob['Ori_2'])
print(Prob[objectif])
End = time()
print("Elapsed time : {0}".format(End-Start))
# Creation of a log file
file = open("logfile.txt","w")
file.write("COBYLA optimisation\n")
file.write(name+"\n")
file.write("Starting orientation 1 = {0}\n".format(Ori1Dep))
file.write("Starting orientation 2 = {0}\n".format(Ori2Dep))
file.write("Lower ply orientation = {0}\n".format(Prob['Ori_1']))
file.write("Upper ply orientation = {0}\n".format(Prob['Ori_2']))
file.write("Objective function ({0}) = {1}\n".format(objectif,Prob[objectif]))
file.write("Elapsed time : {0}".format(End-Start))
|
468635
|
import json
import logging
import os
import sys
LOG = logging.getLogger(__name__)
runtimeValues = {}
# Depth of credscan
credscan_depth = "2"
DEPSCAN_CMD = "/usr/local/bin/depscan"
# Flag to disable telemetry
DISABLE_TELEMETRY = False
# Telemetry server if required here
TELEMETRY_URL = ""
"""
Supported language scan types
"""
scan_types = [
"ansible",
"apex",
"aws",
"bash",
"bom",
"credscan",
"depscan",
"go",
"java",
"jsp",
"kotlin",
"kubernetes",
"nodejs",
"plsql",
"puppet",
"python",
"ruby",
"rust",
"terraform",
"vf",
"vm",
"yaml",
]
# Default ignore list
ignore_directories = [
".git",
".svn",
".mvn",
".idea",
"dist",
"bin",
"obj",
"backup",
"docs",
"tests",
"test",
"tmp",
"reports",
]
def get(configName, default_value=None):
"""Method to retrieve a config given a name. This method lazy loads configuration
values and helps with overriding using a local config
:param configName: Name of the config
:return Config value
"""
try:
value = runtimeValues.get(configName)
if not value:
value = os.environ.get(configName.upper())
if not value:
value = getattr(sys.modules[__name__], configName, None)
return value
except Exception:
return default_value
def set(configName, value):
"""Method to set a config during runtime
:param configName: Config name
:param value: Value
"""
runtimeValues[configName] = value
"""
Mapping for application types to scan tools for projects requiring just a single tool
"""
scan_tools_args_map = {
"ansible": [
"ansible-lint",
*["--exclude " + d for d in ignore_directories],
"--parseable-severity",
"*.yml",
],
"apex": {
"pmd": [
*os.environ["PMD_CMD"].split(" "),
"-no-cache",
"--failOnViolation",
"false",
"-language",
"apex",
"-d",
"%(src)s",
"-r",
"%(report_fname_prefix)s.csv",
"-f",
"csv",
"-R",
os.environ["APP_SRC_DIR"] + "/rules-pmd.xml",
]
},
"aws": ["cfn-lint", "-f", "json", "-e", "%(src)s/**/*.yaml"],
"bom": ["cdxgen", "-o", "%(report_fname_prefix)s.xml", "%(src)s"],
"credscan": [
"gitleaks",
"--depth=" + get("credscan_depth"),
"--repo-path=%(src)s",
"--redact",
"--timeout=2m",
"--report=%(report_fname_prefix)s.json",
"--report-format=json",
],
"bash": [
"shellcheck",
"-a",
"--shell=%(type)s",
"-f",
"json",
"-S",
"error",
"--color=never",
"(filelist=sh)",
],
"depscan": [
get("DEPSCAN_CMD"),
"--no-banner",
"--src",
"%(src)s",
"--report_file",
"%(report_fname_prefix)s.json",
],
"go": {
"gosec": [
"gosec",
"-fmt=json",
"-confidence=medium",
"-severity=medium",
"-no-fail",
"-out=%(report_fname_prefix)s.json",
"./...",
],
"staticcheck": ["staticcheck", "-f", "json", "./..."],
},
"jsp": {
"pmd": [
*os.environ["PMD_CMD"].split(" "),
"-no-cache",
"--failOnViolation",
"false",
"-language",
"jsp",
"-d",
"%(src)s",
"-r",
"%(report_fname_prefix)s.csv",
"-f",
"csv",
"-R",
os.environ["APP_SRC_DIR"] + "/rules-pmd.xml",
]
},
"kotlin": [
"java",
"-jar",
"/usr/local/bin/detekt-cli.jar",
"-i",
"%(src)s",
"-r",
"xml:%(report_fname_prefix)s.xml",
],
"kubernetes": ["kube-score", "score", "-o", "json", "(filelist=yaml)"],
"plsql": {
"pmd": [
*os.environ["PMD_CMD"].split(" "),
"-no-cache",
"--failOnViolation",
"false",
"-language",
"plsql",
"-d",
"%(src)s",
"-r",
"%(report_fname_prefix)s.csv",
"-f",
"csv",
"-R",
os.environ["APP_SRC_DIR"] + "/rules-pmd.xml",
]
},
"puppet": ["puppet-lint", "--error-level", "all", "--json", "%(src)s"],
"rust": ["cargo-audit", "audit", "-q", "--json", "-c", "never"],
"terraform": ["tfsec", "--format", "json", "--no-colour", "%(src)s"],
"vf": {
"pmd": [
*os.environ["PMD_CMD"].split(" "),
"-no-cache",
"--failOnViolation",
"false",
"-language",
"vf",
"-d",
"%(src)s",
"-r",
"%(report_fname_prefix)s.csv",
"-f",
"csv",
"-R",
os.environ["APP_SRC_DIR"] + "/rules-pmd.xml",
]
},
"vm": {
"pmd": [
*os.environ["PMD_CMD"].split(" "),
"-no-cache",
"--failOnViolation",
"false",
"-language",
"vm",
"-d",
"%(src)s",
"-r",
"%(report_fname_prefix)s.csv",
"-f",
"csv",
"-R",
os.environ["APP_SRC_DIR"] + "/rules-pmd.xml",
]
},
"yaml": ["yamllint", "-f", "parsable", "(filelist=yaml)"],
}
"""
This map contains the SARIF purpose string for various tools
"""
tool_purpose_message = {
"nodejsscan": "Static security code scan by NodeJsScan",
"findsecbugs": "Security audit by Find Security Bugs",
"pmd": "Static code analysis by PMD",
"/opt/pmd-bin/bin/run.sh": "Static code analysis by PMD",
"gitleaks": "Secrets audit by gitleaks",
"gosec": "Go security checks by gosec",
"tfsec": "Terraform static analysis by tfsec",
"shellcheck": "Shell script analysis by shellcheck",
"bandit": "Security audit for python by bandit",
"staticcheck": "Go static analysis",
}
# Map to link to the reference for the given rule
tool_ref_url = {
"shellcheck": "https://github.com/koalaman/shellcheck/wiki/SC%(rule_id)s",
"gosec": "https://github.com/securego/gosec#available-rules",
"staticcheck": "https://staticcheck.io/docs/checks#%(rule_id)s",
}
# Build break rules
build_break_rules = {"default": {"max_critical": 0, "max_high": 2, "max_medium": 5}}
# URL for viewing reports online
hosted_viewer_uri = "https://sarifviewer.azurewebsites.net"
def reload():
# Load any .sastscanrc file from the root
if get("SAST_SCAN_SRC_DIR"):
scanrc = os.path.join(get("SAST_SCAN_SRC_DIR"), ".sastscanrc")
if os.path.exists(scanrc):
with open(scanrc, "r") as rcfile:
new_config = json.loads(rcfile.read())
for key, value in new_config.items():
exis_config = get(key)
                    if isinstance(exis_config, dict):
                        # dict.update mutates in place and returns None,
                        # so keep the merged dict rather than the return value
                        exis_config.update(value)
                        set(key, exis_config)
else:
set(key, value)
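# Hypothetical example of a .sastscanrc file placed at the root of the scanned
# source tree (keys mirror the module-level settings above; values are
# illustrative only):
#
#   {
#     "build_break_rules": {
#       "default": {"max_critical": 0, "max_high": 0, "max_medium": 2}
#     },
#     "ignore_directories": [".git", "vendor", "node_modules"]
#   }
#
# reload() merges dict-valued keys such as build_break_rules into the existing
# configuration and replaces other values outright.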
|
468687
|
from typing import List, Tuple
from rlbench.backend.task import Task
from rlbench.const import colors
from rlbench.backend.conditions import NothingGrasped, DetectedCondition
from rlbench.backend.spawn_boundary import SpawnBoundary
import numpy as np
from pyrep.objects.shape import Shape
from pyrep.objects.proximity_sensor import ProximitySensor
from pyrep.objects.dummy import Dummy
class OpenJar(Task):
def init_task(self) -> None:
self.lids = [Shape('jar_lid%d' % i) for i in range(2)]
self.jars = [Shape('jar%d' % i) for i in range(2)]
self.register_graspable_objects(self.lids)
self.boundary = Shape('spawn_boundary')
self.conditions = [NothingGrasped(self.robot.gripper)]
def init_episode(self, index: int) -> List[str]:
b = SpawnBoundary([self.boundary])
success = ProximitySensor('success')
for obj in self.jars:
b.sample(obj, min_distance=0.01)
w0 = Dummy('waypoint0')
w0.set_orientation([-np.pi, 0, -np.pi], reset_dynamics=False)
w0.set_position([0,0,0.1], relative_to=self.lids[index % 2],
reset_dynamics=False)
target_color_name, target_color_rgb = colors[index]
color_choice = np.random.choice(
list(range(index)) + list(
range(index + 1, len(colors))),
size=1, replace=False)[0]
_, distractor_color_rgb = colors[color_choice]
self.jars[index % 2].set_color(target_color_rgb)
other_index = {0: 1, 1: 0}
self.jars[other_index[index % 2]].set_color(distractor_color_rgb)
self.conditions += [DetectedCondition(self.lids[index % 2], success)]
self.register_success_conditions(self.conditions)
return ['open the %s jar' % target_color_name,
'unscrew the %s jar' % target_color_name,
'grasp the lid of the %s jar, unscrew it in an anti_clockwise '
'direction until it is removed from the jar, and leave it on '
'the table top' % target_color_name,
'remove the lid from the %s jam jar and set it down on the '
'table' % target_color_name]
def variation_count(self) -> int:
return len(colors)
def cleanup(self) -> None:
self.conditions = [NothingGrasped(self.robot.gripper)]
def base_rotation_bounds(self) -> Tuple[Tuple[float, float, float],
Tuple[float, float, float]]:
        # This is here to stop the issue of the gripper rotation joint reaching its
        # limit and not being able to go through the full range of rotation to
        # unscrew, leading to a weird jittery and tilted cap while unscrewing.
        # The issue occurred rarely, so this is only minor.
return (0.0, 0.0, -0.6*np.pi), (0.0, 0.0, +0.6*np.pi)
|
468728
|
import compas_rrc as rrc
if __name__ == "__main__":
# Create Ros Client
ros = rrc.RosClient()
ros.run()
# Create ABB Client
abb = rrc.AbbClient(ros, "/rob1")
print("Connected.")
# Set tool
abb.send(rrc.SetTool("tool0"))
# Set work object
abb.send(rrc.SetWorkObject("wobj0"))
# Get frame and external axes
frame, external_axes = abb.send_and_wait(rrc.GetRobtarget())
# Print received values
print(frame, external_axes)
# Change a value of the frame
frame.point[0] -= 50
# Set speed [mm/s]
speed = 100
    # Move robot to the new position
done = abb.send_and_wait(
rrc.MoveToRobtarget(frame, external_axes, speed, rrc.Zone.FINE)
)
# Print feedback
print("Feedback = ", done)
# End of Code
print("Finished")
# Close client
ros.close()
|
468766
|
import random
from schafkopf.game_modes import NO_GAME
from schafkopf.players.player import Player
class RandomPlayer(Player):
"""Random Player that never declares a game mode and randomly plays cards"""
def choose_game_mode(self, options, public_info):
return (NO_GAME, None)
def play_card(self, public_info, options=None):
if options is None:
card = random.choice(self.hand)
else:
card = random.choice(options)
self.hand.remove(card)
return card
class FullyRandomPlayer(Player):
"""Also declares game mode randomly from the options"""
def choose_game_mode(self, options, public_info):
if options is None:
chosen_mode = (NO_GAME, None)
else:
chosen_mode = random.choice(options)
return chosen_mode
def play_card(self, public_info, options=None):
if options is None:
card = random.choice(self.hand)
else:
card = random.choice(options)
self.hand.remove(card)
return card
|
468784
|
x = 7
y = 11
print("x={}, y={}".format(x, y))
# swap: nonpythonic
# temp = x
# x = y
# y = temp
y, x = x, y
print("x={}, y={}".format(x, y))
|
468795
|
from multiprocessing import *
from ctypes import *
def f():
print(1)
# string_at(1)
print(2)
if __name__ == '__main__':
p = Process(target=f)
p.start()
p.join()
print(3, p.exitcode)
|
468827
|
from pkg_resources import parse_version
import os, requests
# The default namespace for our tagged container images
DEFAULT_TAG_NAMESPACE = "adamrehn"
class GlobalConfiguration(object):
"""
Manages access to the global configuration settings for ue4-docker itself
"""
@staticmethod
def getLatestVersion():
"""
Queries PyPI to determine the latest available release of ue4-docker
"""
releases = [
parse_version(release)
for release in requests.get("https://pypi.org/pypi/ue4-docker/json").json()[
"releases"
]
]
return sorted(releases)[-1]
@staticmethod
def getTagNamespace():
"""
Returns the currently-configured namespace for container image tags
"""
return os.environ.get("UE4DOCKER_TAG_NAMESPACE", DEFAULT_TAG_NAMESPACE)
@staticmethod
def resolveTag(tag):
"""
Resolves a Docker image tag with respect to our currently-configured namespace
"""
# If the specified tag already includes a namespace, simply return it unmodified
return (
tag
if "/" in tag
else "{}/{}".format(GlobalConfiguration.getTagNamespace(), tag)
)
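# Minimal usage sketch (illustrative only, not part of the original module). It assumes the
# UE4DOCKER_TAG_NAMESPACE environment variable is unset, so the default namespace applies.
def _demo_resolveTag():
    # A bare tag gets prefixed with the configured namespace ...
    assert GlobalConfiguration.resolveTag("ue4-build") == "adamrehn/ue4-build"
    # ... while a tag that already contains a namespace is returned unmodified.
    assert GlobalConfiguration.resolveTag("custom/ue4-build") == "custom/ue4-build"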
|
468854
|
from hamcrest.library.text.substringmatcher import SubstringMatcher
from hamcrest.core.helpers.hasmethod import hasmethod
__author__ = "<NAME>"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class StringContains(SubstringMatcher):
def __init__(self, substring):
super(StringContains, self).__init__(substring)
def _matches(self, item):
if not hasmethod(item, 'find'):
return False
return item.find(self.substring) >= 0
def relationship(self):
return 'containing'
def contains_string(substring):
"""Matches if object is a string containing a given string.
:param substring: The string to search for.
This matcher first checks whether the evaluated object is a string. If so,
it checks whether it contains ``substring``.
Example::
contains_string("def")
will match "abcdefg".
"""
return StringContains(substring)
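# Minimal usage sketch (illustrative only, not part of the original module). It assumes the
# standard PyHamcrest layout, where assert_that is importable from the top-level hamcrest package.
def _demo_contains_string():
    from hamcrest import assert_that
    assert_that("abcdefg", contains_string("def"))  # passes: "def" is a substring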
|
468887
|
import io
import csv
import glob
import os.path
import constants
import re
import sys
import itertools
import operator # for getbds4opt4start()
import subprocess, shlex
import errno # for makedir()
# check character string
def is_None_empty_whitespace(mystr):
if mystr and mystr.strip():
# mystr is not None AND mystr is not empty or whitespaces
return False
# mystr is None OR mystr is empty or whitespaces
return True
# This code assumes that anything that can be iterated over may contain other elements and so should not be
# considered a leaf in the "tree". If an attempt to iterate over an object fails, then it is not a sequence, and hence
# certainly not an empty sequence (thus False is returned). Finally, this code makes use of the fact that all()
# returns True if its argument is an empty sequence.
# Note: seq should not contain any character string except ''.
def isEmpty(seq):
try:
return all(map(isEmpty, seq))
except TypeError:
return False
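# Minimal usage sketch (illustrative only, not part of the original module): nested sequences
# that contain only empty sequences count as empty; non-iterables make a sequence non-empty.
def _demo_isEmpty():
    assert isEmpty([])                 # empty list
    assert isEmpty([[], ('',), []])    # nested empties only
    assert not isEmpty([[], [0]])      # 0 is not iterable, so the sequence is not empty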
def read_file(file):
fp = open(file, 'r')
content = fp.read()
fp.close()
return content
# Write content into file filePath
def write2file(filePath = None, content = None):
directory = os.path.dirname(filePath)
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as error:
if error.errno != errno.EEXIST:
raise
with open(filePath, 'w') as fp:
fp.write(content)
# Split a multi-sequence FASTA file containing multiple FASTA sequences into individual FASTA files
# whose file names include the sequence id.
def split_tandem_fasta(huge_fasta_file, output_path):
mfastaFileName = os.path.basename(huge_fasta_file)
fp_fasta = open("/dev/null", "r")
with open(huge_fasta_file, "r") as fp:
for line in fp:
line = line.strip()
if len(line) == 0:
continue
if line[0] == '>':
fp_fasta.close()
seqid = line[1:].split(maxsplit=1)[0]
fasta_file_name = '.'.join([mfastaFileName, seqid])
fasta_file = os.path.join(output_path, fasta_file_name)
fp_fasta= open(fasta_file, "w")
fp_fasta.write(line+'\n')
fp_fasta.close()
# This function returns a generator, using a generator comprehension. The generator returns the string sliced,
# from 0 + a multiple of the length of the chunks, to the length of the chunks + a multiple of the length of the chunks.
# example:
# s = 'CATTCGTCT', tuple(chunkstring(s, 3)) = ('CAT', 'TCG', 'TCT')
# s = 'CATTCGTC', tuple(chunkstring(s, 3)) = ('CAT', 'TCG', 'TC')
#
def chunkstring(string, length):
return (string[i : i + length] for i in range(0, len(string), length))
# Translate gene sequence into peptide sequence.
# Return: 'MKLQFP.....RPQ', peptide sequence represented by single letter
#
# gene2pepTable: (table1, ..., table11, ...)
# table11: {'starts': (initialcodon, ..., initialcondon), codon: aa, ..., codon: aa}
# codon: 'TTT' or 'TTC' or 'TTG' or ... or 'TGA' or 'GGG'
# aa: 'F' or 'L' or ... or '*' or 'G', amino acid when codon is not the initial codon
#
# geneSeq: gene sequence, e.g. 'CATTCGTCT'; its length must be a multiple of 3,
# or the last one (or two) letters will be ignored.
#
def gene2pep(table='11', geneSeq=''):
if len(geneSeq) < 9:
print('Warning: mini ORF with length < 9 will not be translated', geneSeq)
return ''
pep = []
first3 = geneSeq[:3]
geneCodons = chunkstring(geneSeq[3:], 3)
# translate the first codon in gene sequence
if first3 in constants.gene2pepTable[table]['starts']:
# it is a start codon
pep.append('M')
else:
# it is not a start codon
aa = constants.gene2pepTable[table][first3]
if aa == '*':
print('hello stop codon', first3)
else:
pep.append(aa)
# stop translation if the first codon is the stop codon
#if '*' in pep:
# return '*'
# translate the remaining codons; stop codons are reported but not appended.
for codon in geneCodons:
# the last codon might be a single-letter string or double-letter string.
if len(codon) != 3:
continue
aa = constants.gene2pepTable[table][codon]
if aa == '*':
print('hello stop codon', codon)
# break
else:
pep.append(aa)
return ''.join(pep)
# Format sequence into FASTA format and return it with header
# header: message for header line in fasta format
# seq: sequence to format into FASTA with lineWidth single-letter codes per line
def fasta_format(header, seq):
header = '>' + header
fasta_seq = ''
lineWidth = constants.fastaLineWidth
#for row in chunkstring(seq, lineWidth):
# fasta_seq += row + '\n'
fasta_seq = '\n'.join(chunkstring(seq, lineWidth))
fasta = '{}\n{}\n'.format(header, fasta_seq)
#fasta = header + '\n' + fasta_seq
return fasta
# header, seq: character string
def fastaFormat(header, seq):
header = '>' + header
fasta_seq = ''
fasta_seq = '\n'.join(chunkstring(seq, constants.fastaLineWidth))
fasta = '\n'.join([header, fasta_seq])
return fasta
# get both compound identifier and content of fasta sequence
# seqs: [(id, seq), ..., (id, seq)]
# seq: character string
def getFasta(fastaFile):
fp = open(fastaFile, 'r')
seqs = []
seq = []
id = ''
for line in fp:
if line.replace('\b','').strip() == '':
continue # remove blank line
if line[0] == '>':
if len(seq) > 0:
seqs.append((id, ''.join(seq)))
id = line[1:].split(maxsplit=1)[0]
seq = []
continue
seq.append(line.strip())
if len(seq) > 0:
seqs.append((id, ''.join(seq)))
else:
print('No sequence in', fastaFile)
fp.close()
return seqs
# seqs: [(id, seq), ..., (id, seq)]
# seq: character string
def getFasta_idseq(fastaFile):
fp = open(fastaFile, 'r')
seqs = []
seq = []
id = ''
for line in fp:
if line.strip() == '':
continue # remove blank line
if line[0] == '>':
if len(seq) > 0:
seqs.append((id, ''.join(seq)))
seq = []
id = line[1:].split(maxsplit=1)[0]
continue
seq.append(line.strip())
if len(id) > 0:
seqs.append((id, ''.join(seq)))
else:
print('No sequence in', fastaFile)
fp.close()
return seqs
# get both header and content of fasta sequence
# seqs: [(header, seq), ..., (header, seq)]
# seq: character string
def getFastaFull(file):
seqs = []
with open(file, 'r') as fp:
seq = []
for line in fp:
#line = line.replace('\b', '').strip()
line = line.strip()
if line == '':
continue # remove blank line
if line[0] == '>':
if len(seq) > 0:
seqs.append((header, ''.join(seq)))
seq = []
header = line[1:] # header
continue
seq.append(line)
seqs.append((header, ''.join(seq)))
return seqs
# write IS elements in a genome into a csv file
# genome_is: list, [] or [['ISDge6', 'IS5', 'ISL2', '-', '-', '-', '-', '36785', '36563', '802', '1024', '802', 'Partial'],
# ['ISDge2', 'IS1', '.', '+', '37705', '38452', '748', '-', '-', '-', '-', '', '-'], ...]
# write an empty file if genome_is is empty
def output_csv(csvfile, genome_is):
with open(csvfile, 'w', newline='') as f:
writer = csv.writer(f, lineterminator = '\n', quoting = csv.QUOTE_MINIMAL)
writer.writerows(genome_is)
# Input
# csvfile: character string, full-path file name pointing to a csv file
# rowList: [row, ...]
# row: (item, ...) or [item, ...]
def writeCsvFile(csvfile, rowList, delimiter=','):
with open(csvfile, 'w', newline='') as fp:
spamwriter = csv.writer(fp, delimiter=delimiter, lineterminator='\n')
spamwriter.writerows(rowList)
# get isfinder genome annotation from csv files
def isfinder_IS_in_genome(isfinder_genome_file):
isfinder_genome = []
if not os.path.isfile(isfinder_genome_file):
return isfinder_genome
fp = open(isfinder_genome_file, newline='')
reader = csv.reader(fp, delimiter=',')
for row in reader:
isfinder_genome.append(row)
fp.close()
return isfinder_genome
# Group elements in a sequence by keys indexed by (i, ..., j).
# sequence: (seq1, ..., seq, ..., seqn)
# seqn: [s1, ..., sn]
# index: (i, ..., j), both i and j are integer
# keys: (key1, ..., key, ..., keyn)
# key: (seq[index_i], ..., seq[index_j])
# item_id: dictionary, {key1: [seq1, ..., seqm], ..., keyn: [seq3, ..., seqn]}
def group_by_key(sequence, index):
item_id = {}
for item in sequence:
key = []
for i in index:
key.append(item[i])
key = tuple(key)
if key not in item_id:
item_id[key] = [item]
else:
item_id[key].append(item)
return item_id
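# Minimal usage sketch (illustrative only, not part of the original module): group rows by the
# values found at indices (0, 2).
def _demo_group_by_key():
    rows = [('IS3', 'x', '+'), ('IS3', 'y', '+'), ('IS5', 'z', '-')]
    grouped = group_by_key(rows, (0, 2))
    assert grouped[('IS3', '+')] == [('IS3', 'x', '+'), ('IS3', 'y', '+')]
    assert grouped[('IS5', '-')] == [('IS5', 'z', '-')]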
# Linearly rescale data values having observed oldMin and oldMax into a new arbitrary range newMin to newMax
# newValue = (newMax - newMin)/(oldMax - oldMin) * (value - oldMin) + newMin
# or
# newValue = a * value + b, where a = (newMax - newMin)/(oldMax - oldMin) and b = newMin - a * oldMin
def rescale(data, newMin, newMax):
oldMin, oldMax = min(data), max(data)
n = len(data)
# all values are equal
if not (oldMin < oldMax):
return (newMin,) * n
a = (newMax - newMin)/(oldMax - oldMin)
b = newMin - a * oldMin
newData = []
for value in data:
newData.append(a * value + b)
return newData
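# Minimal usage sketch (illustrative only, not part of the original module): values are mapped
# linearly onto the new range; identical values collapse to newMin (returned as a tuple).
def _demo_rescale():
    assert rescale([10, 20, 30], 0.0, 20.0) == [0.0, 10.0, 20.0]
    assert rescale([5, 5, 5], 0.0, 1.0) == (0.0, 0.0, 0.0)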
# return True if there is any digit in string
def hasNumbers(string):
return any(char.isdigit() for char in string)
# return True if there is a pair of round brackets in string
# Note: return False if ')' precedes '('.
def hasBrackets(s):
start = s.find('(')
if start == -1:
return False
start += 1
return ')' in s[start:]
# return string within '(' and ')'
def extract(s):
start = s.find('(')
if start == -1:
return ''
start += 1
end = s.find(')', start)
if end == -1:
return ''
else:
return s[start:end]
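# Minimal usage sketch (illustrative only, not part of the original module).
def _demo_brackets():
    assert hasBrackets('IS3(subgroup)')
    assert not hasBrackets('no brackets here')
    assert extract('IS3(subgroup)') == 'subgroup'
    assert extract('mismatched)(') == ''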
# Parse alignment file output by water in EMBOSS and return the alignment
# waterFile: character string, output returned by water
#
# columns: seq1, seq2:
# 1->6 sequence number of the first residue in alignment
# 7 space character
# 8->-1 aligned sequence with gaps
# columns: marker
# 1->6 space characters
# 7 space character
# 8->-1 character string composed of '|' (match), '.' (mismatch) and ' ' (gap)
def getAlignByWater(waterFile):
align = []
# if waterFile is file, then open it to readin
# elif waterFile is not a file, then just process it as regular character string.
if os.path.isfile(waterFile):
fp = open(waterFile)
waterFile = fp.read()
fp.close()
else:
waterFile = waterFile.decode()
scoreMarker = '# Score:'
indexScore = waterFile.find(scoreMarker) + len(scoreMarker)
score = float(waterFile[indexScore: waterFile.find('\n', indexScore)])
#print(waterFile)
#print('hello', waterFile[indexScore: waterFile.find('\n', indexScore)])
lines = [line for line in waterFile.split('\n') if line.strip() != '' and line[0] != '#']
if len(lines) > 0:
start1, end1 = int(lines[0][14:20]), int(lines[-3][-6:])
end2, start2 = int(lines[2][14:20]), int(lines[-1][-6:])
#print('hello1', lines[0][14:20], lines[-3][-6:], lines[2][14:20], lines[-1][-6:])
seq1 = marker = seq2 = ''
for tlines in zip(*[iter(lines)]*3):
seq1 += tlines[0][21:-7]
marker += tlines[1][21:]
seq2 += tlines[2][21:-7]
align = [score, (seq1, seq2, marker), (start1, end1, start2, end2)]
return align
# Given a DNA sequence strand, Return its complementary sequence strand
# mode:
# '1', seq is composed of only uppercase letters;
# '2', seq is composed of only lowercase letters;
# '3', seq can be the mixture of lower and upper case letters.
def complementDNA(seq, mode):
if mode == '1':
return seq.translate(str.maketrans(constants.na1u, constants.na2u))
elif mode == '2':
return seq.translate(str.maketrans(constants.na1l, constants.na2l))
elif mode == '3':
return seq.translate(str.maketrans(constants.na1ul, constants.na2ul))
else:
print('Error: incorrect mode in complementDNA', file=sys.stderr)
exit(0)
# Return a cleaned DNA sequence copy where non-standard bases are replaced by 'N'.
def cleanDNA(seq):
bases = []
#stdBases = 'ATCGRYN'
stdBases = 'ATCG'
for base in seq:
if base.upper() in stdBases:
bases.append(base)
else:
bases.append('N')
return ''.join(bases)
# convert the cigar string returned by the SSW python interface into pairs of one number and one character
# for example:
# Return [(4, 'M'), (2, 'I'), (8, 'M'), (1, 'D'), (10, 'M'), (6, 'S')] if cigarString == '4M2I8M1D10M6S'
def parseCigarString(cigarString):
return [(int(pair[:-1]), pair[-1]) for pair in re.findall(r'\d+[MIDNSHP=X]', cigarString)]
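# Minimal usage sketch (illustrative only, not part of the original module), reproducing the
# example given in the comment above.
def _demo_parseCigarString():
    assert parseCigarString('4M2I8M1D10M6S') == [
        (4, 'M'), (2, 'I'), (8, 'M'), (1, 'D'), (10, 'M'), (6, 'S')]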
# print an alignment into a string
# seq1, seq2: sequences of two aligned DNA strand segment
# cigarStr: '4M2I8M1D10M6S'
# cigarPair: [(4, 'M'), (2, 'I'), (8, 'M'), (1, 'D'), (10, 'M'), (6, 'S')]
# Note: for details of the cigar string, please check the document "The SAM Format Specification",
# http://samtools.github.io/hts-specs/SAMv1.pdf, particularly the "An example" section and
# "CIGAR: CIGAR string" section.
#
def buildAlignment(sequence1, sequence2, align, cigarStr):
begin1, end1, begin2, end2 = align.ref_begin+1, align.ref_end+1, align.query_begin+1, align.query_end+1
header = { 'conflict': False,
'score': align.score,
'begin1': begin1,
'end1': end1,
'begin2': begin2,
'end2': end2}
seq1name = 'seq1'
seq2name = 'seq2'
line1 = '{:<10} '.format(seq1name)
line2 = '{:<10} '.format(' ')
line3 = '{:<10} '.format(seq2name)
index1, index2 = 0, 0
line1 += '{:>8} '.format(begin1 + index1)
line3 += '{:>8} '.format(begin2 + index2)
line2 += '{:>8} '.format(' ')
# build alignment from the range defined by
# sequence1[align.ref_begin: align.ref_end+1] and sequence2[align.query_begin: align.query_end+1]
seq1 = sequence1[align.ref_begin: align.ref_end+1]
seq2 = sequence2[align.query_begin: align.query_end+1]
cigarPair = parseCigarString(cigarStr)
for pair in cigarPair:
if pair[1] == 'I':
line1 += '-' * pair[0]
line3 += seq2[index2: index2+pair[0]]
line2 += ' ' * pair[0]
index2 += pair[0]
elif pair[1] == 'D':
line1 += seq1[index1: index1+pair[0]]
line3 += '-' * pair[0]
line2 += ' ' * pair[0]
index1 += pair[0]
elif pair[1] == 'M':
s1 = seq1[index1: index1+pair[0]]
s2 = seq2[index2: index2+pair[0]]
line1 += s1
line3 += s2
for c1, c2 in zip(s1, s2):
if c1 == c2:
line2 += '|'
else:
line2 += '*'
index1 += pair[0]
index2 += pair[0]
elif pair[1] != 'S':
e = cigarStr + ' submittedSeq1:' + sequence1 + ' submittedSeq2:' + sequence2
raise RuntimeError(e)
#print(pair)
#print(line1)
#print(line2)
#print(line3)
line1 += ' {:<8}'.format(align.ref_begin + index1)
line3 += ' {:<8}'.format(align.query_begin + index2)
if align.ref_begin + index1 != align.ref_end+1 or align.query_begin + index2 != align.query_end+1:
header['conflict'] = True
header['end1'] = align.ref_begin + index1
header['end2'] = align.query_begin + index2
'''
fmt = 'Warning: alignment path conflicts with aligned range reported by SSW!\n'
fmt += ' seq1Begin:{} seq1Alinged:{} seq1End:{} seq2Begin:{} seq2Alinged:{} seq2End:{}\n'
fmt += ' cigarStr:{}\n alignedSeq1:{}\n alignedSeq2:{}\n submittedSeq1:{}\n submittedSeq2:{}'
w = fmt.format(
align.ref_begin+1, index1, align.ref_end+1,
align.query_begin+1, index2, align.query_end+1,
cigarStr,
seq1, seq2,
sequence1, sequence2)
print(w)
'''
return (header, line1, line2, line3)
# Build the match line between two aligned sequences.
# Return matchLine
# matchLine: character string, for example, '|*|*  **|'
# seq1: character string, for example, 'TAGG--AGC'
# seq2: character string, for example, 'TGGACGGCC'
def buildMatchLine(seq1, seq2):
matchLine = ''
for c1, c2 in zip(seq1, seq2):
if c1 == c2:
matchLine += '|'
elif c1 == '-' or c2 == '-':
matchLine += ' '
else:
matchLine += '*'
return matchLine
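# Minimal usage sketch (illustrative only, not part of the original module), using the example
# sequences from the comment above: matches give '|', gaps give ' ', mismatches give '*'.
def _demo_buildMatchLine():
    assert buildMatchLine('TAGG--AGC', 'TGGACGGCC') == '|*|*  **|'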
# Shorten ir to a reasonable length
# Rules to shorten:
# Discard the aligned blocks when irId/irLen < 0.7 with irLen < constants.stringentShortestIR
# or irLen > constants.stringentLongestIR
# Discard the aligned blocks when irId/irLen < 0.6 with constants.stringentShortestIR <= irLen <= constants.stringentLongestIR
#
# ir: [] or [score, irId, irLen, nGaps, start1, end1, start2, end2, seq1, seq2]
#
def shortenIR(ir):
if len(ir) == 0:
return ir
line = buildMatchLine(ir[8], ir[9])
# for example line = '||||||*| ||*||| || ||| | | |||| *| |*||*|||||||||||||||'
# blocks = [list(g) for k,g in itertools.groupby(line)]
# blocks = [ ['|', '|', '|', '|', '|', '|'],
# ['*'],
# ['|'],
# [' '],
# ['|', '|'],
# ['*'],
# ['|', '|', '|'],
# [' '],
# ['|', '|'],
# [' '],
# ['|', '|', '|'],
# [' '],
# ['|'],
# [' ', ' ', ' '],
# ['|'],
# [' '],
# ['|', '|', '|', '|'],
# [' '],
# ['*'],
# ['|'],
# [' '],
# ['|'],
# ['*'],
# ['|', '|'],
# ['*'],
# ['|', '|', '|', '|', '|', '|', '|', '|', '|', '|', '|', '|', '|', '|', '|']]
# len(blocks) = 26
#ids = []
irId = 0
irLen = 0
for k,g in itertools.groupby(line):
g = list(g)
blockLen = len(g)
irLen += blockLen
if g[0] == '|':
irId += blockLen
#ids.append((irId, irLen))
#
# break loop, namely, stop growing alignment
if irId/irLen < constants.optIrIdentity:
break
elif (irLen < constants.stringentShortestIR or irLen > constants.stringentLongestIR) and irId/irLen < constants.stringentIrIdentity:
break
# ir: [] or [score, irId, irLen, nGaps, start1, end1, start2, end2, seq1, seq2]
# Build shorten ir
score = irId + irLen
nGaps = line[:irLen].count(' ')
seq1 = ir[8][:irLen]
seq2 = ir[9][:irLen]
gap1 = seq1.count('-')
gap2 = seq2.count('-')
end1 = ir[4] + irLen - gap1 - 1
end2 = ir[6] + irLen - gap2 - 1
return [score, irId, irLen, nGaps, ir[4], end1, ir[6], end2, seq1, seq2]
# Filter ir by applying more stringent criteria:
# ir: [] or [score, irId, irLen, nGaps, start1, end1, start2, end2, seq1, seq2]
# optIRsim, stringentIRsim: float, constants.optIrIdentity and constants.stringentIrIdentity
#
# 1) irId/irLen must be greater than stringentIrIdentity (0.7) if irLen < 5 or irLen > 55, else irId/irLen > optIrIdentity (0.6)
#
def filterIRbyCutoff(ir, optIRsim, stringentIRsim):
if len(ir) == 0:
return []
sim = ir[1]/ir[2]
if sim < optIRsim:
ir = []
elif (ir[2] < 5 or ir[2] > 55) and sim < stringentIRsim:
ir = []
return ir
# Find the number of matches in core regions, where a core region is composed of only consecutive matches.
# seq1: character string, for example, 'TAGGG--AGGC'
# seq2: character string, for example, 'TGGGACGGCGC'
# irIdCore: integer, number of matches in core regions
def getIrIdCore(seq1, seq2):
matchLine = buildMatchLine(seq1, seq2)
# search consecutive matches in matchLine
irIdCore = 0
# Option1:
# 1) core region composed of at least two consecutive matched bases
#for region in re.findall(r'\|{2,}', matchLine):
#
# Option2:
# 1) core region, composed of at least three consecutive matched bases
for region in re.findall(r'\|{3,}', matchLine):
#
irIdCore += len(region)
return irIdCore
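# Minimal usage sketch (illustrative only, not part of the original module): only runs of at
# least three consecutive matches contribute to irIdCore.
def _demo_getIrIdCore():
    # match line for these sequences is '|*|||||*|', so the single core run has length 5
    assert getIrIdCore('TAGGGGGAC', 'TTGGGGGCC') == 5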
# Return an empirical score for an ir. The greater the score is, the better the ir is.
# ir: [] or [score, irId, irLen, nGaps, start1, end1, start2, end2, seq1, seq2]
#
def irScore(ir):
if len(ir) == 0:
# set a very negative value as the score of no TIR
score = -9999.9
else:
irIdCore = getIrIdCore(ir[-2], ir[-1])
# mismatch = irLen - nGaps - irId
# irIdNonCore = irId - irIdCore
# Option0:
# score = 3(irIdCore - nGaps) + irIdNonCore - mismatch
# = 2(irIdCore - nGaps) + irId - (nGaps + mismatch)
# = 2(irIdCore - nGaps) + 2*irId - iLen
# = 2(irIdCore + irId - nGaps) - irLen
score = 2 * (irIdCore + ir[1] - ir[3]) - ir[2]
# score = irIdCore - nGaps + x
# Option1: x = irIdNonCore - mismatch
# score = irIdCore - nGaps + x
# = irIdCore - nGaps + (irIdNonCore - mismatch)
# = irIdCore + irIdNonCore - (nGaps + mismatch)
# = irId - (nGaps + mismatch)
# = 2*irId - irLen
# = irId - (nGaps + mismatch)
# Here, score == 0 means irId/irLen == 50%
#score = 2 * ir[1] - ir[2]
#
# Option2: x = (irIdNonCore - mismatch) / (irIdNonCore + mismatch + 1)
# Here, irIdNonCore == mismatch means x contributes zero to final score.
#
# Pay special attention to the following cases:
# irIdCore == nGaps and/or irIdNonCore == mismatch == 0
#
# score = irIdCore - nGaps + x
# socre = irIdCore - nGaps +
# (irId - irIdCore - (irLen-nGaps-irId)) / (irLen - irIdCore - nGaps + 1)
# = irIdCore - nGaps +
# (2*irId - irIdCore - irLen + nGaps)
# / (irLen - irIdCore - nGaps + 1)
# = irIdCore - nGaps +
# (2*irId - irLen - (irIdCore - nGaps))
# / (irLen -2*nGaps - (irIdCore - nGaps) + 1)
#
# set y = irIdCore - nGaps, then
# score = y + (2*irId - irLen - y) / (irLen - 2*nGaps - y + 1)
#y = irIdCore - ir[3]
#score = y + (2*ir[1] - ir[2] - y) / (ir[2] - 2*ir[3] - y + 1)
#
# Option3: x = - (irLen - match) /irLen
# score = irIdCore -nGaps + x
# = irIdCore -nGaps - (irLen - irId)/irLen
# = irIdCore -nGaps - (1 - irId/irLen)
# = irIdCore + irId/irLen - nGaps - 1
#score = irIdCore + ir[1]/ir[2] - ir[3] - 1
#
# Option4: x = (irIdNonCore - mismatch)/2
# score = 2*(irIdCore - nGaps) + irIdNonCore - mismatch
# = 3*irIdCore - 2*nGaps - mismatch
# = 3*irIdCore - nGaps - (irLen - irId)
#score = 3*irIdCore - ir[3] - ir[2] + ir[1]
return score
# Build name of matrix file holding match and mismatch values
# Matrix file example:
# EDNAFULL.2.6.IR.water, EDNAFULL.3.4.IR.water, EDNAFULL.3.6.IR.water, EDNAFULL.5.4.IR.water
def convert2matrixFile(match, mismatch, dir2matrix):
return os.path.join(dir2matrix, 'EDNAFULL.{}.{}.IR.water'.format(match, - mismatch))
# File name, EDNAFULL.2.6.IR.water, means match and mismatch are 2 and -6 respectively.
# example:
# matrixFile: EDNAFULL.2.6.IR.water
# Return: (2, -6)
def resolveMatrixFileName(matrixFile):
fileName = os.path.basename(matrixFile)
if fileName == '':
print('Error: matrix file required', matrixFile, file=sys.stderr)
exit(0)
else:
digits = fileName.split('.',3)[1:3]
return (int(digits[0]), - int(digits[1]))
# Convert (gapopen, gapextend, matrixfile) to (gapopen, gapextend, match, mismatch)
# filters4water: [filter4water, ..., filter4water]
# filter4water: (gapopen, gapextend, matrixfile)
# Return: filters
# filters: [filter, ..., filter]
# filter: (gapopen, gapextend, match, mismatch)
def convertFilters4water(filters4water):
filters = []
for filter in filters4water:
match, mismatch = resolveMatrixFileName(filter[2])
# convert mismatch from a negative value to a positive value
filters.append((filter[0], filter[1], match, -mismatch))
return filters
# Convert (gapopen, gapextend, match, mismatch) to (gapopen, gapextend, matrixfile)
# filters: [filter, ..., filter]
# filter: (gapopen, gapextend, match, mismatch), here mismatch > 0
# dir4embossdata: directory holding the matrix files used by water program
# filters4water: [filter4water, ..., filter4water]
# filter4water: (gapopen, gapextend, matrixfile)
def convertFilters2water(filters, dir2matrix):
filters4water = []
for filter in filters:
matrixFile = convert2matrixFile(filter[2], -filter[3], dir2matrix)
filters4water.append((filter[0], filter[1], matrixFile))
return filters4water
# Return filters shared by filters4ssw and filtersFromWater
def commonFilters(filters4ssw, filtersFromWater):
filters = []
for filter1 in filters4ssw:
for filter2 in filtersFromWater:
if filter1 == filter2:
filters.append(filter1)
return filters[-1:]
# A single measure that trades off precision versus recall is the F measure,
# which is the weighted harmonic mean of precision and recall:
# Fbeta = (1 + beta**2) * recall * precision / (recall + beta**2 * precision)
# Fbeta attaches beta times as much importance to recall as precision.
# Reference:
# <NAME>, <NAME> and <NAME>,
# Introduction to Information Retrieval, Cambridge University Press. 2008
#
# In our practice, we use beta = 1 or 2,
# F1 = 2 * recall * precision / (recall + precision)
# F2 = 5 * recall * precision / (recall + 4*precision)
# Here, precision = 1 - fdr, recall = sensitivity, fdr (false discovery rate) = false positive hit rate = nfp/nhits
def fmeasure(recall, precision, beta):
denominator = recall + beta**2 * precision
if denominator > 0:
return (1 + beta**2) * recall * precision / denominator
else:
return 0.0
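# Minimal worked example (illustrative only, not part of the original module):
# with recall = 0.8, precision = 0.5 and beta = 1, F1 = 2*0.8*0.5/(0.8+0.5) ~= 0.615.
def _demo_fmeasure():
    f1 = fmeasure(0.8, 0.5, 1)
    assert abs(f1 - (2 * 0.8 * 0.5) / (0.8 + 0.5)) < 1e-12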
# Check if hit piece is overlapped with isfinder annotation
# 0: hit and isfinder are located on different strands
# overlap/min(b-a, d-c): overlap > 0 if it is overlap, else overlap <= 0
# Note: IS elements do not depend on strand while genes depend on strand.
# So, we don't need to compare strands when examining the overlap between
# two IS elements.
#
# The is_overlap() is retained as the alternative of is_overlap_min() or is_overlap_max()
# for the code compatibility.
def is_overlap(hit_strand, hit_begin, hit_end, strand, begin, end):
#if hit_strand != strand:
# return 0.0
# Simply ignore strand
a, b, c, d = hit_begin, hit_end, begin, end
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
if overlap > 0:
#return float(overlap) / (min(b-a, d-c) + 1)
return float(overlap) / (max(b-a, d-c) + 1)
#return float(overlap) / (b-a + 1)
else:
return 0.0
def is_overlap_min(hit_strand, hit_begin, hit_end, strand, begin, end):
# Simply ignore strand
a, b, c, d = hit_begin, hit_end, begin, end
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
if overlap > 0:
return float(overlap) / (min(b-a, d-c) + 1)
else:
return 0.0
def is_overlap_max(hit_strand, hit_begin, hit_end, strand, begin, end):
# Simply ignore strand
a, b, c, d = hit_begin, hit_end, begin, end
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
if overlap > 0:
return float(overlap) / (max(b-a, d-c) + 1)
else:
return 0.0
# Check if two ORFs (genes) are overlapped
# 0: hit and isfinder are located on different strands
# overlap/min(b-a, d-c): overlap > 0 if it is overlap, else overlap <= 0
#
def orf_overlap(orf1, orf2):
hit_strand, hit_begin, hit_end = orf1
strand, begin, end = orf2
if hit_strand != strand:
return 0.0
a, b, c, d = hit_begin, hit_end, begin, end
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
if overlap > 0:
return float(overlap) / (min(b-a, d-c) + 1)
else:
return 0.0
# Check if piece1 is overlapped with piece2.
# p1: (a, b), a <= b, float or int
# p2: (c, d), c <= d, float or int
#
def overlap(p1, p2):
a, b = p1
c, d = p2
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
if overlap > 0:
#return float(overlap) / (min(b-a, d-c) + 1)
return float(overlap) / (max(b-a, d-c) + 1)
#return float(overlap) / (b-a + 1)
else:
return 0.0
def overlap_min(p1, p2):
a, b = p1
c, d = p2
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
if overlap > 0:
return float(overlap) / (min(b-a, d-c) + 1)
else:
return 0.0
def intersection(p1, p2):
a, b = p1
c, d = p2
# a <=b and c <= d are satisfied
overlap = min(b, d) - max(a, c) + 1
return overlap
def intergap(p1, p2):
a, b = p1
c, d = p2
# a <=b and c <= d are satisfied
gap = max(a, c) - min(b, d) - 1
return gap
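# Minimal usage sketch (illustrative only, not part of the original module): the intervals
# (1, 10) and (6, 20) share 5 bases; overlap() normalizes by the longer interval while
# overlap_min() normalizes by the shorter one, and intergap() measures the gap between
# non-overlapping intervals.
def _demo_intervals():
    assert intersection((1, 10), (6, 20)) == 5
    assert abs(overlap((1, 10), (6, 20)) - 5 / 15) < 1e-12
    assert abs(overlap_min((1, 10), (6, 20)) - 5 / 10) < 1e-12
    assert intergap((1, 10), (12, 20)) == 1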
# makeblastdb -dbtype nucl -in output4FragGeneScan1.19_illumina_5/NC_002754.1.fna.ffn -out blastdb/NC_002754.1.fna.ffn
def seq2blastdb(seqFile, db):
cmd = constants.makeblastdb
cmdline = [cmd, '-dbtype nucl', '-in', seqFile, '-out', db]
do_cmd = shlex.split(' '.join(cmdline))
subprocess.check_call(do_cmd, shell=False, universal_newlines=False, stdout=subprocess.DEVNULL)
# delete the file pointing by full path f.
# f: /path/to/file, a character string
def deleteFile(f):
cmd = 'rm'
cmdline = [cmd, f]
do_cmd = shlex.split(' '.join(cmdline))
subprocess.check_call(do_cmd, shell=False, universal_newlines=False, stdout=subprocess.DEVNULL)
# Search all IS elements (Tpase) ORFs against IS element (Tpase ORF) database.
# command: blastn -query /home/data/insertion_sequence/output4FragGeneScan1.19_illumina_5/NC_002754.1.fna.ffn \
# -db /home/data/insertion_sequence/blastdb/NC_002754.1.fna.ffn -out blast.6 \
# -outfmt '6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore \
# nident qlen slen' -perc_identity 90 -dust no
# Note1: There are 0 hits when querying 1471545_1471675_- against NC_011891.1.orf.fna because blastn by default
# enables filtering of the query sequence with DUST to mask low-complexity repeat sequences in the query.
# So we must disable it with '-dust no'.
# Note2: blastn-short is BLASTN program optimized for sequences shorter than 50 bases. We will use it in blastn search
# when dealing with tir sequence as tir is usually shorter than 55 bases.
def doBlastn(query, db, out, strand='both', task='megablast', perc_ident=100):
blast = constants.blastn
outfmt = shlex.quote('6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen')
#perc_identity = str(constants.sim4iso * 100)
perc_identity = str(perc_ident)
if task == 'blastn-short':
wordsize = '7'
#wordsize = '5' # default value for blastn-short is 7 but we use smaller value because the length of tir is usually among 5<= length <=55.
#wordsize = '4' # wordsize must >= 4
elif task == 'megablast':
wordsize = '28' # default value for megablast
else:
wordsize = '11' # default value for blastn
cmd = [blast, '-query', query, '-db', db, '-out', out, '-outfmt', outfmt, '-perc_identity', perc_identity,
'-strand', strand, '-dust', 'no', '-task', task, '-word_size', wordsize]
do_cmd = shlex.split(' '.join(cmd))
if subprocess.call(do_cmd, shell=False, universal_newlines=False, stdout=subprocess.DEVNULL) != 0:
e = 'Fail to run {}'.format(cmd)
raise RuntimeError(e)
def blastnSearch(query, db, out, strand='both', task='megablast'):
blast = constants.blastn
outfmt = shlex.quote('6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen')
#perc_identity = str(constants.sim4iso * 100)
perc_identity = str(constants.SIM4ISO)
if task == 'blastn-short':
wordsize = '7'
#wordsize = '5' # default value for blastn-short is 7 but we use smaller value because the length of tir is usually among 5<= length <=55.
#wordsize = '4' # wordsize must >= 4
elif task == 'megablast':
wordsize = '28' # default value for megablast
else:
wordsize = '11' # default value for blastn
cmd = [blast, '-query', query, '-db', db, '-out', out, '-outfmt', outfmt, '-perc_identity', perc_identity, '-strand', strand, '-dust', 'no', '-task', task, '-word_size', wordsize]
do_cmd = shlex.split(' '.join(cmd))
if subprocess.call(do_cmd, shell=False, universal_newlines=False, stdout=subprocess.DEVNULL) != 0:
e = 'Fail to run {}'.format(cmd)
raise RuntimeError(e)
# Search all IS elements (Tpase) ORFs against IS element (Tpase ORF) database.
# Return out
#
# command: blastn -db db -perc_identity 90 -strand strand -dust no -task task -word_size wordsize -num_threads nthreads \
# -outfmt '6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen'
# Note1: query and outfile are stdin and stdout by default, respectively
# Note2: blastn-short is BLASTN program optimized for sequences shorter than 50 bases. We will use it in blastn search
# when dealing with tir sequence as tir is usually shorter than 55 bases.
#
def doBlastnOnStream(query, db, strand='both', task='megablast', perc_ident=100, nthreads=1):
blast = constants.blastn
outfmt = shlex.quote('6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen')
perc_identity = str(perc_ident)
num_threads = str(nthreads)
if task == 'blastn-short':
wordsize = '7'
#wordsize = '5' # default value for blastn-short is 7 but we use smaller value because the length of tir is usually among 5<= length <=55.
#wordsize = '4' # wordsize must >= 4
elif task == 'megablast':
wordsize = '28' # default value for megablast
else:
wordsize = '11' # default value for blastn
cmd = [blast,
'-db', db, '-perc_identity', perc_identity, '-strand', strand, '-dust', 'no',
'-task', task, '-word_size', wordsize, '-num_threads', num_threads,
'-outfmt', outfmt
]
do_cmd = shlex.split(' '.join(cmd))
blastn = subprocess.Popen(do_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
out, err = blastn.communicate(input=query)
return (out, err)
# Search protein sequence against protein database.
# command: blastp -db db -evalue 1e-10 -task task -num_threads nthreads \
# -outfmt '6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen'
# Note1: query and outfile are stdin and stdout by default, respectively
def doBlastpOnStream(query, db, task='blastp', e_value=1e-10, nthreads=1):
blast = constants.blastp
outfmt = shlex.quote('6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen')
evalue = str(e_value)
num_threads = str(nthreads)
cmd = [blast,
'-db', db, '-evalue', evalue, '-task', task, '-num_threads', num_threads,
'-outfmt', outfmt
]
do_cmd = shlex.split(' '.join(cmd))
blastn = subprocess.Popen(do_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
out, err = blastn.communicate(input=query)
return (out, err)
# blastn -query query -subject subject ....
def doBlastn2seqOnStream(nthread, query, subject, strand='both', task='megablast', perc_ident=100):
blast = constants.blastn
outfmt = shlex.quote('6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen')
perc_identity = str(perc_ident)
if task == 'blastn-short':
wordsize = '7'
#wordsize = '4' # wordsize must >= 4
elif task == 'megablast':
wordsize = '28' # default value for megablast
else:
wordsize = '11' # default value for blastn
num_threads = str(nthread)
cmd = [blast,
'-subject', subject, '-perc_identity', perc_identity, '-strand', strand, '-dust', 'no',
'-task', task, '-word_size', wordsize, '-outfmt', outfmt
#'-task', task, '-word_size', wordsize, '-outfmt', outfmt, '-num_threads', num_threads
# 'num_threads' is currently ignored when 'subject' is specified.
]
do_cmd = shlex.split(' '.join(cmd))
blastn = subprocess.Popen(do_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
out, err = blastn.communicate(input=query)
return (out, err)
# Get IS element copy number from the file output by blast search
# The output is produed by blastn with options:
# -outfmt '6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send nident qlen slen' \
# -perc_identity 90
# For example:
# command: blastn -query /home/data/insertion_sequence/output4FragGeneScan1.19_illumina_5/NC_002754.1.fna.ffn \
# -db /home/data/insertion_sequence/blastdb/NC_002754.1.fna.ffn -out blast.6 \
# -outfmt '6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore \
# nident qlen slen' -perc_identity 90 -dust no
# Read blast output and return hitpairs
# hitpairs: [hitpair, ..., hitpair]
# hitpair: {'qseqid': qseqid, 'sseqid': sseqid, \
# 'pident': pident, 'length': length, 'nident': nident, 'qlen': qlen, 'slen': slen}
#
# Input:
# file: blast output file
# min4coverage: coverage cutoff = alignedLength/min(qlen,slen)
# seqtype: 'regular' for non-short sequence, 'short' for short sequence like tir in IS element
def getBlastout(file, min4coverage):
fp = open(file, 'r')
hits = []
for line in fp:
words = line.split()
pident = float(words[2])
# retain only hit with exact silva full-length gene sequence
if pident < 100:
continue
length = int(words[3])
nident = int(words[12])
qlen = int(words[13])
slen = int(words[14])
# retain only long gene sequence
if length < 1200:
continue
# retain only hit with exact silva full-length gene sequence
#if nident != slen:
# continue
# check the coverage of aligned length vs min(querylen, subjectlen),
#if length < int(min(qlen,slen) * min4coverage):
# continue
hit = {}
hit['qseqid'] = words[0]
hit['sseqid'] = words[1]
hit['pident'] = float(words[2])
hit['length'] = length
hit['nident'] = nident
# coordinates of alignment in query sequence and subject sequence
hit['qstart'] = int(words[6])
hit['qend'] = int(words[7])
hit['sstart'] = int(words[8])
hit['send'] = int(words[9])
hit['qlen'] = qlen
hit['slen'] = slen
hits.append(hit)
return hits
def getBlastResult(file, min4coverage):
fp = open(file, 'r')
hits = []
for line in fp:
words = line.split()
qlen = int(words[13])
slen = int(words[14])
pident = float(words[2])
length = int(words[3])
# check the coverage of aligned length vs min(querylen, subjectlen)
if length < int(min(qlen,slen) * min4coverage):
continue
hit = {}
# words[0] example: 'gi|15896971|ref|NC_002754.1|_1_4458_+', 'NC_002754.1_1_4458_+',
# '1_4458|15_4443_+', '15_4443_+', '0', '11'
hit['qseqid'] = words[0]
# words[1] example: 'gi|15896971|ref|NC_002754.1|_1_4458_+' or 'NC_002754.1_1_4458_'
# '1_4458|15_4443_+', '15_4443_+', '0', '11'
hit['sseqid'] = words[1]
hit['pident'] = float(words[2])
hit['length'] = length
hit['nident'] = int(words[12])
hit['qlen'] = qlen
hit['slen'] = slen
hits.append(hit)
return hits
# get results produced by blastp
def getBlastpResultOnStream(filec):
fp = io.StringIO(filec)
hits = []
ids4query = set()
for line in fp:
line = line.strip()
words = line.split()
# -outfmt '6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore nident qlen slen'
qseqid,sseqid,pident,length,mismatch,gapopen,qstart,qend,sstart,send,evalue,bitscore,nident,qlen,slen = words
# Only get the first hit for each query sequence, as the hits related to the same query are ordered by evalue and we just need the IS family name from the subject hit
# with the smallest evalue.
if qseqid in ids4query:
continue
else:
ids4query.add(qseqid)
hit = {}
hit['qseqid'] = qseqid
hit['sseqid'] = sseqid
hit['length'] = length
hit['qstart'] = qstart
hit['qend'] = qend
hit['sstart'] = int(sstart)
hit['send'] = int(send)
hit['evalue'] = float(evalue)
hit['nident'] = int(nident)
hit['qlen'] = qlen
hit['slen'] = int(slen)
hit['pident'] = float(pident)
hits.append(hit)
return hits
def getBlastResult4dnaOnStream(filec):
fp = io.StringIO(filec)
hits = []
for line in fp:
words = line.split()
# check the coverage of aligned length vs min(querylen, subjectlen)
# ignore the alignment with aligned length < isMin (usually isMin = 400, namely, shortest IS element)
length = int(words[3])
#if length < constants.isMin:
# continue
#items = words[0].rsplit('_', maxsplit=6)
#minLen4is = int(items[1]) # minimal length of IS in the specific family which ORF belongs to
# word[0]: seqid_family_cluster_seqbegin_seqend_orfBegin_orfEnd_orfStrand
items = words[0].rsplit('_', maxsplit=7)
family = items[1]
# cluster = items[2]
minLen4is = constants.minMaxLen4is[family][0]
if length < minLen4is:
continue
seqbegin = int(items[3])
seqend = int(items[4])
orfBegin = int(items[5])
orfEnd = int(items[6])
orfStrand = items[7]
qlen = int(words[13])
if seqend-seqbegin+1 != qlen:
e = 'Error: different lengths for query sequence {}: {}({}-{}+1) {}'.format(
words[0], seqend-seqbegin+1, seqend, seqbegin, qlen)
raise RuntimeError(e)
start = int(words[6])
end = int(words[7])
move = start - 1
if orfStrand == '+':
qstart = seqbegin + move
qend = qstart + length - 1
else:
qend = seqend - move
qstart = qend - length + 1
orfLen = orfEnd - orfBegin + 1
intersect = intersection((orfBegin, orfEnd), (qstart, qend))
if intersect < 1:
continue
copy = False
#minLen4orf4tpase = constants.minMax4tpase[family][0]
#maxLen4orf4tpase = constants.minMax4tpase[family][1]
minLen4orf4pep = constants.minMax4tpase[family][2]
#
# Note: it means gene prediction may not be accurate when the predicted ORF is longer
# than the longest tpase ORF in the IS family.
#if intersect < orfLen*constants.minOverlap4orf2aligned:
#
# alignment overlapped with whole orf or part of orf
#if orfLen <= maxLen4orf and intersect >= orfLen*constants.minOverlap4orf2aligned:
#if orfLen <= maxLen4orf and intersect >= orfLen*0.9:
#
# alignment covers the full orf, namely, length >= orfLen and intersect == orfLen
#if orfLen <= maxLen4orf and qstart <= orfBegin < orfEnd <= qend:
#if orfLen <= maxLen4orf and intersect == orfLen:
#if orfLen <= maxLen4orf and intersect == orfLen:
# copy = True
# alignment overlapped with part of orf
#if orfLen > maxLen4orf and intersect >= orfLen*constants.minOverlap4orf2aligned4longORF:
#if orfLen > maxLen4orf and intersect >= orfLen*0.5:
#if orfLen > maxLen4orf and intersect >= minLen4orf:
if orfLen >= minLen4orf4pep:
if intersect >= minLen4orf4pep:
copy = True
else:
if intersect >= orfLen:
copy = True
if copy == False:
#print('intersection({}) between {} and {} in alignment is less than threshold: \n{}'.format(
# intersect, (orfBegin, orfEnd, orfStrand), (qstart, qend), line))
continue
#else:
#print('hello copy', intersect, (orfBegin, orfEnd, orfStrand), (qstart, qend))
hit = {}
# words[0] example: 'gi|556503834|ref|NC_000913.3|_IS200/IS605_12_1502643_1505379_1503157_1504865_+'
hit['qseqid'] = words[0]
hit['orfBegin'] = orfBegin
hit['orfEnd'] = orfEnd
# words[1] example: 'gi|15896971|ref|NC_002754.1|
hit['sseqid'] = words[1]
hit['length'] = length
hit['qstart'] = qstart
hit['qend'] = qend
hit['sstart'] = int(words[8])
hit['send'] = int(words[9])
hit['nident'] = int(words[12])
hit['qlen'] = qlen
hit['slen'] = int(words[14])
hit['pident'] = float(words[2])
hits.append(hit)
return hits
# create dir if it does not exist yet
def makedir(dir):
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError as error:
if error.errno != errno.EEXIST:
raise
# Get the genome coordinates of each gene from proteome file
# Return genes
#
# genes: [(strand, begin, end), ..., (strand, begin, end)]
# strand: character, '+' or '-'
# begin, end: int, genome coordinates
def get_proteome(proteome_file):
genes = []
fp = open(proteome_file, 'r')
for line in fp:
if line[0] != '>':
continue
location = line.strip().rsplit('_', 3)[-3:]
genes.append((location[2], int(location[0]),int(location[1])))
fp.close()
return genes
# cdss: [(seqid, id,seq), ...]
# seqid: sequence identifier, e.g. SRS075404_LANL_scaffold_1, C3691328
# id: cds identifier, e.g. SRS075404_LANL_scaffold_1_1_414_+, C3691328_7626_8378_-
def getcds(file):
cdss = []
seq = []
with open(file, 'r') as fp:
for line in fp:
line = line.strip()
if line == '':
continue # remove blank line
if line[0] == '>':
if len(seq) > 0:
cdss.append((seqid, id,''.join(seq)))
id = line[1:].split(maxsplit=1)[0]
seqid = id.rsplit('_',maxsplit=3)[0]
seq = []
continue
seq.append(line)
cdss.append((seqid, id,''.join(seq)))
return cdss
# read GenBank .fna file and return the identifier of the sequence
def rdGbFna(file):
fp = open(file, 'r')
for line in fp:
if line.replace('\b','').strip() == '':
continue # remove blank line
if line[0] == '>':
seqid = line.strip().split(maxsplit=1)[0][1:]
break
fp.close()
return seqid
# read GenBank .faa file and return the peptide sequences
# prot: [p, ..., p]
# p: {'id': id, 'pid': pid, 'seq': seq, ...}
def rdGbFaa(file):
prot = []
seq = []
fp = open(file, 'r')
for line in fp:
if line.replace('\b','').strip() == '':
continue # remove blank line
if line[0] == '>':
if len(seq) > 0:
p['seq'] = ''.join(seq)
prot.append(p)
p = {}
p['id'] = line[1:].split(maxsplit=1)[0]
p['pid'] = p['id'].split('|',2)[1]
seq = []
continue
seq.append(line.strip())
fp.close()
p['seq'] = ''.join(seq)
prot.append(p)
return prot
# read GenBank .ptt file and return the locations and PIDs
# geneLoc: [loc, ..., loc]
# loc: {'loc': [begin, end, strand], 'pid': pid}
# begin, end: character string
# pid: character string
def rdGbPtt(file):
geneLoc = []
fp = open(file, 'r')
# remove heading lines in .ptt file
for line in fp:
if line[:8] == 'Location':
break
# get data
for line in fp:
loc = {}
items = line.split(maxsplit=4)
locations = items[0].split('..', 1)
loc['loc'] = [locations[0], locations[1], items[1]]
loc['pid'] = items[3]
geneLoc.append(loc)
fp.close()
return geneLoc
# Convert GenBank protein info (NC_000913.faa and NC_000913.ptt)
# into FragGeneScan protein file format(NC_000913.fna.faa)
def gb2fgs4protein(gbFna, gbFaa, gbPtt, fgs):
# make directories
makedir(os.path.dirname(fgs))
fp4fgs = open(fgs, 'w')
# seqid: eg. 'gi|15644634|ref|NC_000915.1|'
seqid = rdGbFna(gbFna)
prot = rdGbFaa(gbFaa)
geneLoc = rdGbPtt(gbPtt)
for p in prot:
for loc in geneLoc:
if p['pid'] == loc['pid']:
locStr = '_'.join(loc['loc'])
fasta = fastaFormat('_'.join([seqid, locStr]), p['seq'])
print(fasta, file=fp4fgs)
break
fp4fgs.close()
# gbk: {'accver':accver, 'gi':gi, 'prots':prots, 'seq':seq, ...}
# accver: accession.version, eg. 'NC_000915.1'
# gi: gi, eg. '15644634', character string
# prots: [prot, ..., prot]
# prot: {'orf':orf, 'pep': pep}
# orf: (start, end, strand), genome coordinates of cds in .gbk file,
# start and end are int, strand is character
# pep: amino acid sequence, character string
# seq: nucleic acid sequence
def rdGbk(gbkFile):
gbk = {}
gbk['prots'] = []
fp4gbkFile = open(gbkFile, 'r')
for line in fp4gbkFile:
if 'VERSION ' == line[:12]:
items = line[12:].split(maxsplit=2)
gbk['accver'] = items[0]
if len(items) > 1:
gbk['gi'] = items[1]
continue
if ' CDS ' == line[:21]:
if line[21] == 'c' and line[32].isdigit():
strand = '-'
coord = line[32:].strip().strip(')').strip().split('..', maxsplit=1)
elif line[21].isdigit():
strand = '+'
coord = line[21:].strip().split('..', maxsplit=1)
# not normal CDS, for example:
# ' CDS join(58474..59052,59052..59279)'
# ' CDS complement(join(380844..381259,382591..382872))'
else:
continue
while True:
nextLine = next(fp4gbkFile)
if ' /translation=' == nextLine[:34]:
nopep = False
break
# special case in NC_000913.gbk:
# CDS(272847..273954) has no translated protein sequence.
elif len(nextLine[:21].strip()) > 0:
nopep = True
break
if nopep == True:
continue
prot = {}
start, end = [int(x) for x in coord]
prot['orf'] = (start, end, strand)
gbk['prots'].append(prot)
pep = []
if nextLine[36:].strip()[-1] != '"':
pep.append(nextLine[36:].strip())
# short peptide sequence hold by a single line in .gbk file
else:
gbk['prots'][-1]['pep'] = nextLine[36:].strip()[:-1]
continue
nextLine = next(fp4gbkFile).strip()
while nextLine[-1] != '"':
pep.append(nextLine)
nextLine = next(fp4gbkFile).strip()
pep.append(nextLine[:-1])
gbk['prots'][-1]['pep'] = ''.join(pep)
continue
if 'ORIGIN' == line.strip():
seq = []
nextLine = next(fp4gbkFile)
while nextLine[:2] != '//':
seq.append(nextLine[10:].strip().replace(' ',''))
nextLine = next(fp4gbkFile)
gbk['seq'] = ''.join(seq).upper()
fp4gbkFile.close()
return gbk
# Convert GenBank protein info (NC_000913.gbk) into FragGeneScan protein file format (NC_000913.fna.faa)
def gbk2fgs4protein(fnaFile, gbkFile, fgsFile):
# make directories
makedir(os.path.dirname(fgsFile))
fp4fgsFile = open(fgsFile, 'w')
# seqid: eg. 'gi|15644634|ref|NC_000915.1|'
seqid = rdGbFna(fnaFile)
# gbk: {'accver':accver, 'gi':gi, 'prots':prots, 'seq':seq, ...}
gbk = rdGbk(gbkFile)
if seqid.strip('|').rsplit('|', maxsplit=1)[1] != gbk['accver']:
print('Warning: {} and {} may not be the same sequence'.format(fnaFile, gbkFile))
for prot in gbk['prots']:
orfStr = '_'.join([str(x) for x in prot['orf']])
header = '_'.join([seqid, orfStr])
fasta = fastaFormat(header, prot['pep'])
print(fasta, file=fp4fgsFile)
fp4fgsFile.close()
# For each orf, replace seqid with accid
def seqid2accid(mhits):
mhitsNew = {}
for seqid in mhits:
# seqid: 'gi|220910783|ref|NC_011886.1|'
# accid: 'NC_011886.1'
accid = seqid.rstrip('|').rsplit('|',1)[-1]
hitsNew = []
for hit in mhits[seqid]:
begin, end, strand = hit['orf'][1:]
hit['orf'] = (accid, begin, end, strand)
hitsNew.append(hit)
mhitsNew[accid] = hitsNew
return mhitsNew
# For each orf, replace seqid with fileid
def seqid2fileid(mhits):
mhitsNew = {}
for seqid in mhits:
# seqid: 'gi|220910783|ref|NC_011886.1|'
# accid: 'NC_011886.1'
accid = seqid.rstrip('|').rsplit('|',1)[-1]
fileid = accid.split('.', maxsplit=1)[0]
hitsNew = []
for hit in mhits[seqid]:
begin, end, strand = hit['orf'][1:]
hit['orf'] = (fileid, begin, end, strand)
hitsNew.append(hit)
mhitsNew[fileid] = hitsNew
return mhitsNew
# replace non-ATCGU letters with unknown in seq
# seq: single letter DNA/RNA sequence
def qc4fna(seq, unknown='N'):
return re.sub('[^ATCGU]', unknown, seq.upper())
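# Minimal usage sketch (illustrative only, not part of the original module): lower-case input is
# upper-cased and anything outside ATCGU is masked with 'N'.
def _demo_qc4fna():
    assert qc4fna('atxgryn') == 'ATNGNNN'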
# dnaFiles: [(file, org), ..., (file, org)]
def rdDNAlist(dnaListFile):
dnaFiles = []
fp = open(dnaListFile, 'r')
for line in fp:
file = line.strip()
if file == '' or file[0] == '#':
continue
dirs = os.path.dirname(file)
org = os.path.basename(dirs)
dnaFiles.append((file, org))
fp.close()
return dnaFiles
# Read and return the summarization in file created by outputIndividual() in pred.py
# return: [] or [nis, %genome, bps4is, len4DNA, familySum]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
#
# Note: return [1] if sumFileByOrg is the *.sum file created by outputIS4multipleSeqOneFile() in pred.py.
def getSumByOrg(sumFileByOrg, org):
if os.path.isfile(sumFileByOrg):
fp2sumByOrg = open(sumFileByOrg, 'r')
else:
print('In getSumByOrg() in tools.py: no such file to read', sumFileByOrg)
return []
familySum = {}
sumByOrg = []
flag4hmpformat = False
for line in fp2sumByOrg:
if 'dnaLen' in line:
flag4hmpformat = True
break
if line[0] == '#':
continue # remove comment line
if line[:5] == 'total':
sumByOrg = line.split()
break
if line[:6] == 'family' or line[:6] == '------':
continue # title line
if line.replace('\b','').strip() == '':
continue # remove blank line
items = line.split()
familySum[items[0]] = [int(items[1]), float(items[2]), int(items[3])]
fp2sumByOrg.close()
if flag4hmpformat == True:
# Return [1] to notify the calling code that the *.sum file (sumFileByOrg) is in the format
# which requires getSumByOrg4hmp() instead of getSumByOrg() to process.
elif len(sumByOrg) > 0:
return [int(sumByOrg[1]), float(sumByOrg[2]), int(sumByOrg[3]), int(sumByOrg[4]), familySum]
else:
return []
# Read and return the summarization in file written by outputIS4multipleSeqOneFile() in pred.py
# return: [] or [nis, %genome, bps4is, seqLen4bps, familySum, seqLen]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
def getSumByOrg4hmp(sumFileByOrg, org):
if os.path.isfile(sumFileByOrg):
fp2sumByOrg = open(sumFileByOrg, 'r')
else:
print('In getSumByOrg4hmp() in tools.py: no such file to read', sumFileByOrg)
return []
familySum = {}
sumByOrg = []
dnaLen4is = {}
# dnaLen4is: {seqid:seqlen, ...}
for line in fp2sumByOrg:
line = line.strip()
if line[0] == '#' or 'family ' in line:
continue # remove comment line and title line
if line.replace('\b','').strip() == '':
continue # remove blank line
items = line.split()
if items[1] == 'total':
#sumByOrg = line.split()
sumByOrg = items
break
seqid = items[0]
family = items[1]
nis = int(items[2])
bps4is = int(items[4])
seqlen4is = int(items[5])
#familySum[items[0]] = [int(items[1]), float(items[2]), int(items[3])]
#print('hello', sumFileByOrg, items[1:5])
if family not in familySum.keys():
familySum[family] = [0, 0.0, 0]
familySum[family] = [familySum[family][0]+nis, 0.0, familySum[family][2]+bps4is]
if seqid not in dnaLen4is.keys():
dnaLen4is[seqid] = seqlen4is
fp2sumByOrg.close()
if len(sumByOrg) > 0:
seqLen4is = sum(dnaLen4is.values())
dnaLen = int(sumByOrg[5])
for family,value in familySum.items():
familySum[family][1] = 100 * familySum[family][2] / dnaLen # percentage
return [int(sumByOrg[2]), float(sumByOrg[3]), int(sumByOrg[4]), seqLen4is, familySum, dnaLen]
else:
return []
familyNames = [
'IS1',
'IS110',
'IS1182',
'IS1380',
'IS1595',
'IS1634',
'IS200/IS605',
'IS21',
'IS256',
'IS3',
'IS30',
'IS4',
'IS481',
'IS5',
'IS6',
'IS607',
'IS630',
'IS66',
'IS701',
'IS91',
'IS982',
'ISAS1',
'ISAZO13',
'ISH3',
'ISKRA4',
'ISL3',
'ISNCY',
'new',
]
# output summarization for IS elements for an organism or multiple organisms
# sum: {seqid: sum4seq, ..., seqid: sum4seq}
# sum4seq: [] or [nis, %genome, bps4is, dnaLen4is, familySum, dnaLen, ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
def output4sum(sum4is, outfile):
fmt4title4families = ' {:>11} {:>11} {:>10}'
fmt4families = ' {:>11} {:>11.2g} {:>10}'
fp = open(outfile, 'w')
fmt4title = '{:<90} {:>6} {:>7} {:>10} {:>10}'
fmt = '{:<90} {:>6} {:>7.2g} {:>10} {:>10}'
# print headline in table
fp.write(fmt4title.format('organism', 'nIS', '%genome', 'bps4IS', 'dnaLen4is'))
familyNames.sort()
for family in familyNames:
fp.write(fmt4title4families.format(family, '%genome', 'bps4IS'))
fp.write('\n')
# print data in table
# initialize variables
nis2sum = 0
bps4is2sum = 0
dnaLen2sum = 0
nis4family2sum = {}
bps4is4family2sum = {}
for family in familyNames:
nis4family2sum[family] = 0
bps4is4family2sum[family] = 0
# sum: {seqid: sum4seq, ..., seqid: sum4seq}
# sum4seq: [] or [nis, %genome, bps4is, len4DNA, familySum]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
for org in sorted(sum4is.keys()):
sum4org = sum4is[org]
# initialize data for each family
nis4org, percent4org, bps4is, dnaLen = 0, 0.0, 0, 0
familySum = {}
for family in familyNames:
familySum[family] = [0, 0.0, 0]
# get available data for each family
if len(sum4org) > 0:
nis4org, percent4org, bps4is, dnaLen = sum4org[:4]
for family, value in sum4org[4].items():
familySum[family] = value
fp.write(fmt.format(org, nis4org, percent4org, bps4is, dnaLen))
nis2sum += nis4org
bps4is2sum += bps4is
dnaLen2sum += dnaLen
# write data for each IS element family
for family in sorted(familySum.keys()):
nis, percent, bps4is = familySum[family]
fp.write(fmt4families.format(nis, percent, bps4is))
nis4family2sum[family] += nis
bps4is4family2sum[family] += bps4is
fp.write('\n')
# print total summary in table
fp.write(fmt.format('total', nis2sum, (bps4is2sum/dnaLen2sum)*100, bps4is2sum, dnaLen2sum))
for family in sorted(familySum.keys()):
fp.write(fmt4families.format(nis4family2sum[family],
(bps4is4family2sum[family]/dnaLen2sum)*100,
bps4is4family2sum[family]))
fp.write('\n')
fp.close()
# output summarization for IS elements for an organism or multiple organisms
# sum: {seqid: sum4seq, ..., seqid: sum4seq}
# sum4seq: []
# or [nis, %genome, bps4is, dnaLen4is, familySum, dnaLen, ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage]
# familySum: {family: [nis, %genome, bps4is, norg4is], ..., family: [nis, %genome, bps4is, norg4is]}
def output4sumFull(sum4is, outfile):
fmt4title4families = ' {:>11} {:>13} {:>15} {:>13}'
fmt4families = ' {:>11} {:>13.2g} {:>15} {:>13}'
fp = open(outfile, 'w')
fmt4title = '{:<90} {:>6} {:>7} {:>15} {:>15} {:>15} {:>10} {:>7} {:>11} {:>8} {:>9} {:>6}'
fmt = '{:<90} {:>6} {:>7.2g} {:>15} {:>15} {:>15} {:>10} {:>7} {:>11} {:>8} {:>9} {:>6}'
# print headline in table
fp.write(fmt4title.format(
'organism', # name of species
'nIS', # number of ISs occurring in the specific species
'%genome', # bps4is / dnaLen
'bps4is', # bps covered by IS
'dnaLen4is', # bps of DNA sequence where IS occurs
'dnaLen', # bps of DNA sequences in the specific species
'ngenome4is', # number of chromosome DNA sequences where IS occurs
'ngenome', # number of chromosome DNA sequences in the specific species
'nplasmid4is', # number of plasmid DNA sequences where IS occurs
'nplasmid', # number of plasmid DNA sequences in the specific species
'nphage4is', # number of phage DNA sequences where IS occurs
'nphage' # number of phage DNA sequences in the specific species
))
familyNames.sort()
for family in familyNames:
fp.write(fmt4title4families.format(
family, # family name
family+'_%', # family_bps / dnaLen
family+'_bps', # bps covered by the specific family
family+'_s' # number of species where the specific family occurs
))
fp.write('\n')
# print data in table
# initialize variables
nis2sum, bps4is2sum, dnaLen4is2sum = 0, 0, 0
dnaLen2sum = 0
ngenome4is2sum, ngenome2sum, nplasmid4is2sum, nplasmid2sum, nphage4is2sum, nphage2sum = 0, 0, 0, 0, 0, 0
nis4family2sum = {}
bps4is4family2sum = {}
nstatus4is4family2sum = {}
for family in familyNames:
nis4family2sum[family] = 0
bps4is4family2sum[family] = 0
nstatus4is4family2sum[family] = 0
# sum: {seqid: sum4seq, ..., seqid: sum4seq}
# sum4seq: [] or [nis, %genome, bps4is, len4DNA, familySum]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
for org in sorted(sum4is.keys()):
sum4org = sum4is[org]
# initialize data for each family
nis4org, percent4org, bps4is, dnaLen4is = 0, 0.0, 0, 0
dnaLen = 0
ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage = 0, 0, 0, 0, 0, 0
familySum = {}
for family in familyNames:
familySum[family] = [0, 0.0, 0]
# get available data for each family
if len(sum4org) > 0:
nis4org, percent4org, bps4is, dnaLen4is = sum4org[:4]
dnaLen, ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage = sum4org[5:]
for family, value in sum4org[4].items():
familySum[family] = value
fp.write(fmt.format(org, nis4org, percent4org, bps4is, dnaLen4is,
dnaLen, ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage))
nis2sum += nis4org
bps4is2sum += bps4is
dnaLen4is2sum += dnaLen4is
dnaLen2sum += dnaLen
ngenome4is2sum += ngenome4is
ngenome2sum += ngenome
nplasmid4is2sum += nplasmid4is
nplasmid2sum += nplasmid
nphage4is2sum += nphage4is
nphage2sum += nphage
# write data for each IS element family
for family in sorted(familySum.keys()):
nis, percent, bps4is = familySum[family]
if nis > 0:
status = 1
else:
status = 0
fp.write(fmt4families.format(nis,
percent, bps4is, status
))
nis4family2sum[family] += nis
bps4is4family2sum[family] += bps4is
nstatus4is4family2sum[family] += status
fp.write('\n')
# print total summary in table
if dnaLen2sum == 0:
percentByBps = 0
else:
percentByBps = (bps4is2sum/dnaLen2sum)*100
fp.write(fmt.format('total', nis2sum, percentByBps, bps4is2sum, dnaLen4is2sum,
dnaLen2sum, ngenome4is2sum, ngenome2sum, nplasmid4is2sum, nplasmid2sum, nphage4is2sum, nphage2sum))
for family in sorted(familySum.keys()):
if dnaLen2sum == 0:
percentByBps2sum = 0
else:
percentByBps2sum = (bps4is4family2sum[family]/dnaLen2sum)*100
fp.write(fmt4families.format(nis4family2sum[family],
percentByBps2sum,
bps4is4family2sum[family],
nstatus4is4family2sum[family]
))
fp.write('\n')
fp.close()
# Read and return the summarization in file written by output4sum(sum4is, outfile)
# return: [] or [nis, %genome, bps4is, len4DNA, familySum]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
def getSum(sumFileByOrg, org):
if os.path.isfile(sumFileByOrg):
fp = open(sumFileByOrg, 'r')
else:
print('No valid IS element was found for', org)
return []
familySum = {}
sumByAll = []
for line in fp:
if line[:5] == 'total':
sumByAll = line.split()
break
if line[:8] == 'organism':
familys = line.split()[5:][0::3] # get IS family names
familySum = {}
if len(sumByAll) > 0:
nis = int(sumByAll[1])
percent = float(sumByAll[2])
bps4is = int(sumByAll[3])
len4DNA = int(sumByAll[4])
data4familys = sumByAll[5:]
for i, family in enumerate(familys):
familySum[family] = [int(data4familys[i*3]), float(data4familys[i*3+1]), int(data4familys[i*3+2])]
return [nis, percent, bps4is, len4DNA, familySum]
else:
return []
# Read and return the summarization in file written by output4sumFull(sum4is, outfile)
# return: []
# or [nis, %genome, bps4is, dnaLen4is, familySum, dnaLen, ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage]
# familySum: {family: [nis, %genome, bps4is], ..., family: [nis, %genome, bps4is]}
def getSumFull(sumFileByOrg, org):
if os.path.isfile(sumFileByOrg):
fp = open(sumFileByOrg, 'r')
else:
print('In getSumFull() in tools.py: no valid IS element was found for', org)
return []
familySum = {}
sumByAll = []
for line in fp:
if line[:5] == 'total':
sumByAll = line.split()
break
if line[:8] == 'organism':
familys = line.split()[12:][0::4] # get IS family names
familySum = {}
if len(sumByAll) > 0:
nis = int(sumByAll[1])
percent = float(sumByAll[2])
bps4is = int(sumByAll[3])
dnaLen4is = int(sumByAll[4])
dnaLen = int(sumByAll[5])
ngenome4is = int(sumByAll[6])
ngenome = int(sumByAll[7])
nplasmid4is = int(sumByAll[8])
nplasmid = int(sumByAll[9])
nphage4is = int(sumByAll[10])
nphage = int(sumByAll[11])
data4familys = sumByAll[12:]
for i, family in enumerate(familys):
familySum[family] = [int(data4familys[i*4]), float(data4familys[i*4+1]), int(data4familys[i*4+2])]
return [nis, percent, bps4is, dnaLen4is, familySum, dnaLen, ngenome4is, ngenome, nplasmid4is, nplasmid, nphage4is, nphage]
else:
return []
# Return metainfo in fileid
# metainfo: {'dnaType': a, 'dnaLen': dnaLen}, where a is 0, 1 or 2, representing phage, plasmid or genome DNA respectively
def meta4genome(dir, org, fileid):
#fnafile = os.path.join(dir, org, fileid+'.fna')
fnafile = os.path.join(dir, org, fileid)
seqs = getFastaFull(fnafile)
# seqs: [(header, seq), ..., (header, seq)]
    # get the first sequence in the fasta file, which may contain one or multiple sequences
header, seq = seqs[0]
metainfo = {}
metainfo['dnaLen'] = len(seq)
if 'phage' in header.lower():
metainfo['dnaType'] = 0
elif 'plasmid' in header.lower():
metainfo['dnaType'] = 1
else:
metainfo['dnaType'] = 2
return metainfo
# Get mDNA from .fna file list like bacteria.fna.list
# Return: [mDNA, dir4data]
# mDNA: {seqid: (org, fileid, sequence), ..., seqid: (org, fileid, sequence)}
# dir4data: parent directory of the organism directories holding the .fna files (derived from the last file in the list)
# Input: file
# file: .fna file list, for example, bacteria.fna.list
def fnaFileList2mDNA(filelist):
fileids = []
mDNA = {}
dnaFiles = rdDNAlist(filelist)
for item in dnaFiles:
dnafile, org = item
filename = os.path.basename(dnafile)
#fileid = filename.rsplit('.', 1)[0]
fileid = filename
fileids.append((fileid, org))
# seq: [(id, seq), ..., (id, seq)]
seqs = getFasta(dnafile)
# simply get the first sequence in fasta file.
if len(seqs) > 0 and len(seqs[0]) > 0:
mDNA[seqs[0][0]] = (org, fileid, seqs[0][1])
else:
print('Warning: no sequence found in', dnafile)
dir4data = os.path.dirname(os.path.dirname(dnafile))
return [mDNA, dir4data]
# compute distance between vectors u and v
def distFunction(u, v):
start1, end1 = u
start2, end2 = v
intersect = min(end1, end2) - max(start1, start2) + 1
if intersect > 0:
d = 1/intersect
else:
d = 10 - intersect
return d
def distFunctionByoverlap_min(p1, p2):
a, b = p1
c, d = p2
    # a <= b and c <= d are assumed to be satisfied
intersect = min(b, d) - max(a, c) + 1
if intersect > 0:
overlap = float(intersect) / (min(b-a,d-c)+1)
else:
overlap = 0.0
return 1 - overlap
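# Illustrative sketch (not part of the original module): worked values for the two
# distance functions above, assuming 1-based, inclusive (start, end) boundaries and
# Python 3 division.
def _demo_boundaryDistances():
    # overlapping boundaries: intersect = 20 - 15 + 1 = 6
    d1 = distFunction((10, 20), (15, 30))               # 1/6, ~0.17
    # disjoint boundaries: intersect = 20 - 30 + 1 = -9, so d = 10 - (-9) = 19
    d2 = distFunction((10, 20), (30, 40))               # 19
    # overlap relative to the shorter boundary: 6 / (min(10, 15) + 1) = 6/11
    d3 = distFunctionByoverlap_min((10, 20), (15, 30))  # 1 - 6/11, ~0.45
    return d1, d2, d3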
# Get the distribution of a series of integers along a series of windows defined by item and cutoff.
# algorithm:
# For n items in a list, ilist, we create n windows, (i-cutoff, i+cutoff) where the windows satisfy i-cutoff >= 1.
# Count the number of items in each window.
# Note: some items in ilist might be equal, so the number of distinct windows can be less than n, namely, nwindows <= n.
#
# ilist: [i, ...], i is either left or right boundary
#
# Return n4windows
# n4windows: {k:n4window, ...}
# k: the boundary, namely, i in ilist
# n4window: number of items (varied boundaries, namely, potential copies) in the window
#
def ncopyByCutoff(ilist, cutoff=0):
ilist.sort()
gs = itertools.groupby(ilist)
windows = {}
kgs = []
for k,g in gs:
        # clamp the window start at 1, i.e. start = max(1, k - cutoff)
if k <= cutoff:
start = 1
else:
start = k - cutoff
end = k + cutoff
windows[k] = (start, end)
kgs.append([k,list(g)])
# n4windows: {k:n4window, ...}
n4windows = {}
for k4win,window in windows.items():
n4windows[k4win] = 0
for k,g in kgs:
if window[0] <= k <= window[1]:
n4windows[k4win] += len(g)
return n4windows
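# Illustrative sketch (not part of the original module): for a toy boundary list,
# ncopyByCutoff counts, for each distinct boundary k, how many boundaries fall into
# the window centered at k; the values below follow from the code above.
def _demo_ncopyByCutoff():
    # windows with cutoff=2: 5->(3,7), 6->(4,8), 7->(5,9), 20->(18,22)
    return ncopyByCutoff([5, 6, 7, 20], cutoff=2)  # {5: 3, 6: 3, 7: 3, 20: 1}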
def getbds4opt4start(n4windows, bds):
ks4ncopy = list(n4windows.items())
ks4ncopy.sort(key=operator.itemgetter(1), reverse=True)
# ks4ncopy: [(k,n4window), ...], sorted by n4window
bds4opt4k = []
    # bds4opt4k: [bd, ...], boundaries whose window contains the largest number of items
    # Note: there might be more than one window (k) with the largest number of items in the window
    # bd: (start, end)
for bd in bds:
if bd[0] == ks4ncopy[0][0]:
bds4opt4k.append(bd)
return bds4opt4k
def getWindowKey4abundance(ilist):
cutoffs = set()
ilist.sort()
# get all possible cutoff values (distance between two items) between any two items in ilist.
for pair in itertools.combinations(ilist, 2):
cutoffs.add(pair[1]-pair[0])
# n4windows4cutoffs: {k:n4win, ...}
# n4win: is the total number of items in the window centered at k under all possible cutoffs.
n4windows4cutoffs = {}
for cutoff in cutoffs:
# n4windows: {k:n, ...}
# k: item in ilist
# n: number of items in the window centered at k under a cutoff
n4windows = ncopyByCutoff(ilist, cutoff)
for k,n in n4windows.items():
if k not in n4windows4cutoffs.keys():
n4windows4cutoffs[k] = 0
n4windows4cutoffs[k] += n
return n4windows4cutoffs
def consensusBoundaryByCutoffBySeparated(bds):
starts = []
ends = []
for bd in bds:
starts.append(bd[0])
ends.append(bd[1])
# Get the number of items in each window centered at the specific key (item in starts) of n4windows
n4windows = getWindowKey4abundance(starts)
    # Sort start (left) boundaries so that, when multiple windows are maximal, the first boundary
    # with the largest number of items in its window is the leftmost one.
    # This ensures that the representative bd is the longest bd.
#
# Get the key (the specific items with same value in starts) of the window with
# the most amount of items within the windows.
startboundary = max(sorted(n4windows.keys()), key = lambda x: n4windows[x])
# Get the number of items in each window centered at the specific key (item in ends) of n4windows
n4windows = getWindowKey4abundance(ends)
    # Sort end (right) boundaries so that, when multiple windows are maximal, the first boundary
    # with the largest number of items in its window is the rightmost one.
    # This ensures that the representative bd is the longest bd.
#
# Get the key (the specific items with same value in ends) of the window with
# the most amount of items within the windows.
endboundary = max(sorted(n4windows.keys(), reverse=True), key = lambda x: n4windows[x])
return (startboundary, endboundary)
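# Illustrative sketch (not part of the original module): for three similar boundary
# pairs, the consensus start/end are the values whose windows accumulate the most
# support over all pairwise cutoffs; the expected result is hand-computed from the
# code above.
def _demo_consensusBySeparated():
    bds = [(10, 100), (12, 98), (11, 99)]
    return consensusBoundaryByCutoffBySeparated(bds)  # (11, 99)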
def consensusBoundaryByCutoffByCombined(bds, cutoff=0):
starts = []
ends = []
for bd in bds:
starts.append(bd[0])
ends.append(bd[1])
n4windows = ncopyByCutoff(starts, cutoff)
# n4windows: {k:n4window, ...}
# k: the integer, namely, element in left starts (boundaries)
# n4window: number of items (integers) in the window
'''
# Sort start (left) boundaries to ensure the first boundary with the most number of items in window
# is the most left one when multiple items are maximal.
# It ensure that the representative bd is the longest bd.
n4windowsSorted = sorted(n4windows.keys())
# Get the key (the specific items with same value in starts) of the window with
# the most amount of items within the windows.
startboundary = max(n4windowsSorted, key = lambda x: n4windows[x])
'''
bds4opt4start = getbds4opt4start(n4windows, bds)
n4windows = ncopyByCutoff(ends, cutoff)
'''
# Sort end (right) boundaries to ensure the first boundary with the most number of items in window
# is the most right one when multiple items are maximal.
# It ensure that the representative bd is the longest bd.
n4windowsSorted = sorted(n4windows.keys(), reverse=True)
# Get the key (the specific items with same value in ends) of the window with
# the most amount of items within the windows.
endboundary = max(n4windowsSorted, key = lambda x: n4windows[x])
'''
bds4opt4end = getbds4opt4start(n4windows, bds)
# Get the bds with both optimal starts and ends
set4bds4opt4start = set(bds4opt4start)
set4bds4opt4end = set(bds4opt4end)
commonbds = set4bds4opt4start & set4bds4opt4end
# commonbds: {bd, ...}
ncommonbds = len(commonbds)
if ncommonbds > 0:
commonbdsSortByLen = sorted(commonbds, key = lambda x: x[1]-x[0], reverse = True)
# Get the representative bd, namely, the longest one among the bds with
# both optimal starts and ends (commonbds).
startboundary, endboundary = commonbdsSortByLen[0]
elif cutoff == 0:
        # When ncommonbds == 0 and cutoff == 0,
        # get the representative bd, namely, the longest one among the bds with
        # either an optimal start or an optimal end (bds4opt4start + bds4opt4end).
noncommonbds = bds4opt4start + bds4opt4end
noncommonbdsSortByLen = sorted(noncommonbds, key = lambda x: x[1]-x[0], reverse = True)
startboundary, endboundary = noncommonbdsSortByLen[0]
else:
        # When ncommonbds == 0 and cutoff > 0,
        # determine the representative bd with a stricter cutoff, namely, cutoff - 1
startboundary, endboundary = consensusBoundaryByCutoff(bds4opt4start + bds4opt4end, cutoff - 1)
return (startboundary, endboundary)
# based on the bool value of constants.intersected2remove, choose the measure and threshold for
# clustering and removing intersected IS elements in the same genome sequence
# bd1, bd2: [start, end], boundary of IS element
def chooseMeasure(bd1, bd2):
intersect = intersection(bd1, bd2)
    if constants.intersected2remove:
measure = intersect
threshold = constants.min4intersect
else:
measure = intersect / min(bd1[1]-bd1[0]+1, bd2[1]-bd2[0]+1)
threshold = constants.overlap2removeRedundancy
return (measure, threshold)
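# Illustrative sketch (assumption, not in the original module): how the two possible
# measures above behave for a pair of overlapping boundaries, computed inline so the
# demo does not depend on the constants module.
def _demo_chooseMeasure():
    bd1, bd2 = (100, 200), (150, 260)
    intersect = min(bd1[1], bd2[1]) - max(bd1[0], bd2[0]) + 1    # 51 overlapping bps
    overlap = intersect / min(bd1[1]-bd1[0]+1, bd2[1]-bd2[0]+1)  # 51/101, ~0.50
    return intersect, overlap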
def getNewick(node, newick, parentDist, leafNames):
if node.is_leaf():
return '{}:{:.2f}{}'.format(leafNames[node.id], parentDist - node.dist, newick)
else:
if len(newick) > 0:
newick = '):{:.2f}{}'.format(parentDist - node.dist, newick)
else:
newick = ');'
newick = getNewick(node.get_left(), newick, node.dist, leafNames)
newick = getNewick(node.get_right(), ',{}'.format(newick), node.dist, leafNames)
newick = '({}'.format(newick)
return newick
# Convert a tree returned by scipy.cluster.hierarchy.to_tree() into a tree in newick format
# It is a recursive implementation, which can overflow the stack on a big data set. In Python 3,
# the default maximum recursion depth is 1000.
#
# tree: tree structure returned by scipy.cluster.hierarchy.to_tree() from scipy package
# leafNames: [name_1, ..., name_m], names of the corresponding observations in m observation vectors in n dimensions (m*n array)
def linkageTree2newick(tree, leafNames):
return getNewick(tree, '', tree.dist, leafNames)
def linkageTree2newick_iter(tree, leafNames):
newick = ''
return newick
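# A minimal iterative sketch (an assumption, not the author's implementation) of how
# linkageTree2newick_iter could be completed: an explicit stack replaces recursion,
# producing the same overall newick layout as getNewick() above (child ordering may differ).
def _linkageTree2newick_iter_sketch(tree, leafNames):
    newick4node = {}  # node.id -> newick substring without its branch length
    stack = [(tree, False)]
    while stack:
        node, childrenDone = stack.pop()
        if node.is_leaf():
            newick4node[node.id] = leafNames[node.id]
        elif not childrenDone:
            # revisit the node after both children have been rendered
            stack.append((node, True))
            stack.append((node.get_left(), False))
            stack.append((node.get_right(), False))
        else:
            left, right = node.get_left(), node.get_right()
            newick4node[node.id] = '({}:{:.2f},{}:{:.2f})'.format(
                newick4node.pop(left.id), node.dist - left.dist,
                newick4node.pop(right.id), node.dist - right.dist)
    return newick4node[tree.id] + ';'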
|
468892
|
import sys; sys.path.append('..') # make sure that pylsl is found (note: in a normal program you would bundle pylsl with the program)
import pylsl
import random
import time
# first create a new stream info (here we set the name to BioSemi, the content-type to EEG, 8 channels, 100 Hz, and float-valued data)
# The last value would be the serial number of the device or some other more or less locally unique identifier for the stream as far as available (you could also omit it but interrupted connections wouldn't auto-recover).
info = pylsl.stream_info('BioSemi','EEG',8,100,pylsl.cf_float32,'dsffwerwer')
# append some meta-data
info.desc().append_child_value("manufacturer","BioSemi")
channels = info.desc().append_child("channels")
for c in ["C3","C4","Cz","FPz","POz","CPz","O1","O2"]:
channels.append_child("channel").append_child_value("name",c).append_child_value("unit","microvolts").append_child_value("type","EEG")
# next make an outlet; we set the outgoing buffer size to 360 seconds (max.) and the transmission chunk size to 32 samples
outlet = pylsl.stream_outlet(info,360,32)
print("now sending data...")
while True:
# make a new random 8-channel sample; this is converted into a pylsl.vectorf (the data type that is expected by push_sample)
mysample = pylsl.vectorf([random.random(),random.random(),random.random(),random.random(),random.random(),random.random(),random.random(),random.random()])
# get a time stamp in seconds (we might modify this time stamp based on the true age of the sample, e.g. if it came from a measurement device, in case we can determine it)
now = pylsl.local_clock()
# now send it and wait for a bit
outlet.push_sample(mysample,now)
time.sleep(0.01)
|
468900
|
from flask_wtf import FlaskForm
from wtforms import BooleanField, HiddenField, StringField, SubmitField, ValidationError
from wtforms.validators import Length, NumberRange, Required
from .. models import LookupValue
class LookupValueForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
selectable = BooleanField("Selectable", validators = [NumberRange(min = 0)])
lookupValueId = HiddenField()
lookupId = HiddenField()
value = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
lookupValue = LookupValue.query.filter_by(LookupId = self.lookupId.data, Name = field.data).first()
if lookupValue:
if self.lookupValueId.data == "":
# Trying to add a new lookupValue using a name that already exists.
validationError = True
else:
if int(self.lookupValueId.data) != lookupValue.LookupValueId:
# Trying to change the name of a lookupValue to a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
|
468905
|
from django.db import models
from wagtailstreamforms.models import AbstractFormSetting, Form
from ..test_case import AppTestCase
from . import ValidFormSettingsModel
class ModelGenericTests(AppTestCase):
fixtures = ["test"]
def test_abstract(self):
self.assertTrue(AbstractFormSetting._meta.abstract)
def test_str(self):
model = ValidFormSettingsModel(form=Form.objects.get(pk=1))
self.assertEqual(model.__str__(), model.form.title)
class ModelFieldTests(AppTestCase):
def test_form(self):
field = self.get_field(AbstractFormSetting, "form")
self.assertModelField(field, models.OneToOneField)
self.assertEqual(field.remote_field.model, "wagtailstreamforms.Form")
self.assertEqual(field.remote_field.on_delete, models.CASCADE)
self.assertEqual(field.remote_field.related_name, "advanced_settings")
|
468918
|
from lib.models.gupnet import GUPNet
def build_model(cfg,mean_size):
if cfg['type'] == 'gupnet':
return GUPNet(backbone=cfg['backbone'], neck=cfg['neck'], mean_size=mean_size)
else:
raise NotImplementedError("%s model is not supported" % cfg['type'])
|
468921
|
import math
from chempy import Reaction
from chempy.units import allclose, default_units as u
from ..testing import requires
from ..rendering import eval_template
from ..parsing import get_parsing_context
from chempy.units import units_library
@requires(units_library)
def test_eval_template():
rendered = eval_template("${2*pi*arg*m**2}", arg=1 / math.pi)
val = eval(rendered, get_parsing_context())
assert allclose(val, 2 * u.m ** 2)
@requires(units_library)
def test_eval_template__Reaction():
rendered = eval_template("2 OH -> H2O2; ${6*pi*arg}/M/s", arg=1 / math.pi)
assert allclose(
Reaction.from_string(rendered).param,
Reaction.from_string("2 OH -> H2O2; 6.0/M/s").param,
)
|
468974
|
import enum
class EncodingStatusValues(enum.Enum):
RUNNING = 'RUNNING'
QUEUED = 'QUEUED'
CREATED = 'CREATED'
FINISHED = 'FINISHED'
ERROR = 'ERROR'
|
469033
|
import _init_paths
import os.path as osp
import os
import numpy as np
from sacred import Experiment
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from model.config import cfg as frcnn_cfg
from tracker.config import get_output_dir, get_tb_dir
#from tracker.alex import alex
from tracker.resnet import resnet50
from tracker.datasets.factory import Datasets
from tracker.triplet_loss import _get_anchor_positive_triplet_mask, _get_anchor_negative_triplet_mask
from torchvision.transforms import CenterCrop, Normalize, ToTensor, Compose, Resize, ToPILImage
ex = Experiment()
ex.add_config('output/tracker/pretrain_cnn/res50-bh4-all/sacred_config.yaml')
weights = 'output/tracker/pretrain_cnn/res50-bh4-all/ResNet_iter_25245.pth'
#ex.add_config('output/tracker/pretrain_cnn/bh4-smallTrain/sacred_config.yaml')
#weights = 'output/tracker/pretrain_cnn/bh4-smallTrain/ResNet_iter_25254.pth'
#ex.add_config('output/tracker/pretrain_cnn/marcuhmot_small/sacred_config.yaml')
#weights = 'output/tracker/pretrain_cnn/marcuhmot_small/ResNet_iter_26496.pth'
#ex.add_config('output/tracker/pretrain_cnn/marcuhmot_small/sacred_config.yaml')
#weights = 'output/tracker/pretrain_cnn/marcuhmot/ResNet_iter_27200.pth'
#ex.add_config('output/tracker/pretrain_cnn/kitti_bh_Car_1_2/sacred_config.yaml')
#weights = 'output/tracker/pretrain_cnn/kitti_bh_Car_1_2/ResNet_iters_25065.pth'
#ex.add_config('output/tracker/pretrain_cnn/kitti_small_bh_Car_1_2/sacred_config.yaml')
#weights = 'output/tracker/pretrain_cnn/kitti_small_bh_Car_1_2/ResNet_iters_24624.pth'
def build_crop(im_path, gt):
im = cv2.imread(im_path)
height, width, channels = im.shape
gt[0] = np.clip(gt[0], 0, width-1)
gt[1] = np.clip(gt[1], 0, height-1)
gt[2] = np.clip(gt[2], 0, width-1)
gt[3] = np.clip(gt[3], 0, height-1)
im = im[int(gt[1]):int(gt[3]), int(gt[0]):int(gt[2])]
im = cv2.resize(im, (128, 256), interpolation=cv2.INTER_LINEAR)
return im
def build_samples(data):
"""Builds the samples out of the sequence."""
tracks = {}
for t, sample in enumerate(data):
im_path = sample['im_path']
gt = sample['gt']
for k,v in tracks.items():
if k in gt.keys():
v.append({'t':t, 'id':k, 'im_path':im_path, 'gt':gt[k]})
del gt[k]
# For all remaining BB in gt new tracks are created
for k,v in gt.items():
tracks[k] = [{'t':t, 'id':k, 'im_path':im_path, 'gt':v}]
# sample max_per_person images and filter out tracks smaller than 4 samples
#outdir = get_output_dir("siamese_test")
res = []
for k,v in tracks.items():
l = len(v)
if l >= 2:
pers = []
for i in range(l):
pers.append([v[i]['t'], build_crop(v[i]['im_path'], v[i]['gt'])])
res.append(pers)
return res
@ex.automain
def my_main(_config, cnn):
print(_config)
##########################
# Initialize the modules #
##########################
print("[*] Building CNN")
network = resnet50(pretrained=True, **cnn['cnn'])
network.load_state_dict(torch.load(weights))
network.eval()
network.cuda()
#########################
# Initialize dataloader #
#########################
print("[*] Initializing Dataloader")
output_dir = osp.join(get_output_dir('MOT_analysis'), 'siamese_dist')
if not osp.exists(output_dir):
os.makedirs(output_dir)
results = []
for db in Datasets("mot_train_", {'vis_threshold':0.5}):
print("[*] Evaluating {}".format(db))
data = db.data
data = build_samples(data)
results_seq = []
for person in data:
images = []
times = []
transformation = Compose([ToTensor(), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
for sample in person:
im = cv2.cvtColor(sample[1], cv2.COLOR_BGR2RGB)
im = Image.fromarray(im)
im = transformation(im)
images.append(im)
times.append(sample[0])
images = torch.stack(images, 0)
embeddings = network(Variable(images.cuda(), volatile=True)).data.cpu()
n = embeddings.size(0)
m = embeddings.size(0)
d = embeddings.size(1)
x = embeddings.unsqueeze(1).expand(n, m, d)
y = embeddings.unsqueeze(0).expand(n, m, d)
dist = torch.sqrt(torch.pow(x - y, 2).sum(2))
res = []
for i in range(n):
for j in range(n):
if i < j:
res_x = times[j] - times[i]
res_y = dist[i,j]
if res_x <= 100:
res.append([res_x, res_y])
results_seq += res
results += results_seq
#r = np.array(results_seq)
# build values for plot
r = np.array(results)
x_max = 100
x_val = np.arange(1,x_max+1)
y_val = np.zeros(x_max)
y_std = np.zeros(x_max)
for x in x_val:
vals = r[r[:,0] == x, 1]
mean = np.mean(vals)
y_val[x-1] = mean
y_std[x-1] = np.sqrt(np.mean((vals-mean)**2))
#plt.scatter(x_val, y_val, s=1**2)
plt.errorbar(x_val, y_val, yerr=y_std, fmt='o')
plt.xlabel('frames distance')
plt.ylabel('feature distance')
plt.xlim((0, 100))
# calculate variance
#var_step = 10
#x_var = np.arange(var_step/2, x_max, 10)
#y_var = np.zeros(x_max//var_step)
#for x in x_var:
# vals = r[(r[:,0] > x-var_step/2) * (r[:,0] <= x+var_step/2), 1]
# y_val[x-1] = y
#plt.errorbar(x, y, yerr=yerr, fmt='o')
#plt.ylim((0,10))
#plt.savefig(osp.join(output_dir, "{}-{}.pdf".format(t, detections)), format='pdf')
#plt.close()
#plt.legend()
plt.savefig(osp.join(output_dir, "dist_err.pdf"), format='pdf')
plt.close()
|
469045
|
import gzip
import orjson
from somajo import SoMaJo
from tqdm import tqdm
import argparse
tokenizer = SoMaJo("de_CMC")
# see https://github.com/tsproisl/SoMaJo/issues/17
def detokenize(tokens):
out = []
for token in tokens:
if token.original_spelling is not None:
out.append(token.original_spelling)
else:
out.append(token.text)
if token.space_after:
out.append(" ")
return "".join(out)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
input_filename = args.filename
with gzip.open(input_filename, 'r') as f, \
gzip.open(input_filename + '-out.gz', 'wt') as output_file:
with tqdm(total=2980314) as pbar:
for line in f:
pbar.update(1)
line_dict = orjson.loads(line)
content = line_dict['raw_content']
language = line_dict['language']
if language == 'de':
sentences = tokenizer.tokenize_text([content], parallel=1)
for s in sentences:
sentence_string = detokenize(s)
output_file.write(sentence_string + '\n')
# split documents?
#output_file.write('\n')
else:
print('###################')
print(language)
print(content)
|
469079
|
class PNMessageCountResult(object):
def __init__(self, result):
"""
Representation of message count server response
:param result: result of message count operation
"""
self._result = result
self.channels = result['channels']
def __str__(self):
return "Message count for channels: {}".format(self.channels)
|
469089
|
import itertools
from omegaconf import OmegaConf
def bifpn_config(min_level, max_level, weight_method=None):
"""BiFPN config.
Adapted from https://github.com/google/automl/blob/56815c9986ffd4b508fe1d68508e268d129715c1/efficientdet/keras/fpn_configs.py
"""
p = OmegaConf.create()
weight_method = weight_method or 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
'weight_method': weight_method,
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
'weight_method': weight_method,
})
node_ids[i].append(next(id_cnt))
return p
def panfpn_config(min_level, max_level, weight_method=None):
"""PAN FPN config.
    This defines the FPN layout from Path Aggregation Networks as an alternative to
    BiFPN; it does not implement the full PAN spec.
Paper: https://arxiv.org/abs/1803.01534
"""
p = OmegaConf.create()
weight_method = weight_method or 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level, min_level - 1, -1):
# top-down path.
offsets = [level_last_id(i), level_last_id(i + 1)] if i != max_level else [level_last_id(i)]
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': offsets,
'weight_method': weight_method,
})
node_ids[i].append(next(id_cnt))
for i in range(min_level, max_level + 1):
# bottom-up path.
offsets = [level_last_id(i), level_last_id(i - 1)] if i != min_level else [level_last_id(i)]
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': offsets,
'weight_method': weight_method,
})
node_ids[i].append(next(id_cnt))
return p
def qufpn_config(min_level, max_level, weight_method=None):
"""A dynamic quad fpn config that can adapt to different min/max levels.
It extends the idea of BiFPN, and has four paths:
(up_down -> bottom_up) + (bottom_up -> up_down).
Paper: https://ieeexplore.ieee.org/document/9225379
Ref code: From contribution to TF EfficientDet
https://github.com/google/automl/blob/eb74c6739382e9444817d2ad97c4582dbe9a9020/efficientdet/keras/fpn_configs.py
"""
p = OmegaConf.create()
weight_method = weight_method or 'fastattn'
quad_method = 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
level_first_id = lambda level: node_ids[level][0]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path 1.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [level_last_id(i), level_last_id(i + 1)],
'weight_method': weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(min_level + 1, max_level):
# bottom-up path 2.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
'weight_method': weight_method
})
node_ids[i].append(next(id_cnt))
i = max_level
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
'weight_method': weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(min_level + 1, max_level + 1, 1):
# bottom-up path 3.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [
level_first_id(i), level_last_id(i - 1) if i != min_level + 1 else level_first_id(i - 1)],
'weight_method': weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(max_level - 1, min_level, -1):
# top-down path 4.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [node_ids[i][0]] + [node_ids[i][-1]] + [level_last_id(i + 1)],
'weight_method': weight_method
})
node_ids[i].append(next(id_cnt))
i = min_level
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
'weight_method': weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
# NOTE: the order of the quad path is reversed from the original, my code expects the output of
# each FPN repeat to be same as input from backbone, in order of increasing reductions
for i in range(min_level, max_level + 1):
# quad-add path.
p.nodes.append({
'reduction': 1 << i,
'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
'weight_method': quad_method
})
node_ids[i].append(next(id_cnt))
return p
def get_fpn_config(fpn_name, min_level=3, max_level=7):
if not fpn_name:
fpn_name = 'bifpn_fa'
name_to_config = {
'bifpn_sum': bifpn_config(min_level=min_level, max_level=max_level, weight_method='sum'),
'bifpn_attn': bifpn_config(min_level=min_level, max_level=max_level, weight_method='attn'),
'bifpn_fa': bifpn_config(min_level=min_level, max_level=max_level, weight_method='fastattn'),
'pan_sum': panfpn_config(min_level=min_level, max_level=max_level, weight_method='sum'),
'pan_fa': panfpn_config(min_level=min_level, max_level=max_level, weight_method='fastattn'),
'qufpn_sum': qufpn_config(min_level=min_level, max_level=max_level, weight_method='sum'),
'qufpn_fa': qufpn_config(min_level=min_level, max_level=max_level, weight_method='fastattn'),
}
return name_to_config[fpn_name]
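# Illustrative sketch (not part of the original file): the default BiFPN layout for
# levels 3..7 contains 4 top-down plus 4 bottom-up fusion nodes.
def _demo_get_fpn_config():
    cfg = get_fpn_config('bifpn_fa', min_level=3, max_level=7)
    return len(cfg.nodes)  # 8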
|
469092
|
from typing import List
import pytest
from gym.spaces import Discrete
from stable_baselines3.common.envs import IdentityEnv
from sb3_contrib.common.wrappers import ActionMasker
class IdentityEnvDiscrete(IdentityEnv):
def __init__(self, dim: int = 1, ep_length: int = 100):
"""
Identity environment for testing purposes
:param dim: the size of the dimensions you want to learn
:param ep_length: the length of each episode in timesteps
"""
space = Discrete(dim)
self.useless_property = 1
super().__init__(ep_length=ep_length, space=space)
def _action_masks(self) -> List[int]:
return [i == self.state for i in range(self.action_space.n)]
def action_mask_fn(env: IdentityEnvDiscrete) -> List[int]:
return [i == env.state for i in range(env.action_space.n)]
def test_wrapper_accepts_function():
"""
ActionMasker accepts a function
"""
env = IdentityEnvDiscrete()
assert not hasattr(env, "action_masks")
env = ActionMasker(env, action_mask_fn)
assert hasattr(env, "action_masks")
# Wrapper accepts the string name of a method on the underlying env
def test_wrapper_accepts_attr_name():
"""
ActionMasker accepts a string name of a method on the underlying env
"""
env = IdentityEnvDiscrete()
assert not hasattr(env, "action_masks")
env = ActionMasker(env, "_action_masks")
assert hasattr(env, "action_masks")
def test_attr_must_be_callable():
"""
Passing ActionMasker the string name of a non-callable is an error
"""
env = IdentityEnvDiscrete()
with pytest.raises(ValueError):
env = ActionMasker(env, "useless_property")
# Wrapper method returns expected results
def test_action_masks_returns_expected_result():
"""
ActionMasker-provided action_masks() method returns expected results
"""
env = IdentityEnvDiscrete()
env = ActionMasker(env, action_mask_fn)
# Only one valid action expected
masks = env.action_masks()
masks[env.state] = not masks[env.state] # Bit-flip the one expected valid action
assert all([not mask for mask in masks])
|
469113
|
import os, sys
parent_dir = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
sys.path.append(parent_dir)
from fsl import main
assert len(sys.argv) == 9
script, windowsSDK, dst, binary_dst, src, lang, compile, verbose, config = sys.argv
windowsSDKPaths = windowsSDK.split(';')
if windowsSDKPaths:
os.environ['FSL_COMPILER_FXC'] = windowsSDKPaths[0]
if 'Auto' == lang:
if config.endswith('Dx11'):
lang = 'DIRECT3D11'
elif config.endswith('Dx'):
lang = 'DIRECT3D12'
elif config.endswith('Vk'):
lang = 'VULKAN'
elif config.endswith('GLES'):
lang = 'GLES'
else:
print(src+': error FSL: Could not deduce target lang from current VS Config.')
sys.exit(1)
sys.argv = [
script,
src,
'-d', dst,
'-b', binary_dst,
'-l', lang,
]
if verbose=='true': sys.argv += ['--verbose']
if compile=='true': sys.argv += ['--compile']
sys.exit(main())
|
469124
|
import sublime, sublime_plugin, json
from urllib import urlopen
CFLIBCATS = r"http://www.cflib.org/api/api.cfc?method=getlibraries&returnformat=json"
CFLIBUDFS = r"http://www.cflib.org/api/api.cfc?method=getudfs&returnformat=json&libraryid="
CFLIBUDF = r"http://www.cflib.org/api/api.cfc?method=getudf&returnFormat=json&udfid="
class ShowCflibCommand(sublime_plugin.WindowCommand):
categories = []
udfs = []
def __init__(self, *args, **kwargs):
super(ShowCflibCommand, self).__init__(*args, **kwargs)
        self.categories = []
        self.udfs = []
def run(self):
self.getCategories()
self.window.show_quick_panel([[v] for k, v in (self.categories)], self.on_select_categories)
def getCategories(self):
d = json.load(urlopen(CFLIBCATS))
self.categories = d['DATA']
def getUdfs(self,index):
d = json.load(urlopen(CFLIBUDFS + str(self.categories[index][0])))
self.udfs = d['DATA']
def on_select_categories(self, index):
if index == -1:
return
self.getUdfs(index)
self.window.show_quick_panel([[v.strip(), c.strip()] for k, v, c in self.udfs], self.on_select_udf)
def on_select_udf(self, index):
if index == -1:
self.run()
else:
d = json.load(urlopen(CFLIBUDF + str(self.udfs[index][0])))
self.window.active_view().run_command("insert_udf", {"code":str(d['CODE'])})
class InsertUdfCommand(sublime_plugin.TextCommand):
def run(self, edit, code):
for region in self.view.sel():
self.view.replace(edit, region, code)
|
469207
|
from django.contrib import admin
from .models import DoorStatus, OpenData
@admin.register(DoorStatus)
class DoorStatusAdmin(admin.ModelAdmin):
fieldsets = [
(
"Status",
{
"fields": [
"name",
"datetime",
"status",
]
},
)
]
search_fields = ["title"]
@admin.register(OpenData)
class OpenDataAdmin(admin.ModelAdmin):
fieldsets = [
(
"Data",
{
"fields": [
"opened",
"closed",
]
},
)
]
search_fields = ["opened"]
|