content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M) |
---|---|---|
def post_to_conf(post_grid, cell_size):
"""
Converts an N-dimensional grid of posterior values into a grid of confidence levels. The posterior values do not
need to be normalised, i.e. their distribution need not integrate to 1. Also works with likelihood values (not
log-likelihood) in place of posteriors, assuming a flat prior.
Args:
post_grid (ND numpy array): Grid of posterior values.
cell_size (float): The size of a grid cell, e.g. for 2 dimensions x and y this would be dx*dy.
Returns:
ND numpy array: Grid of confidence levels, where the value at each point is the minimum confidence region that \
includes that point. The least likely point would have a value of 1, indicating that it is \
only included in the 100% confidence region and excluded from anything smaller.
"""
# Create flattened list of posteriors and sort in descending order
posteriors = post_grid.flatten()
posteriors[::-1].sort()
# Dictionary to contain mapping between posterior and confidence level
confidence_level_unnormalised = {}
# Calculate the cumulative integral of posterior values
integral = 0
for posterior in posteriors:
integral += posterior * cell_size
confidence_level_unnormalised[posterior] = integral
# Map each posterior in the grid to its confidence value
confidence_grid_unnormalised = np.vectorize(confidence_level_unnormalised.get)(post_grid)
# Normalise the confidence values using the final (complete) integral
confidence_grid_normalised = np.divide(confidence_grid_unnormalised, integral)
return confidence_grid_normalised | b4bcb8dddeceb7a4e1bb0914e503868e443ecb09 | 3,651,800 |
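A minimal usage sketch for post_to_conf above, assuming the surrounding module imports numpy as np; the grid values and cell size here are made up:

import numpy as np

# Hypothetical 2D grid of unnormalised posterior values on a coarse mesh.
post = np.array([[0.1, 0.4],
                 [1.0, 0.5]])
conf = post_to_conf(post, cell_size=0.25)
# The most likely cell maps to the smallest confidence level; the least likely maps to 1.0.
print(conf)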
def get_fuzzer_display(testcase):
"""Return FuzzerDisplay tuple."""
if (testcase.overridden_fuzzer_name == testcase.fuzzer_name or
not testcase.overridden_fuzzer_name):
return FuzzerDisplay(
engine=None,
target=None,
name=testcase.fuzzer_name,
fully_qualified_name=testcase.fuzzer_name)
fuzz_target = get_fuzz_target(testcase.overridden_fuzzer_name)
if not fuzz_target:
# Legacy testcases.
return FuzzerDisplay(
engine=testcase.fuzzer_name,
target=testcase.get_metadata('fuzzer_binary_name'),
name=testcase.fuzzer_name,
fully_qualified_name=testcase.overridden_fuzzer_name)
return FuzzerDisplay(
engine=fuzz_target.engine,
target=fuzz_target.binary,
name=fuzz_target.engine,
fully_qualified_name=fuzz_target.fully_qualified_name()) | 273e0a2f92a4e24606908586111f1bad17e50b4c | 3,651,801 |
def process_articles_results(articles_list):
"""
Function that processes the article results and transforms them into a list of Article objects
Args:
articles_list: A list of dictionaries that contain article details
Returns:
articles_results: A list of Articles objects
"""
articles_results = []
for article_item in articles_list:
id = article_item.get('id')
author = article_item.get('author')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
urlToImage = article_item.get('urlToImage')
publishedAt = article_item.get('publishedAt')
content = article_item.get('content')
if urlToImage:
article_object = Articles(id, author, title, description, url, urlToImage, publishedAt, content)
articles_results.append(article_object)
return articles_results | 7b4e540474757b2e0e9b93f66f5bee926992a782 | 3,651,802 |
def list_to_bytes_list(strList):
"""
This function turns a list of strings into a pointer array
whose pointers point to the encodings of those strings.
Elements that are already bytes are kept as they are.
:param strList: List of strings that shall be converted
:type strList: List of strings
:returns: Pointer array with pointers pointing to bytes
:raises: TypeError if strList is not list, set or tuple
"""
# if strList is None, there is nothing to do
if strList is None:
return strList
pList = c_char_p * len(strList)
# if strList is already a pointer array, there is nothing to do
if isinstance(strList, pList):
return strList
if not isinstance(strList, (list, set, tuple)):
raise TypeError("strList must be list, set or tuple, not " +
str(type(strList)))
pList = pList()
for i, elem in enumerate(strList):
pList[i] = str_to_bytes(elem)
return pList | 19bcc6751e4805adcbfde54656ff83ef52ef02b8 | 3,651,803 |
def handle_td(element, box, _get_image_from_uri):
"""Handle the ``colspan``, ``rowspan`` attributes."""
if isinstance(box, boxes.TableCellBox):
# HTML 4.01 gives special meaning to colspan=0
# http://www.w3.org/TR/html401/struct/tables.html#adef-rowspan
# but HTML 5 removed it
# http://www.w3.org/TR/html5/tabular-data.html#attr-tdth-colspan
# rowspan=0 is still there though.
integer_attribute(element, box, 'colspan')
integer_attribute(element, box, 'rowspan', minimum=0)
return [box] | d3a2669ffc8ccac27d3b40c4f693751239b9c135 | 3,651,804 |
import pipes
def login_flags(db, host, port, user, db_prefix=True):
"""
Returns a list of connection argument strings, each prefixed
with a space and quoted where necessary, to later be combined
into a single shell string with `"".join(rv)`.
db_prefix determines whether "--dbname" is prefixed to the db argument,
since that argument was only introduced in version 9.3.
"""
flags = []
if db:
if db_prefix:
flags.append(' --dbname={0}'.format(pipes.quote(db)))
else:
flags.append(' {0}'.format(pipes.quote(db)))
if host:
flags.append(' --host={0}'.format(host))
if port:
flags.append(' --port={0}'.format(port))
if user:
flags.append(' --username={0}'.format(user))
return flags | 2c844def8e6f1154a9962d43c858b39b9a7adf2a | 3,651,805 |
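For illustration, a hypothetical call showing how the returned flags combine into a single shell string (the psql command is only an example):

flags = login_flags(db="mydb", host="localhost", port=5432, user="admin")
cmd = "psql" + "".join(flags)
# -> psql --dbname=mydb --host=localhost --port=5432 --username=admin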
def glplot(ncfile, times, colora, label):
"""
Add a plot of grounding-line points to the current axes.
Makes use of numpy.ma.MaskedArray when reading xGL, yGL.
"""
try:
ncid = Dataset(ncfile, 'r')
except:
print("Failed to open file: {}. Skipping.".format(ncfile))
return 350.0, 500.0
time = ncid.variables["time"][:]
lxmax = 0.0
lxmin = 800.0
for i in range(0, len(times)):
seq = (time == times[i])
xGL = ncid.variables["xGL"][:, seq]*1e-3
lxmax = max(np.max(xGL), lxmax)
lxmin = min(np.min(xGL), lxmin)
yGL = ncid.variables["yGL"][:, seq]*1e-3
plt.plot(xGL, yGL, 's', ms=3, mfc=colora[i],
mec=colora[i], label=label + ', t = ' + format(times[i]))
return lxmin, lxmax | 149836ceb0f6b65ba792bffcbdafad6fe8702f62 | 3,651,806 |
def roi_intersect(a, b):
"""
Compute intersection of two ROIs.
.. rubric:: Examples
.. code-block::
s_[1:30], s_[20:40] => s_[20:30]
s_[1:10], s_[20:40] => s_[10:10]
# works for N dimensions
s_[1:10, 11:21], s_[8:12, 10:30] => s_[8:10, 11:21]
"""
def slice_intersect(a, b):
if a.stop < b.start:
return slice(a.stop, a.stop)
if a.start > b.stop:
return slice(a.start, a.start)
_in = max(a.start, b.start)
_out = min(a.stop, b.stop)
return slice(_in, _out)
if isinstance(a, slice):
if not isinstance(b, slice):
b = b[0]
return slice_intersect(a, b)
b = (b,) if isinstance(b, slice) else b
return tuple(slice_intersect(sa, sb) for sa, sb in zip(a, b)) | d1070c8ec0c493296dfee6bdc54b7430e703bda8 | 3,651,807 |
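A usage sketch mirroring the docstring examples, using numpy's index expression helper:

import numpy as np

print(roi_intersect(np.s_[1:30], np.s_[20:40]))               # slice(20, 30, None)
print(roi_intersect(np.s_[1:10, 11:21], np.s_[8:12, 10:30]))  # (slice(8, 10, None), slice(11, 21, None))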
def _flows_finished(pgen_grammar, stack):
"""
if, while, for and try might not be finished, because another part might
still be parsed.
"""
for stack_node in stack:
if stack_node.nonterminal in ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt'):
return False
return True | dd0fe435d1328b3ae83ba2507006b6825ca23087 | 3,651,808 |
def PositionToPercentile(position, field_size):
"""Converts from position in the field to percentile.
position: int
field_size: int
"""
beat = field_size - position + 1
percentile = 100.0 * beat / field_size
return percentile | c75869f3d7f8437f28d3463fcf12b2b446fe930a | 3,651,809 |
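For instance, the first position in a field of ten maps to the 100th percentile and the last to the 10th:

print(PositionToPercentile(position=1, field_size=10))   # 100.0
print(PositionToPercentile(position=10, field_size=10))  # 10.0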
def grid(num, ndim, large=False):
"""Build a uniform grid with num points along each of ndim axes."""
if not large:
_check_not_too_large(np.power(num, ndim) * ndim)
x = np.linspace(0, 1, num, dtype='float64')
w = 1 / (num - 1)
points = np.stack(
np.meshgrid(*[x for _ in range(ndim)], indexing='ij'), axis=-1)
return points, w | 51a3ef70da4581a774d76839d05a14042e7bf78c | 3,651,810 |
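A quick sketch of the output, assuming numpy; large=True is passed only to skip the internal size guard:

points, w = grid(num=3, ndim=2, large=True)
# points.shape == (3, 3, 2) -- every combination of the 3 grid values along each of the 2 axes
# w == 0.5 -- spacing between the 3 points on [0, 1]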
def rolling_outlier_quantile(x, width, q, m):
"""Detect outliers by multiples of a quantile in a window.
Outliers are the array elements outside `m` times the `q`'th
quantile of deviations from the smoothed trend line, as calculated from
the trend line residuals. (For example, take the magnitude of the 95th
quantile times 5, and mark any elements greater than that value as
outliers.)
This is the smoothing method used in BIC-seq (doi:10.1073/pnas.1110574108)
with the parameters width=200, q=.95, m=5 for WGS.
Returns
-------
np.array
A boolean array of the same size as `x`, where outlier indices are True.
"""
if len(x) <= width:
return np.zeros(len(x), dtype=np.bool_)
dists = np.abs(x - savgol(x, width))
quants = rolling_quantile(dists, width, q)
outliers = (dists > quants * m)
return outliers | 3c28fa245c8dfce03958dee33c47828eb38ac979 | 3,651,811 |
def compute_region_classification_len(dataset_output,
dataset_type: str):
"""
Compute the number of points per class and return a dictionary (dataset_type specifies the keys) with the results
"""
stable_region_indices, marginal_stable_region_indices, marginal_region_indices, marginal_unstable_region_indices, unstable_region_indices = compute_regions_belongings(
value=dataset_output)
region_len_dict = {f"len_{dataset_type}_stable_region": sum(stable_region_indices),
f"len_{dataset_type}_marginal_stable_region": sum(marginal_stable_region_indices),
f"len_{dataset_type}_marginal_region": sum(marginal_region_indices),
f"len_{dataset_type}_marginal_unstable_region": sum(marginal_unstable_region_indices),
f"len_{dataset_type}_unstable_region": sum(unstable_region_indices),
}
return region_len_dict | ba78d7d000b97cfcefa2acac263612ebd4aff377 | 3,651,812 |
def set_world_properties(world_uid, world_name=None, owner=None, config=None):
""" Set the properties of the given world """
return runtime.set_world_properties(world_uid, world_name, owner, config) | 4c063554390c0fb33ec74394a5a7cc967d55211d | 3,651,813 |
def _resize_and_pad(img, desired_size):
"""
Resize an image so that its longer side equals desired_size, preserving aspect ratio,
then pad it with black borders to a desired_size x desired_size square.
:param img: input image as a numpy array in (height, width[, channels]) order
:param desired_size: target side length of the square output
:return: the resized and padded image
"""
old_size = img.shape[:2] # old_size is in (height, width) format
ratio = float(desired_size) / max(old_size)
new_size = tuple([int(x * ratio) for x in old_size])
if new_size[0] == 0:
new_size = (new_size[0] + 1, new_size[1])
if new_size[1] == 0:
new_size = (new_size[0], new_size[1] + 1)
# New_size should be in (width, height) format
im = cv2.resize(img, (new_size[1], new_size[0]))
delta_w = desired_size - new_size[1]
delta_h = desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
left, right = delta_w // 2, delta_w - (delta_w // 2)
color = [0, 0, 0]
img = cv2.copyMakeBorder(im, top, bottom, left, right,
cv2.BORDER_CONSTANT,
value=color)
return img | 1053748c0a303e3b5b3712623a089e42ba822301 | 3,651,814 |
from typing import Tuple
import os
from typing import cast
def _terminal_size(fallback: Tuple[int, int]) -> Tuple[int, int]:
"""
Try to get the size of the terminal window.
If it fails, the passed fallback will be returned.
"""
for i in (0, 1):
try:
window_width = os.get_terminal_size(i)
return cast(Tuple[int, int], tuple(window_width))
except OSError:
continue
return fallback | 3254068444167bad0b87479001b0c22887b32a60 | 3,651,815 |
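A usage sketch with an 80x24 fallback for non-interactive environments (e.g. pipes or CI):

cols, rows = _terminal_size(fallback=(80, 24))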
from cntk.ops.cntk2 import Dropout
def dropout(x, name=None):
"""
Compute a new tensor with `dropoutRate` percent of the values set to zero. The values
that are set to zero are randomly chosen. This is commonly used to prevent
overfitting during the training process.
The output tensor has the same shape as `x`, but with `dropoutRate` of the
elements set to zero (dropped out).
Args:
x: source tensor
name (str): the name of the node in the network
Returns:
:class:`cntk.graph.ComputationNode`
"""
op = Dropout(x, name = name)
wrap_numpy_arrays(op)
op.rank = op._.rank
return op | ae688aa478842ba451b92de2bc0503e42f1a9363 | 3,651,816 |
def mol2graph(crystal_batch: CrystalDataset, args: Namespace) -> BatchMolGraph:
"""
Converts a batch of crystals to a BatchMolGraph containing the batch of molecular graphs.
:param crystal_batch: A CrystalDataset of crystal data points.
:param args: Arguments.
:return: A BatchMolGraph containing the combined molecular graph for the crystals.
"""
crystal_graphs = list()
for crystal_point in crystal_batch:
if crystal_point in CRYSTAL_TO_GRAPH.keys():
crystal_graph = CRYSTAL_TO_GRAPH[crystal_point]
else:
crystal_graph = MolGraph(crystal_point, args)
if not args.no_cache and len(CRYSTAL_TO_GRAPH) <= 10000:
CRYSTAL_TO_GRAPH[crystal_point] = crystal_graph
crystal_graphs.append(crystal_graph)
return BatchMolGraph(crystal_graphs, args) | 604351ad5ae6c1ccfa6ce01a1e7b03c5e80ff2a4 | 3,651,817 |
import ast
def _compile(s: str):
"""compiles string into AST.
:param s: string to be compiled into AST.
:type s: str
"""
return compile(
source = s,
filename = '<unknown>',
mode = 'eval',
flags = ast.PyCF_ONLY_AST,
) | 4709cfa84ab6e5d7210924cf3aa206a1d297b7bd | 3,651,818 |
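A small sketch showing the returned AST node for a simple expression:

import ast

tree = _compile("1 + 2")
print(isinstance(tree, ast.Expression))  # True
print(ast.dump(tree))  # roughly: Expression(body=BinOp(left=Constant(1), op=Add(), right=Constant(2)))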
def temp_get_users_with_permission_form(self):
"""Used to test that swapping the Form method works"""
# Search string: ABC
return () | 72390791304d62fc5d78720aac4e2807e918587c | 3,651,819 |
def create_resource_types(raml_data, root):
"""
Parse resourceTypes into ``ResourceTypeNode`` objects.
:param dict raml_data: Raw RAML data
:param RootNode root: Root Node
:returns: list of :py:class:`.raml.ResourceTypeNode` objects
"""
# TODO: move this outside somewhere - config?
accepted_methods = root.config.get("http_optional")
#####
# Helper functions
#####
def get_union(resource, method, inherited):
union = {}
for key, value in list(iteritems(inherited)):
if resource.get(method) is not None:
if key not in list(iterkeys(resource.get(method, {}))):
union[key] = value
else:
resource_values = resource.get(method, {}).get(key)
inherited_values = inherited.get(key, {})
union[key] = dict(list(iteritems(resource_values)) +
list(iteritems(inherited_values)))
if resource.get(method) is not None:
for key, value in list(iteritems(resource.get(method, {}))):
if key not in list(iterkeys(inherited)):
union[key] = value
return union
def get_inherited_resource(res_name):
for resource in resource_types:
if res_name == list(iterkeys(resource))[0]:
return resource
def get_inherited_type(root, resource, type, raml):
inherited = get_inherited_resource(type)
res_type_objs = []
for key, value in list(iteritems(resource)):
for i in list(iterkeys(value)):
if i in accepted_methods:
data_union = get_union(
value, i, list(itervalues(inherited))[0].get(i, {})
)
# res = wrap(key, data_union, i)
res = ResourceTypeNode(
name=key,
raw=data_union,
root=root,
headers=headers(data_union.get("headers", {})),
body=body(data_union.get("body", {})),
responses=responses(data_union),
uri_params=uri_params(data_union),
base_uri_params=base_uri_params(data_union),
query_params=query_params(data_union),
form_params=form_params(data_union),
media_type=media_type(),
desc=description(),
type=type_(),
method=method(i),
usage=usage(),
optional=optional(),
is_=is_(data_union),
traits=traits(data_union),
secured_by=secured_by(data_union),
security_schemes=security_schemes(data_union),
display_name=display_name(data_union, key),
protocols=protocols(data_union)
)
res_type_objs.append(res)
return res_type_objs
def get_scheme(item):
schemes = raml_data.get("securitySchemes", [])
for s in schemes:
if item == list(iterkeys(s))[0]:
return s
def get_inherited_type_params(data, attribute, params):
inherited = get_inherited_resource(data.get("type"))
inherited = inherited.get(data.get("type"))
inherited_params = inherited.get(attribute, {})
return dict(list(iteritems(params)) +
list(iteritems(inherited_params)))
def get_attribute(res_data, method_data, item, default={}):
method_level = _get(method_data, item, default)
resource_level = _get(res_data, item, default)
return method_level, resource_level
def get_inherited_item(items, item_name):
inherited = get_inherited_resource(v.get("type"))
resource = inherited.get(v.get("type"))
res_level = resource.get(meth, {}).get(item_name, {})
method = resource.get(meth, {})
method_level = method.get(item_name, {})
items = dict(
list(iteritems(items)) +
list(iteritems(res_level)) +
list(iteritems(method_level))
)
return items
def get_attribute_dict(data, item):
resource_level = _get(v, item, {})
method_level = _get(data, item, {})
return dict(list(iteritems(resource_level)) +
list(iteritems(method_level)))
#####
# Set ResourceTypeNode attributes
#####
def display_name(data, name):
return data.get("displayName", name)
def headers(data):
_headers = _get(data, "headers", {})
if _get(v, "type"):
_headers = get_inherited_item(_headers, "headers")
header_objs = _create_base_param_obj(_headers, Header, root.config)
if header_objs:
for h in header_objs:
h.method = method(meth)
return header_objs
def body(data):
_body = _get(data, "body", default={})
if _get(v, "type"):
_body = get_inherited_item(_body, "body")
body_objects = []
for key, value in list(iteritems(_body)):
body = Body(
mime_type=key,
raw=value,
schema=load_schema(value.get("schema")),
example=load_schema(value.get("example")),
form_params=value.get("formParameters"),
config=root.config
)
body_objects.append(body)
return body_objects or None
def responses(data):
response_objects = []
_responses = _get(data, "responses", {})
if _get(v, "type"):
_responses = get_inherited_item(_responses, "responses")
for key, value in list(iteritems(_responses)):
_headers = data.get("responses", {}).get(key, {})
_headers = _get(_headers, "headers", {})
header_objs = _create_base_param_obj(_headers, Header, root.config)
if header_objs:
for h in header_objs:
h.method = method(meth)
response = Response(
code=key,
raw={key: value},
desc=_get(value, "description"),
headers=header_objs,
body=body(value),
config=root.config,
method=method(meth)
)
response_objects.append(response)
if response_objects:
return sorted(response_objects, key=lambda x: x.code)
return None
def uri_params(data):
uri_params = get_attribute_dict(data, "uriParameters")
if _get(v, "type"):
uri_params = get_inherited_type_params(v, "uriParameters",
uri_params)
return _create_base_param_obj(uri_params, URIParameter, root.config)
def base_uri_params(data):
uri_params = get_attribute_dict(data, "baseUriParameters")
return _create_base_param_obj(uri_params, URIParameter, root.config)
def query_params(data):
query_params = get_attribute_dict(data, "queryParameters")
if _get(v, "type"):
query_params = get_inherited_type_params(v, "queryParameters",
query_params)
return _create_base_param_obj(query_params, QueryParameter,
root.config)
def form_params(data):
form_params = get_attribute_dict(data, "formParameters")
if _get(v, "type"):
form_params = get_inherited_type_params(v, "formParameters",
form_params)
return _create_base_param_obj(form_params, FormParameter, root.config)
def media_type():
return _get(v, "mediaType")
def description():
return _get(v, "description")
def type_():
return _get(v, "type")
def method(meth):
if not meth:
return None
if "?" in meth:
return meth[:-1]
return meth
def usage():
return _get(v, "usage")
def optional():
if meth:
return "?" in meth
def protocols(data):
m, r = get_attribute(v, data, "protocols", None)
if m:
return m
return r
def is_(data):
m, r = get_attribute(v, data, "is", default=[])
return m + r or None
def get_trait(item):
traits = raml_data.get("traits", [])
for t in traits:
if item == list(iterkeys(t))[0]:
return t
# TODO: clean up
def traits(data):
assigned = is_(data)
if assigned:
trait_objs = []
for item in assigned:
assigned_trait = get_trait(item)
raw_data = list(itervalues(assigned_trait))[0]
trait = TraitNode(
name=list(iterkeys(assigned_trait))[0],
raw=raw_data,
root=root,
headers=headers(raw_data),
body=body(raw_data),
responses=responses(raw_data),
uri_params=uri_params(raw_data),
base_uri_params=base_uri_params(raw_data),
query_params=query_params(raw_data),
form_params=form_params(raw_data),
media_type=media_type(),
desc=description(),
usage=usage(),
protocols=protocols(raw_data)
)
trait_objs.append(trait)
return trait_objs
return None
def secured_by(data):
m, r = get_attribute(v, data, "securedBy", [])
return m + r or None
def security_schemes(data):
secured = secured_by(data)
if secured:
secured_objs = []
for item in secured:
assigned_scheme = get_scheme(item)
raw_data = list(itervalues(assigned_scheme))[0]
scheme = SecurityScheme(
name=list(iterkeys(assigned_scheme))[0],
raw=raw_data,
type=raw_data.get("type"),
described_by=raw_data.get("describedBy"),
desc=raw_data.get("description"),
settings=raw_data.get("settings"),
config=root.config
)
secured_objs.append(scheme)
return secured_objs
return None
def wrap(key, data, meth, v):
return ResourceTypeNode(
name=key,
raw=data,
root=root,
headers=headers(data),
body=body(data),
responses=responses(data),
uri_params=uri_params(data),
base_uri_params=base_uri_params(data),
query_params=query_params(data),
form_params=form_params(data),
media_type=media_type(),
desc=description(),
type=type_(),
method=method(meth),
usage=usage(),
optional=optional(),
is_=is_(data),
traits=traits(data),
secured_by=secured_by(data),
security_schemes=security_schemes(data),
display_name=display_name(data, key),
protocols=protocols(data)
)
resource_types = raml_data.get("resourceTypes", [])
resource_type_objects = []
for res in resource_types:
for k, v in list(iteritems(res)):
if isinstance(v, dict):
if "type" in list(iterkeys(v)):
r = get_inherited_type(root, res, v.get("type"), raml_data)
resource_type_objects.extend(r)
else:
for meth in list(iterkeys(v)):
if meth in accepted_methods:
method_data = v.get(meth, {})
resource = wrap(k, method_data, meth, v)
resource_type_objects.append(resource)
else:
meth = None
resource = wrap(k, {}, meth, v)
resource_type_objects.append(resource)
return resource_type_objects or None | e3858924b842b3d3d6a7bbd3b0a2783a0b054a06 | 3,651,820 |
def permute_images(images, permutation_index):
"""
Permute pixels in all images.
:param images: numpy array of images
:param permutation_index: index of the permutation (#permutations = #tasks - 1)
:return: numpy array of permuted images (of the same size)
"""
# seed = np.random.randint(low=4294967295, dtype=np.uint32) # make a random seed for all images in an array
# baseline and superposition have the same permutation of images for the corresponding task
global seeds
seed = seeds[permutation_index] # the same permutation each run for the first, second, ... task
return np.array([permute_pixels(im, seed) for im in images]) | 5742c9c2bce5012b0c17b60eb5e66328b91e53b4 | 3,651,821 |
def new_user(request, id):
"""
Page for creating users after registering a person.
The person must be either a volunteer, an NGO employee, or a government employee.
"""
msg = ''
password = ''
try:
person_id = int(id)
# Get Name
user = RegPerson.objects.get(pk=person_id)
personfname = user.first_name
personsname = user.surname
names = user.full_name
if request.method == 'POST':
form = NewUser(user, data=request.POST)
username = request.POST.get('username')
password1 = request.POST.get('password1')
password2 = request.POST.get('password2')
# resolve existing account
user_exists = AppUser.objects.filter(reg_person=person_id)
if user_exists:
msg = 'Person ({} {}) has an existing user account.'.format(
personfname, personsname)
messages.add_message(request, messages.INFO, msg)
return HttpResponseRedirect(reverse(persons_search))
if password1 == password2:
password = password1
else:
msg = 'Passwords do not match!'
messages.add_message(request, messages.INFO, msg)
form = NewUser(user, data=request.POST)
return render(request, 'registry/new_user.html',
{'form': form}, )
# validate username if__exists
username_exists = AppUser.objects.filter(username__iexact=username)
if username_exists:
msg = 'Username ({}) is taken. Pick another one.'.format(
username)
messages.add_message(request, messages.INFO, msg)
form = NewUser(user, data=request.POST)
return render(request, 'registry/new_user.html',
{'form': form}, )
else:
# Create User
user = AppUser.objects.create_user(username=username,
reg_person=person_id,
password=password)
if user:
user.groups.add(Group.objects.get(
name='Standard logged in'))
# Capture msg & op status
msg = 'User ({}) save success.'.format(username)
messages.add_message(request, messages.INFO, msg)
return HttpResponseRedirect(
'%s?id=%d' % (reverse(persons_search), int(person_id)))
else:
form = NewUser(user)
return render(request, 'registry/new_user.html',
{'names': names, 'form': form}, )
except Exception as e:
msg = 'Error - ({}) '.format(str(e))
messages.add_message(request, messages.ERROR, msg)
return HttpResponseRedirect(reverse(persons_search)) | 26ff9e3fa289915218a6f60e138a3491955c0228 | 3,651,822 |
def multi_conv(func=None, options=None):
"""A function decorator for generating multi-convolution operations.
Multi-convolutions allow for a set of data-independent convolutions to be
executed in parallel. Executing convolutions in parallel can lead to an
increase in the data throughput.
The ``multi_conv`` function decorator is a convenient way to generate
multi-convolutions - it detects all the convolution operations inside of the
decorated function and executes them in parallel.
For example:
.. code-block:: python
from tensorflow import keras
from tensorflow.python import ipu
@ipu.nn_ops.multi_conv
def convs(x, y, z):
x = keras.layers.DepthwiseConv2D(8, 2, depth_multiplier=2)(x)
y = keras.layers.DepthwiseConv2D(16, 4, depth_multiplier=2)(y)
z = keras.layers.Conv2D(8, 3)(z)
return x, y, z
Will detect and execute the three convolutions ``x``, ``y`` and ``z`` in
parallel.
Note that any operations which are not convolutions, such as bias add
operations, will be executed in the same way as if they were not inside of a
``multi_conv`` decorated function.
It is also possible to set PopLibs multi-convolution options using this
decorator.
For example:
.. code-block:: python
from tensorflow import keras
from tensorflow.python import ipu
@ipu.nn_ops.multi_conv(options={"perConvReservedTiles":"50"})
def convs(x, y, z):
x = keras.layers.DepthwiseConv2D(8, 2, depth_multiplier=2)(x)
y = keras.layers.DepthwiseConv2D(16, 4, depth_multiplier=2)(y)
z = keras.layers.Conv2D(8, 3)(z)
return x, y, z
See the PopLibs documentation for the list of all available flags.
Note that these options will also be applied to the gradient operations
generated during backpropagation.
Args:
func: A python function which takes a list of positional arguments only. All
the arguments must be `tf.Tensor`-like objects, or be convertible to them.
The function provided must return at least one `tf.Tensor`-like object.
options: A dictionary of Poplar option flags for multi-convolution. See the
multi-convolution PopLibs documentation for available flags.
"""
def decorated(inner_func):
def multi_conv_wrapper(*args):
inner_options = options if options else {}
if not isinstance(inner_options, dict):
raise TypeError(
"Expected the multi_conv `options` to be a `dict`, but got %s "
"instead." % (str(inner_options)))
option_proto = option_flag_pb2.PoplarOptionFlags()
for key, value in inner_options.items():
flag = option_proto.flags.add()
flag.option = key
flag.value = value
def func_wrapper(*args):
with ops.get_default_graph().as_default() as g:
with g.gradient_override_map(_gradient_override_map):
return inner_func(*args)
args = functional_ops._convert_to_list(args) # pylint: disable=protected-access
with ops.name_scope("multi_conv") as scope:
func_graph, captured_args = functional_ops._compile_function( # pylint: disable=protected-access
func_wrapper,
args,
scope, [],
allow_external_captures=True)
with ops.control_dependencies(list(func_graph.control_captures)):
outputs = gen_functional_ops.multi_conv(
captured_args,
to_apply=util.create_new_tf_function(func_graph),
Tout=func_graph.output_types,
output_shapes=func_graph.output_shapes,
option_flags=json_format.MessageToJson(option_proto))
return func_graph_module.pack_sequence_as(func_graph.structured_outputs,
outputs)
return multi_conv_wrapper
if func is not None:
return decorated(func)
return decorated | d1c9a69fbcec7b374142bc7568fc89ba8dddb0b9 | 3,651,823 |
def hi_joseangel():
""" Hi Jose Angel Function """
return "hi joseangel!" | 5889a51977d3ec2269040a9a7e7968801209ff25 | 3,651,824 |
import time
def received_date_date(soup):
"""
Find the "received" history date and return it in human-readable form
"""
received_date = get_history_date(soup, date_type = "received")
date_string = None
try:
date_string = time.strftime("%B %d, %Y", received_date)
except(TypeError):
# Date did not convert
pass
return date_string | 3963d846a64e06ed0d2e60b7ecba26efcd4d9e6e | 3,651,825 |
def is_on(hass, entity_id):
""" Returns if the group state is in its ON-state. """
state = hass.states.get(entity_id)
if state:
group_type = _get_group_type(state.state)
# If we found a group_type, compare to ON-state
return group_type and state.state == _GROUP_TYPES[group_type][0]
return False | 8e77a7a3f4a09d68d92d58105b3d5a36c830cd0c | 3,651,826 |
def pytest_report_header(config, startdir):
"""return a string to be displayed as header info for terminal reporting."""
capabilities = config.getoption('capabilities')
if capabilities:
return 'capabilities: {0}'.format(capabilities) | 4e6ada67f5f08c1db8f5b6206089db4e3ee84f46 | 3,651,827 |
def chessboard_distance(x_a, y_a, x_b, y_b):
"""
Compute the Chebyshev (chessboard) distance between
point (x_a, y_a) and (x_b, y_b)
"""
return max(abs(x_b-x_a),abs(y_b-y_a)) | 9b11bf328faf3b231df23585914f20c2efd02bf9 | 3,651,828 |
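For example, a chess king needs five moves to cover this displacement:

print(chessboard_distance(0, 0, 3, 5))  # 5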
from pgmpy.factors import TabularCPD
from pgmpy.models import BayesianModel
import pandas as pd
from pgmpy.inference import VariableElimination # NOQA
from pgmpy.factors import TabularCPD
import pgmpy
import plottool as pt
import networkx as netx
def bayesnet():
"""
References:
https://class.coursera.org/pgm-003/lecture/17
http://www.cs.ubc.ca/~murphyk/Bayes/bnintro.html
http://www3.cs.stonybrook.edu/~sael/teaching/cse537/Slides/chapter14d_BP.pdf
http://www.cse.unsw.edu.au/~cs9417ml/Bayes/Pages/PearlPropagation.html
https://github.com/pgmpy/pgmpy.git
http://pgmpy.readthedocs.org/en/latest/
http://nipy.bic.berkeley.edu:5000/download/11
"""
# import operator as op
# # Enumerate all possible events
# varcard_list = list(map(op.attrgetter('variable_card'), cpd_list))
# _esdat = list(ut.iprod(*map(range, varcard_list)))
# _escol = list(map(op.attrgetter('variable'), cpd_list))
# event_space = pd.DataFrame(_esdat, columns=_escol)
# # Custom compression of event space to inspect a specific graph
# def compress_space_flags(event_space, var1, var2, var3, cmp12_):
# """
# var1, var2, cmp_ = 'Lj', 'Lk', op.eq
# """
# import vtool as vt
# data = event_space
# other_cols = ut.setdiff_ordered(data.columns.tolist(), [var1, var2, var3])
# case_flags12 = cmp12_(data[var1], data[var2]).values
# # case_flags23 = cmp23_(data[var2], data[var3]).values
# # case_flags = np.logical_and(case_flags12, case_flags23)
# case_flags = case_flags12
# case_flags = case_flags.astype(np.int64)
# subspace = np.hstack((case_flags[:, None], data[other_cols].values))
# sel_ = vt.unique_row_indexes(subspace)
# flags = np.logical_and(mask, case_flags)
# return flags
# # Build special cases
# case_same = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.eq)]
# case_diff = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.ne)]
# special_cases = [
# case_same,
# case_diff,
# ]
name_nice = ['n1', 'n2', 'n3']
score_nice = ['low', 'high']
match_nice = ['diff', 'same']
num_names = len(name_nice)
num_scores = len(score_nice)
nid_basis = list(range(num_names))
score_basis = list(range(num_scores))
semtype2_nice = {
'score': score_nice,
'name': name_nice,
'match': match_nice,
}
var2_cpd = {
}
globals()['semtype2_nice'] = semtype2_nice
globals()['var2_cpd'] = var2_cpd
name_combo = np.array(list(ut.iprod(nid_basis, nid_basis)))
combo_is_same = name_combo.T[0] == name_combo.T[1]
def get_expected_scores_prob(level1, level2):
part1 = combo_is_same * level1
part2 = (1 - combo_is_same) * (1 - (level2))
expected_scores_level = part1 + part2
return expected_scores_level
# def make_cpd():
def name_cpd(aid):
cpd = TabularCPD(
variable='N' + aid,
variable_card=num_names,
values=[[1.0 / num_names] * num_names])
cpd.semtype = 'name'
return cpd
name_cpds = [name_cpd('i'), name_cpd('j'), name_cpd('k')]
var2_cpd.update(dict(zip([cpd.variable for cpd in name_cpds], name_cpds)))
if True:
num_same_diff = 2
samediff_measure = np.array([
# get_expected_scores_prob(.12, .2),
# get_expected_scores_prob(.88, .8),
get_expected_scores_prob(0, 0),
get_expected_scores_prob(1, 1),
])
samediff_vals = (samediff_measure / samediff_measure.sum(axis=0)).tolist()
def samediff_cpd(aid1, aid2):
cpd = TabularCPD(
variable='A' + aid1 + aid2,
variable_card=num_same_diff,
values=samediff_vals,
evidence=['N' + aid1, 'N' + aid2], # [::-1],
evidence_card=[num_names, num_names]) # [::-1])
cpd.semtype = 'match'
return cpd
samediff_cpds = [samediff_cpd('i', 'j'), samediff_cpd('j', 'k'), samediff_cpd('k', 'i')]
var2_cpd.update(dict(zip([cpd.variable for cpd in samediff_cpds], samediff_cpds)))
if True:
def score_cpd(aid1, aid2):
semtype = 'score'
evidence = ['A' + aid1 + aid2, 'N' + aid1, 'N' + aid2]
evidence_cpds = [var2_cpd[key] for key in evidence]
evidence_nice = [semtype2_nice[cpd.semtype] for cpd in evidence_cpds]
evidence_card = list(map(len, evidence_nice))
evidence_states = list(ut.iprod(*evidence_nice))
variable_basis = semtype2_nice[semtype]
variable_values = []
for mystate in variable_basis:
row = []
for state in evidence_states:
if state[0] == state[1]:
if state[2] == 'same':
val = .2 if mystate == 'low' else .8
else:
val = 1
# val = .5 if mystate == 'low' else .5
elif state[0] != state[1]:
if state[2] == 'same':
val = .5 if mystate == 'low' else .5
else:
val = 1
# val = .9 if mystate == 'low' else .1
row.append(val)
variable_values.append(row)
cpd = TabularCPD(
variable='S' + aid1 + aid2,
variable_card=len(variable_basis),
values=variable_values,
evidence=evidence, # [::-1],
evidence_card=evidence_card) # [::-1])
cpd.semtype = semtype
return cpd
else:
score_values = [
[.8, .1],
[.2, .9],
]
def score_cpd(aid1, aid2):
cpd = TabularCPD(
variable='S' + aid1 + aid2,
variable_card=num_scores,
values=score_values,
evidence=['A' + aid1 + aid2], # [::-1],
evidence_card=[num_same_diff]) # [::-1])
cpd.semtype = 'score'
return cpd
score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')]
cpd_list = name_cpds + score_cpds + samediff_cpds
else:
score_measure = np.array([get_expected_scores_prob(level1, level2)
for level1, level2 in
zip(np.linspace(.1, .9, num_scores),
np.linspace(.2, .8, num_scores))])
score_values = (score_measure / score_measure.sum(axis=0)).tolist()
def score_cpd(aid1, aid2):
cpd = TabularCPD(
variable='S' + aid1 + aid2,
variable_card=num_scores,
values=score_values,
evidence=['N' + aid1, 'N' + aid2],
evidence_card=[num_names, num_names])
cpd.semtype = 'score'
return cpd
score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')]
cpd_list = name_cpds + score_cpds
pass
input_graph = []
for cpd in cpd_list:
if cpd.evidence is not None:
for evar in cpd.evidence:
input_graph.append((evar, cpd.variable))
name_model = BayesianModel(input_graph)
name_model.add_cpds(*cpd_list)
var2_cpd.update(dict(zip([cpd.variable for cpd in cpd_list], cpd_list)))
globals()['var2_cpd'] = var2_cpd
varnames = [cpd.variable for cpd in cpd_list]
# --- PRINT CPDS ---
cpd = score_cpds[0]
def print_cpd(cpd):
print('CPT: %r' % (cpd,))
index = semtype2_nice[cpd.semtype]
if cpd.evidence is None:
columns = ['None']
else:
basis_lists = [semtype2_nice[var2_cpd[ename].semtype] for ename in cpd.evidence]
columns = [','.join(x) for x in ut.iprod(*basis_lists)]
data = cpd.get_cpd()
print(pd.DataFrame(data, index=index, columns=columns))
for cpd in name_model.get_cpds():
print('----')
print(cpd._str('phi'))
print_cpd(cpd)
# --- INFERENCE ---
Ni = name_cpds[0]
event_space_combos = {}
event_space_combos[Ni.variable] = 0 # Set ni to always be Fred
for cpd in cpd_list:
if cpd.semtype == 'score':
event_space_combos[cpd.variable] = list(range(cpd.variable_card))
evidence_dict = ut.all_dict_combinations(event_space_combos)
# Query about name of annotation k given different event space params
def pretty_evidence(evidence):
return [key + '=' + str(semtype2_nice[var2_cpd[key].semtype][val])
for key, val in evidence.items()]
def print_factor(factor):
row_cards = factor.cardinality
row_vars = factor.variables
values = factor.values.reshape(np.prod(row_cards), 1).flatten()
# col_cards = 1
# col_vars = ['']
basis_lists = list(zip(*list(ut.iprod(*[range(c) for c in row_cards]))))
nice_basis_lists = []
for varname, basis in zip(row_vars, basis_lists):
cpd = var2_cpd[varname]
_nice_basis = ut.take(semtype2_nice[cpd.semtype], basis)
nice_basis = ['%s=%s' % (varname, val) for val in _nice_basis]
nice_basis_lists.append(nice_basis)
row_lbls = [', '.join(sorted(x)) for x in zip(*nice_basis_lists)]
print(ut.repr3(dict(zip(row_lbls, values)), precision=3, align=True, key_order_metric='-val'))
# name_belief = BeliefPropagation(name_model)
name_belief = VariableElimination(name_model)
def try_query(evidence):
print('--------')
query_vars = ut.setdiff_ordered(varnames, list(evidence.keys()))
evidence_str = ', '.join(pretty_evidence(evidence))
probs = name_belief.query(query_vars, evidence)
factor_list = probs.values()
joint_factor = pgmpy.factors.factor_product(*factor_list)
print('P(' + ', '.join(query_vars) + ' | ' + evidence_str + ')')
# print(six.text_type(joint_factor))
factor = joint_factor # NOQA
# print_factor(factor)
# import utool as ut
print(ut.hz_str([(f._str(phi_or_p='phi')) for f in factor_list]))
for evidence in evidence_dict:
try_query(evidence)
evidence = {'Aij': 1, 'Ajk': 1, 'Aki': 1, 'Ni': 0}
try_query(evidence)
evidence = {'Aij': 0, 'Ajk': 0, 'Aki': 0, 'Ni': 0}
try_query(evidence)
globals()['score_nice'] = score_nice
globals()['name_nice'] = name_nice
globals()['score_basis'] = score_basis
globals()['nid_basis'] = nid_basis
print('Independencies')
print(name_model.get_independencies())
print(name_model.local_independencies([Ni.variable]))
# name_belief = BeliefPropagation(name_model)
# # name_belief = VariableElimination(name_model)
# for case in special_cases:
# test_data = case.drop('Lk', axis=1)
# test_data = test_data.reset_index(drop=True)
# print('----')
# for i in range(test_data.shape[0]):
# evidence = test_data.loc[i].to_dict()
# probs = name_belief.query(['Lk'], evidence)
# factor = probs['Lk']
# probs = factor.values
# evidence_ = evidence.copy()
# evidence_['Li'] = name_nice[evidence['Li']]
# evidence_['Lj'] = name_nice[evidence['Lj']]
# evidence_['Sij'] = score_nice[evidence['Sij']]
# evidence_['Sjk'] = score_nice[evidence['Sjk']]
# nice2_prob = ut.odict(zip(name_nice, probs.tolist()))
# ut.print_python_code('P(Lk | {evidence}) = {cpt}'.format(
# evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)),
# cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val')
# ))
# for case in special_cases:
# test_data = case.drop('Lk', axis=1)
# test_data = test_data.drop('Lj', axis=1)
# test_data = test_data.reset_index(drop=True)
# print('----')
# for i in range(test_data.shape[0]):
# evidence = test_data.loc[i].to_dict()
# query_vars = ['Lk', 'Lj']
# probs = name_belief.query(query_vars, evidence)
# for queryvar in query_vars:
# factor = probs[queryvar]
# print(factor._str('phi'))
# probs = factor.values
# evidence_ = evidence.copy()
# evidence_['Li'] = name_nice[evidence['Li']]
# evidence_['Sij'] = score_nice[evidence['Sij']]
# evidence_['Sjk'] = score_nice[evidence['Sjk']]
# nice2_prob = ut.odict(zip([queryvar + '=' + x for x in name_nice], probs.tolist()))
# ut.print_python_code('P({queryvar} | {evidence}) = {cpt}'.format(
# query_var=query_var,
# evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)),
# cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val')
# ))
# _ draw model
fig = pt.figure() # NOQA
fig.clf()
ax = pt.gca()
netx_nodes = [(node, {}) for node in name_model.nodes()]
netx_edges = [(etup[0], etup[1], {}) for etup in name_model.edges()]
netx_graph = netx.DiGraph()
netx_graph.add_nodes_from(netx_nodes)
netx_graph.add_edges_from(netx_edges)
# pos = netx.graphviz_layout(netx_graph)
pos = netx.pydot_layout(netx_graph, prog='dot')
netx.draw(netx_graph, pos=pos, ax=ax, with_labels=True)
pt.plt.savefig('foo.png')
ut.startfile('foo.png') | 05853fb3a7e84a1864399588af4b27390a1c8d31 | 3,651,829 |
def sigma_R(sim, Pk=None, z=None, non_lin=False):
""" return amplitude of density fluctuations
if given Pk -- C++ class Extrap_Pk or Extrap_Pk_Nl -- computes its sigma_R.
if given redshift, computes linear or non-linear (emulator) amplitude of density fluctuations """
sigma = fs.Data_Vec_2()
if Pk: # compute amplitude of density fluctuations from given continuous power spectrum
fs.gen_sigma_binned_gsl_qawf(sim, Pk, sigma)
elif z is not None: # compute (non-)linear amplitude of density fluctuations
a = 1./(1.+z) if z != 'init' else 1.0
if non_lin:
fs.gen_sigma_func_binned_gsl_qawf_lin(sim, a, sigma)
else:
fs.gen_sigma_func_binned_gsl_qawf_nl(sim, a, sigma)
else:
raise KeyError("Function 'sigma_R' called without arguments.")
return get_ndarray(sigma) | 956a4ca092ce56c1d8120c3b9047280306005326 | 3,651,830 |
def session_ended_request_handler(handler_input):
"""Handler for Session End."""
# type: (HandlerInput) -> Response
logger.info("Entering AMAZON.SessionEndedRequest")
save_data(handler_input)
return handler_input.response_builder.response | a3bd1c38699a69da0cdce0203ee0549e9132b1c1 | 3,651,831 |
import unittest
def _getTestSuite(testFiles):
"""
Loads unit tests recursively from beneath the current directory.
Inputs: testFiles - If non-empty, a list of unit tests to selectively run.
Outputs: A unittest.TestSuite object containing the unit tests to run.
"""
loader = unittest.TestLoader()
if testFiles:
return loader.loadTestsFromNames([".".join([TEST_DIR, testFile]) for testFile in testFiles])
return loader.discover(TEST_DIR) | 786baa4d70161e1ae6c60160460f379c66ea465c | 3,651,832 |
def stratifiedsmooth2stratifiedwavy_c(rho_gas, rho_liq, vel_gas, d_m, beta, mu_liq, mu_gas):
"""
Constructs the boundary of the transition from the stratified-smooth to the stratified-wavy structure
resulting from the "wind" effect
:param rho_gas: gas density
:param rho_liq: liquid density
:param vel_gas: superficial gas velocity
:param d_m: pipe diameter
:param beta: angle of inclination from the horizontal
:param mu_liq: liquid viscosity
:param mu_gas: gas viscosity
:return: superficial liquid velocity
"""
froude_number = (rho_gas / (rho_liq - rho_gas)) ** 0.5 * vel_gas / (d_m * uc.g * np.cos(beta * uc.pi / 180)) ** 0.5
vel_liq_0 = 0.0000001
def equation2solve(vel_liq):
re_sl = reynolds_number(rho_liq, vel_liq, d_m, mu_liq)
k = froude_number * re_sl ** 0.5
# k = froude_number ** 2 * re_sl
x = parameter_x(d_m, rho_liq, rho_gas, mu_liq, mu_gas, vel_gas, vel_liq)
y = parameter_y(d_m, rho_liq, rho_gas, mu_gas, vel_gas, beta)
h_l = combined_momentum_equation(x, y, d_m, rho_liq, rho_gas, mu_liq, mu_gas, vel_gas, vel_liq)
variables = dimensionless_variables(h_l)
v_g = variables[6]
s = 0.01
v_l = variables[5]
equation = k - 2 / (v_l ** 0.5 * v_g * s ** 0.5)
return equation
vel_liq = opt.fsolve(equation2solve, np.array(vel_liq_0))
return vel_liq | a80c1b5f400d4db36979960a26f5b914047abe8d | 3,651,833 |
def box(
data_frame=None,
x=None,
y=None,
color=None,
facet_row=None,
facet_row_weights=None,
facet_col=None,
facet_col_weights=None,
facet_col_wrap=0,
facet_row_spacing=None,
facet_col_spacing=None,
hover_name=None,
hover_data=None,
custom_data=None,
animation_frame=None,
animation_group=None,
category_orders=None,
labels=None,
color_discrete_sequence=None,
color_discrete_map=None,
orientation=None,
boxmode=None,
log_x=False,
log_y=False,
range_x=None,
range_y=None,
points=None,
notched=False,
title=None,
template=None,
width=None,
height=None,
):
"""
In a box plot, rows of `data_frame` are grouped together into a
box-and-whisker mark to visualize their distribution.
Each box spans from quartile 1 (Q1) to quartile 3 (Q3). The second
quartile (Q2) is marked by a line inside the box. By default, the
whiskers correspond to the box' edges +/- 1.5 times the interquartile
range (IQR: Q3-Q1), see "points" for other options.
"""
return make_figure(
args=locals(),
constructor=go.Box,
trace_patch=dict(boxpoints=points, notched=notched, x0=" ", y0=" "),
layout_patch=dict(boxmode=boxmode),
) | 2e5a22fd4fa875b4cb506c7d21ff91e56908ed65 | 3,651,834 |
def _infer_main_columns(df, index_level='Column Name', numeric_dtypes=_numeric_dtypes):
"""
All numeric columns up until the first non-numeric column are considered main columns.
:param df: The pd.DataFrame
:param index_level: Name of the index level of the column names. Default 'Column Name'
:param numeric_dtypes: Set of numpy.dtype containing all numeric types. Default int/float.
:returns: The names of the infered main columns
"""
columns = df.columns.get_level_values(index_level)
main_columns = []
for i,dtype in enumerate(df.dtypes):
if dtype in numeric_dtypes:
main_columns.append(columns[i])
else:
break
return main_columns | 3eae67b765ca7a1a048047e12511c1a9721f9fea | 3,651,835 |
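A sketch with pandas, assuming the module-level _numeric_dtypes default covers the int64/float64 dtypes used here; the column index is given the expected 'Column Name' level name:

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [0.5, 1.5], "c": ["x", "y"], "d": [3, 4]})
df.columns = pd.Index(df.columns, name="Column Name")
print(_infer_main_columns(df))  # ['a', 'b'] -- stops at the first non-numeric column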
def index():
"""
Display the home page.
:return:
"""
return render_template('index.html') | 8965ff54f131a0250f1a05183ceb79d6d677883c | 3,651,836 |
def two_phase(model, config):
"""Two-phase simulation workflow."""
wea_path, datetime_stamps = get_wea(config)
smx = gen_smx(wea_path, config.smx_basis, config.mtxdir)
pdsmx = prep_2phase_pt(model, config)
vdsmx = prep_2phase_vu(model, config)
if not config.no_multiply:
calc_2phase_pt(model, datetime_stamps, pdsmx, smx, config)
calc_2phase_vu(datetime_stamps, vdsmx, smx, config)
return pdsmx, vdsmx | 0f2eb619dcfea233446e90565bc1310ee1a3bc3f | 3,651,837 |
def str_with_tab(indent: int, text: str, uppercase: bool = True) -> str:
"""Create a string with ``indent`` spaces followed by ``text``."""
if uppercase:
text = text.upper()
return " " * indent + text | 3306ba86781d272a19b0e02ff8d06da0976d7282 | 3,651,838 |
def delete(card, files=None):
"""Delete individual notefiles and their contents.
Args:
card (Notecard): The current Notecard object.
files (array): A list of Notefiles to delete.
Returns:
string: The result of the Notecard request.
"""
req = {"req": "file.delete"}
if files:
req["files"] = files
return card.Transaction(req) | 2acfa67b7531244e44a183286a9d87b9ac849c83 | 3,651,839 |
def test_timed_info():
"""Test timed_info decorator"""
@timed_info
def target():
return "hello world"
result = target()
assert result == "hello world" | 4deb25b542bcc1a3ad2fc5859c2c3f243060b6d9 | 3,651,840 |
from typing import Set
def get_doc_word_token_set(doc: Doc, use_lemma=False) -> Set[str]:
"""Return the set of word texts (or lemmas) in a document (no repetition)."""
return set([token.lemma_ if use_lemma else token.text for token in get_word_tokens(doc)]) | 56e1d8bfcad363049b4dd455e728d5c4dd3754f5 | 3,651,841 |
from typing import List
def finding_the_percentage(n: int, arr: List[str], query_name: str) -> str:
"""
>>> finding_the_percentage(3, ['Krishna 67 68 69', 'Arjun 70 98 63',
... 'Malika 52 56 60'], 'Malika')
'56.00'
>>> finding_the_percentage(2, ['Harsh 25 26.5 28', 'Anurag 26 28 30'],
... 'Harsh')
'26.50'
"""
student_marks = {}
for i in range(n):
name, *line = arr[i].split()
scores = list(map(float, line))
student_marks[name] = sum(scores)/len(scores)
return '{:.2f}'.format(student_marks[query_name]) | 86c2ad777c667f9ba424bc2b707f46465a10accc | 3,651,842 |
def mvg_logpdf_fixedcov(x, mean, inv_cov):
"""
Log-pdf of the multivariate Gaussian distribution where the determinant and inverse of the covariance matrix are
precomputed and fixed.
Note that this neglects the additive constant: -0.5 * (len(x) * log(2 * pi) + log_det_cov), because it is
irrelevant when comparing pdf values with a fixed covariance, but it means that this is not the normalised pdf.
Args:
x (1D numpy array): Vector value at which to evaluate the pdf.
mean (1D numpy array): Mean vector of the multivariate Gaussian distribution.
inv_cov (2D numpy array): Inverted covariance matrix.
Returns:
float: Log-pdf value.
"""
dev = x - mean
return -0.5 * (dev @ inv_cov @ dev) | 648d1925ed4b4793e8e1ce1cec8c7ccd0efb9f6b | 3,651,843 |
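A small numerical check, assuming numpy: with zero mean and an identity inverse covariance the result reduces to -0.5 * x.x:

import numpy as np

x = np.array([1.0, 2.0])
print(mvg_logpdf_fixedcov(x, mean=np.zeros(2), inv_cov=np.eye(2)))  # -2.5 == -0.5 * (1**2 + 2**2)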
from pathlib import Path
import sys
def app_dir(app_name: str = APP_NAME) -> Path:
"""Finds the application data directory for the current platform.
If it does not exists, it creates the required directory tree.
Returns:
The path to the root app directory.
"""
if sys.platform == "win32":
path = Path.home() / "AppData" / "Local" / app_name
elif sys.platform == "darwin":
path = Path.home() / "Library" / "Application Support" / app_name
else:
path = Path.home() / f".{app_name}"
_create_tree(path)
return path | 9ed4ae1ec1113a806ac2fc9c2488a12ace338c2a | 3,651,844 |
def make_frac_grid(frac_spacing, numrows=50, numcols=50, model_grid=None,
seed=0):
"""Create a grid that contains a network of random fractures.
Creates and returns a grid containing a network of random fractures, which
are represented as 1's embedded in a grid of 0's.
Parameters
----------
frac_spacing : int
Average spacing of fractures (in grid cells)
numrows : int, optional
Number of rows in grid (if model_grid parameter is given,
uses values from the model grid instead)
numcols : int, optional
Number of columns in grid (if model_grid parameter is given,
uses values from the model grid instead)
model_grid : Landlab RasterModelGrid object, optional
RasterModelGrid to use for grid size
seed : int, optional
Seed used for random number generator
Returns
-------
m : Numpy array
Array containing fracture grid, represented as 0's (matrix) and 1's
(fractures). If model_grid parameter is given, returns a 1D array
corresponding to a node-based array in the model grid. Otherwise,
returns a 2D array with dimensions given by numrows, numcols.
"""
# Make an initial grid of all zeros. If user specified a model grid,
# use that. Otherwise, use the given dimensions.
if model_grid is not None:
numrows = model_grid.shape[0]
numcols = model_grid.shape[1]
m = zeros((numrows,numcols), dtype=int)
# Add fractures to grid
nfracs = (numrows + numcols) // frac_spacing
for i in range(nfracs):
(y, x) = calculate_fracture_starting_position((numrows, numcols), seed+i)
ang = calculate_fracture_orientation((y, x), seed+i)
(dy, dx) = calculate_fracture_step_sizes((y, x), ang)
trace_fracture_through_grid(m, (y, x), (dy, dx))
# If we have a model_grid, flatten the frac grid so it's equivalent to
# a node array.
if model_grid is not None:
m.shape = (m.shape[0]*m.shape[1])
return m | 2e1ffc1bab30726dcbbe1b022c6cf92920c2dcc2 | 3,651,845 |
import colorsys
def generate_colors():
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
N = 30
brightness = 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
perm = [15, 13, 25, 12, 19, 8, 22, 24, 29, 17, 28, 20, 2, 27, 11, 26, 21, 4, 3, 18, 9, 5, 14, 1, 16, 0, 23, 7, 6, 10]
colors = [colors[idx] for idx in perm]
return colors | ee8951d66972190e6d1dcd5dc5c211d5631f6841 | 3,651,846 |
def secant_method(f, x0, x1, iterations):
"""Return the root calculated using the secant method."""
for i in range(iterations):
f_x1 = f(x1)
x2 = x1 - f_x1 * (x1 - x0) / (f_x1 - f(x0) + 1e-9).float()
x0, x1 = x1, x2
return x2 | 081522ae8e68ad14cb67f8afc03989c46f3999d5 | 3,651,847 |
def to_skopt_space(x):
"""converts the space x into skopt compatible space"""
if isinstance(x, list):
if all([isinstance(s, Dimension) for s in x]):
_space = Space(x)
elif len(x) == 1 and isinstance(x[0], tuple):
if len(x[0]) == 2:
if 'int' in x[0][0].__class__.__name__:
_space = Integer(low=x[0][0], high=x[0][1])
elif 'float' in x[0][0].__class__.__name__:
_space = Integer(low=x[0][0], high=x[0][1])
else:
raise NotImplementedError
else:
raise NotImplementedError
elif all([s.__class__.__name__== "Apply" for s in x]):
_space = Space([skopt_space_from_hp_space(v) for v in x])
else:
raise NotImplementedError
elif isinstance(x, dict): # todo, in random, should we build Only Categorical space?
space_ = []
for k, v in x.items():
if isinstance(v, list):
s = space_from_list(v, k)
elif isinstance(v, Dimension):
# it is possible that the user has not specified the name so assign the names
# because we have keys.
if v.name is None or v.name.startswith('real_') or v.name.startswith('integer_'):
v.name = k
s = v
elif v.__class__.__name__== "Apply" or 'rv_frozen' in v.__class__.__name__:
s = skopt_space_from_hp_space(v, k)
elif isinstance(v, tuple):
s = Categorical(v, name=k)
elif isinstance(v, np.ndarray):
s = Categorical(v.tolist(), name=k)
else:
raise NotImplementedError(f"unknown type {v}, {type(v)}")
space_.append(s)
# todo, why converting to Space
_space = Space(space_) if len(space_) > 0 else None
elif 'rv_frozen' in x.__class__.__name__ or x.__class__.__name__== "Apply":
_space = Space([skopt_space_from_hp_space(x)])
else:
raise NotImplementedError(f"unknown type {x}, {type(x)}")
return _space | c5f4afdf2f3f3e36abe6cf8a8e12e20992aa13f7 | 3,651,848 |
import requests
def build_response(status=OK, etag='etag', modified='modified', max_age=None):
"""Make a requests.Response object suitable for testing.
Args:
status: HTTP status
etag: etag cache-control header
modified: last-modified cache-control header
max_age: Cache-Control max-age value (set large for a fresh cache,
None or 0 for a stale one; defaults to stale)
Returns:
A Response instance populated according to the arguments.
"""
headers = {'last-modified': modified, 'etag': etag, 'Cache-Control':
'max-age={}'.format(max_age)}
test_response = requests.Response()
test_response.status_code = status
test_response.headers = headers
return test_response | f9a97da74b7802511180f30dc45e9df5d5e87f51 | 3,651,849 |
from typing import List
from re import T
def split_list(big_list: List[T], delimiter: T) -> List[List[T]]:
"""Like string.split(foo), except for lists."""
cur_list: List[T] = []
parts: List[List[T]] = []
for item in big_list:
if item == delimiter:
if cur_list:
parts.append(cur_list)
cur_list = []
else:
cur_list.append(item)
if cur_list:
parts.append(cur_list)
return parts | c56dd88a7376f002ae6b91c3b227c8a16991ca31 | 3,651,850 |
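For example, splitting on a sentinel value (empty runs between consecutive delimiters are dropped):

print(split_list([1, 2, 0, 3, 0, 0, 4], delimiter=0))  # [[1, 2], [3], [4]]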
def generate_partitions(data):
"""
Generates a random nested partition for an array of integers
:param data:
:return:
"""
if len(data) == 1:
return data
else:
mask1 = np.random.choice(len(data), len(data) // 2, replace=False)
par1 = [data[i] for i in range(len(data)) if i in mask1]
par2 = [data[i] for i in range(len(data)) if i not in mask1]
return [generate_partitions(par1), generate_partitions(par2)] | 164749c135de1cf690bb209a18270a5550cdefc8 | 3,651,851 |
import os
import shutil
def adjust_shapefile_to_aoi(data_uri, aoi_uri, output_uri, \
empty_raster_allowed = False):
"""Adjust the shapefile's data to the aoi, i.e.reproject & clip data points.
Inputs:
- data_uri: uri to the shapefile to adjust
- aoi_uri: uri to a single polygon shapefile
- base_path: directory where the intermediate files will be saved
- output_uri: dataset that is clipped and/or reprojected to the
aoi if necessary.
- empty_raster_allowed: boolean flag that, if False (default),
causes the function to break if output_uri is empty, or return
an empty raster otherwise.
Returns: output_uri
"""
# Data and aoi are the same URIs, data is good as it is, return it.
if data_uri == aoi_uri:
return data_uri
# Split the path apart from the filename
head, tail = os.path.split(output_uri)
# Split the file basename from the file extension
base, _ = os.path.splitext(tail)
# Open URIs and get the projections
data = ogr.Open(data_uri)
message = "OGR Can't open " + data_uri
assert data is not None, message
aoi = ogr.Open(aoi_uri)
data_wkt = shapefile_wkt(data)
aoi_wkt = shapefile_wkt(aoi)
if projections_match([data_wkt, aoi_wkt]):
# Same projections, just clip
clip_datasource(aoi, data, output_uri)
else:
# Reproject the aoi to be in data's projection
projected_aoi_uri = os.path.join(head, base + '_projected_aoi')
# TODO: include this in raster utils
# Removing output_uri if it already exists
if os.path.isdir(projected_aoi_uri):
shutil.rmtree(projected_aoi_uri)
pygeoprocessing.geoprocessing.reproject_datasource(aoi, data_wkt, projected_aoi_uri)
# Clip all the shapes outside the aoi
out_uri = os.path.join(head, base + '_clipped')
clip_datasource(ogr.Open(projected_aoi_uri), data, out_uri)
# Convert the datasource back to the original projection (aoi's)
# Removing output_uri if it already exists
if os.path.isdir(output_uri):
shutil.rmtree(output_uri)
pygeoprocessing.geoprocessing.reproject_datasource(ogr.Open(out_uri), aoi_wkt, \
output_uri)
# Ensure the resulting file's 1st layer is not empty
out_shapefile = ogr.Open(output_uri)
out_layer = out_shapefile.GetLayer(0)
out_feature_count = out_layer.GetFeatureCount()
out_layer = None
out_shapefile = None
# Break if returning an empty raster is not allowed
if not empty_raster_allowed:
message = 'Error: first layer of ' + output_uri + ' is empty. Are ' + \
data_uri + ' and ' + aoi_uri + ' mis-aligned?'
assert out_feature_count > 0, message
return output_uri | 83fcefaebf2adff02a360a5186fbf219de5579a2 | 3,651,852 |
import numpy as np
def randomRectangularCluster(nRow, nCol, minL, maxL, mask=None):
"""
Create a random rectangular cluster neutral landscape model with
values ranging 0-1.
Parameters
----------
nRow : int
The number of rows in the array.
nCol : int
The number of columns in the array.
minL: int
The minimum possible length of width and height for each random
rectangular cluster.
maxL: int
The maximum possible length of width and height for each random
rectangular cluster.
mask : array, optional
2D array used as a binary mask to limit the elements with values.
Returns
-------
out : array
2D array.
"""
if mask is None:
mask = np.ones((nRow, nCol))
# Create an empty array of correct dimensions
array = np.zeros((nRow, nCol)) - 1
# Keep applying random clusters until all elements have a value
while np.min(array) == -1:
width = np.random.choice(range(minL, maxL))
height = np.random.choice(range(minL, maxL))
row = np.random.choice(range(-maxL, nRow))
col = np.random.choice(range(-maxL, nCol))
array[row:row + width, col:col + height] = np.random.random()
# Apply mask and rescale 0-1
maskedArray = maskArray(array, mask)
rescaledArray = linearRescale01(maskedArray)
return(rescaledArray) | b53db06114ac3a465c1e0444bed59aa7403bba83 | 3,651,853 |
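# Hedged usage sketch for randomRectangularCluster above. It assumes the
# companion helpers maskArray and linearRescale01 from the same module are
# available; the grid size and cluster-length bounds are illustrative.
surface = randomRectangularCluster(nRow=50, nCol=50, minL=3, maxL=8)
print(surface.shape)                  # (50, 50)
print(surface.min(), surface.max())   # values rescaled to the 0-1 range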
import os
import pickle
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
# SCOPES is assumed to be defined at module level in the original script.
def auth(credentials_file_path):
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(credentials_file_path+'token.pickle'):
with open(credentials_file_path+'token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials_file_path+'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(credentials_file_path+'token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
return service | b9b65c1976ec5574f9ec2bc56b3c5d0e827cd4a6 | 3,651,854 |
from campy.graphics.gobjects import GArc
def add_arc():
"""
:return: arc object
"""
l_hand = GArc(200, 200, 60, 150, x=480, y=270)
l_hand.filled = True
l_hand.fill_color = "#8eded9"
r_hand = GArc(200, 200, -30, 120, x=650, y=300)
r_hand.filled = True
r_hand.fill_color = "#8eded9"
return l_hand, r_hand | 667004d534d58ab11e9b41ca42572dc445ffcf7d | 3,651,855 |
def get_data_item_or_add(results_dic, name, n_hid, epochs, horizon, timesteps):
""" Return or create a new DataItem in `results_dic` with the corresponding
metadata.
"""
if name not in results_dic:
results_dic[name] = []
found = False
for item in results_dic[name]:
if item.is_metadata(n_hid, epochs, horizon, timesteps):
found = True
return item
if not found:
results_dic[name].append(
DataItem(n_hid, epochs, horizon, timesteps))
return results_dic[name][-1] | e6c713cd89b7a9816f52be11a4730f1cef60355c | 3,651,856 |
def midcurve_atm_fwd_rate(asset: Asset, expiration_tenor: str, forward_tenor: str, termination_tenor: str,
benchmark_type: str = None,
floating_rate_tenor: str = None,
clearing_house: str = None, location: PricingLocation = None, *, source: str = None,
real_time: bool = False) -> Series:
"""
GS end-of-day atm forward rate for swaption vol matrices.
:param asset: asset object loaded from security master
:param expiration_tenor: relative date representation of expiration date on the option e.g. 3m
:param forward_tenor: relative date representation of swap's start date after option expiry e.g. 2y
:param termination_tenor: relative date representation of the instrument's expiration date e.g. 1y
:param benchmark_type: benchmark type e.g. LIBOR
:param floating_rate_tenor: floating index rate
:param clearing_house: Example - "LCH", "EUREX", "JSCC", "CME"
:param location: Example - "TKO", "LDN", "NYC"
:param source: name of function caller
:param real_time: whether to retrieve intraday data instead of EOD
    :return: midcurve ATM forward rate curve
"""
df = _get_swaption_measure(asset, benchmark_type=benchmark_type, floating_rate_tenor=floating_rate_tenor,
effective_date=forward_tenor, expiration_tenor=expiration_tenor,
termination_tenor=termination_tenor, clearing_house=clearing_house, source=source,
real_time=real_time, start=DataContext.current.start_date,
end=DataContext.current.end_date,
query_type=QueryType.MIDCURVE_ATM_FWD_RATE, location=location)
return _extract_series_from_df(df, QueryType.MIDCURVE_ATM_FWD_RATE) | e77ba9ef705c2eefe0f46431862697b6a840d6fd | 3,651,857 |
def extrode_multiple_urls(urls):
""" Return the last (right) url value """
if urls:
return urls.split(',')[-1]
return urls | 34ec560183e73100a62bf40b34108bb39f2b04b4 | 3,651,858 |
def build_header(cp: Config) -> str:
"""Build the email header for a SMTP email message"""
header = '\n'.join([
'From: {}'.format(cp.sender),
'To: {}'.format(''.join(cp.receiver)),
'Subject: {}\n\n'.format(cp.subject)
])
return header | a0c9fdc820d4a454c0384c46775d3e1359710fad | 3,651,859 |
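# Hedged usage sketch for build_header above. SimpleNamespace stands in for
# the project's Config object; the addresses and subject are made up.
from types import SimpleNamespace

cfg = SimpleNamespace(sender="[email protected]",
                      receiver=["[email protected]"],
                      subject="Nightly report")
print(build_header(cfg))
# From: [email protected]
# To: [email protected]
# Subject: Nightly report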
import numpy as np
def apex_distance(r0, rc, Rc, uvec):
"""
Implements equation (E4) of TYH18
"""
R0 = rc + Rc * uvec - r0
return np.hypot(*R0) | f88d59727fce25306ae6ef0856941efdbb80a712 | 3,651,860 |
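# Hedged usage sketch for apex_distance above (relies on the numpy import
# added with it); the 2-D geometry values are arbitrary illustration numbers.
r0 = np.array([0.0, 0.0])      # reference point
rc = np.array([3.0, 0.0])      # circle centre
uvec = np.array([0.0, 1.0])    # unit vector from the centre
print(apex_distance(r0, rc, Rc=4.0, uvec=uvec))  # hypot(3, 4) == 5.0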
def pixel2phase(data):
"""
converts each channel of images in the data to phase component of its 2-dimensional discrete Fourier transform.
:param data: numpy array with shape (nb_images, img_rows, img_cols, nb_channels)
:return: numpy array with same shape as data
"""
channels = data.shape[-1]
return fourier(data)[:, :, :, channels:] | 1b6b2c513cc20fe9642dd375dd17ee2205692912 | 3,651,861 |
def take_last_while(predicate, list):
"""Returns a new list containing the last n elements of a given list, passing
each value to the supplied predicate function, and terminating when the
predicate function returns false. Excludes the element that caused the
predicate function to fail. The predicate function is passed one argument:
(value)"""
for i, e in enumerate(reversed(list)):
if not predicate(e):
            return list[len(list) - i:]
return list | 19468c9130e9ab563eebd97c30c0e2c74211e44b | 3,651,862 |
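# Hedged usage sketch for take_last_while above, mirroring the Ramda-style
# behaviour described in the docstring; the inputs are illustrative.
assert take_last_while(lambda x: x > 2, [1, 2, 3, 4]) == [3, 4]
assert take_last_while(lambda x: x > 0, [1, 2, 3]) == [1, 2, 3]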
import re
from bs4 import BeautifulSoup
def get_notes() -> str:
"""Scrape notes and disclaimers from dashboard."""
# As of 6/5/20, the only disclaimer is "Data update weekdays at 4:30pm"
with get_firefox() as driver:
notes = []
match = re.compile('disclaimers?', re.IGNORECASE)
driver.implicitly_wait(30)
driver.get(dashboard_url)
soup = BeautifulSoup(driver.page_source, 'html5lib')
has_notes = False
text = soup.get_text().splitlines()
for text_item in text:
if match.search(text_item):
notes.append(text_item.strip())
has_notes = True
if not has_notes:
raise FormatError(
"This dashboard url has changed. None of the <div> elements contains'Disclaimers' " + dashboard_url)
return '\n\n'.join(notes) | 7ec0efab1c5ed17c1878ece751bffe82d77f0105 | 3,651,863 |
def signup_logout(request):
"""
Just wrapping the built in
"""
return logout_view(request, template_name='logged_out.html') | 14c403720c396aa8bbb37752ce304bb2804dd46b | 3,651,864 |
import numpy as np
def stress_rotation(stress, angle):
"""
Rotates a stress vector against a given angle.
This rotates the stress from local to the global axis sytem.
Use a negative angle to rotate from global to local system.
The stress vector must be in Voigt notation and engineering stress is used.
Parameters
----------
stress : vector
The matrix that must be rotated.
angle : float
The rotation angle in degrees.
Returns
-------
stress_rot : vector
A rotated version of the matrix.
"""
angle = angle * np.pi/180 # convert to radians
m = np.cos(-angle)
n = np.sin(-angle)
T1_inv = np.matrix([[m**2, n**2, 2*m*n],
[n**2, m**2, -2*m*n],
[-m*n, m*n, m**2-n**2]])
stress_rot = T1_inv * stress
return stress_rot | 96ae75ae61fdbee0cf120e6d705cadc265452e7d | 3,651,865 |
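# Hedged usage sketch for stress_rotation above: rotate a uniaxial stress
# state given in Voigt notation [sigma_x, sigma_y, tau_xy]; the values are
# illustrative. A 90 degree rotation moves the stress onto the other axis.
sigma_local = np.matrix([[100.0], [0.0], [0.0]])
sigma_global = stress_rotation(sigma_local, 90.0)
print(np.round(sigma_global, 6))  # approximately [[0], [100], [0]]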
def blue_noise(x=None, hue=None, data=None, dodge=False, orient='v', plot_width=None,
color='black', palette='tab10', size=3, centralized=False,
filename='', scaling=10):
""" Renders a *Blue Noise Plot* from the given data.
Args:
x (str in data): Variables that specify positions on the data-encoding axes.
hue (str in data): Optional. Grouping variable that will produce points with different
colors.
data (pandas.DataFrame): Input data structure. Long-form collection of vectors that can be
assigned to named variables.
        dodge (boolean): Optional. Whether to dodge the categorical classes of the plot.
Defaults to False.
orient ("v" | "h"): Optional. Orientation of the plot (vertical or horizontal).
Defaults to 'v'.
color (str): Color to use for markers, in case there is only one class (hue not given).
Defaults to 'black'.
palette (str): Method for choosing the colors to use when mapping the hue semantic.
String values are passed to color_palette(). List or dict values imply
categorical mapping, while a colormap object implies numeric mapping.
Defaults to 'tab10'.
size (float): The marker size in points**2.
        centralized (boolean): Optional. Whether the plot should be centralized or not.
Defaults to False.
plot_width (float): Optional. Width of the plot. This is a ratio, assuming the encoding axis
                            is between 0 and 1. So, 0.2 for plot_width would give you a plot which is
5 times as wide in the encoding axis as in the non-encoding axis.
filename (str): Optional. Filename of the plot to render.
scaling (int): Optional. Scaling for the size of plot.
                       Defaults to 10 for a 740 pixel plot (long side).
Returns:
List[List[[float, float]]] 2D-Array, relaxed points. Here the first dimension of the array
encodes the clases in the data. So for a single-class blue noise plot,
len(blue_noise_plot) would be 1.
Each of these arrays contains arrays with points within this class.
"""
return __plot(x=x, hue=hue, data=data, dodge=dodge, orient=orient, plot_width=plot_width,
color=color, palette=palette, size=size, centralized=centralized,
filename=filename, scaling=scaling, method='blue_noise') | 8743dacba9ddac1b1e73e676962d850876c5b2f3 | 3,651,866 |
def get_repository(auth_user: check_auth, repository_id: hug.types.text):
"""
GET: /repository/{repository_id}
Returns the CLA repository requested by UUID.
"""
return cla.controllers.repository.get_repository(repository_id) | ad9bb45a4d4526b790abb7f89d72a3deafb2d10f | 3,651,867 |
def _isDebug():
"""*bool* = "--debug" or "--debugger" """
return options is not None and (options.debug or options.debugger) | f6670a5c5711e2f5e4c29b6b4d5356813b302ee9 | 3,651,868 |
def _and(mat,other,obj,m):
"""
Can only be used with '&' operator not with 'and'
Multi-column boolean matrices' values are compared with 'and' operator, meaning that 1 false value
causes whole row to be reduced to a false value
"""
if mat.BOOL_MAT:
if isinstance(other,obj):
if mat.dim!=other.dim:
raise ValueError("Dimensions of the matrices don't match")
if not other.BOOL_MAT:
raise TypeError("Can't compare bool matrix to non-bool matrix")
d0,d1 = mat.dim
o = other.matrix
true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
data = []
#Reduce multiple columns into one
#Remove rows with false boolean values
for i in range(d0):
mrow,orow = m[i],o[i]
if (false in mrow) or (false in orow):
data.append([false])
continue
data.append([true])
return obj(dim=[d0,1],
data=data,
features=mat.features[:1],
index=mat.index[:],
implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false})
else:
d0,d1 = mat.dim
true,false = mat.DEFAULT_BOOL[True],mat.DEFAULT_BOOL[False]
data = []
if isinstance(other,obj):
if mat.dim!=other.dim:
raise ValueError("Dimensions of the matrices don't match")
if other.BOOL_MAT:
raise TypeError("Can't compare non-bool matrix to bool matrix")
o = other.matrix
for i in range(d0):
mrow,orow = m[i],o[i]
data.append([true if (bool(mrow[j]) and bool(orow[j])) else false for j in range(d1)])
elif isinstance(other,list):
if mat.d1!=len(other):
raise ValueError("Length of the list doesn't match matrix's column amount")
for i in range(d0):
mrow = m[i]
data.append([true if (bool(mrow[j]) and bool(other[j])) else false for j in range(d1)])
else:
for i in range(d0):
mrow = m[i]
data.append([true if (bool(mrow[j]) and bool(other)) else false for j in range(d1)])
return obj(dim=[d0,d1],
data=data,
features=mat.features[:],
index=mat.index[:],
implicit=True,BOOL_MAT=True,DEFAULT_BOOL={True:true,False:false}) | ac4e4d5f205c6aeb068d9fb427839e4e8f85f0ea | 3,651,869 |
import copy
def MAP (request, resource, optimize_already_mapped_nfs=True,
migration_handler_name=None, migration_coeff=None,
load_balance_coeff=None, edge_cost_coeff=None,
time_limit=None, mip_gap_limit=None, node_limit=None, logger=None,
**migration_handler_kwargs):
"""
Starts an offline optimization of the 'resource', which may contain NFs for
considering migration if optimize_already_mapped_nfs is set. 'request' should
be new NF-s to be mapped during the reoptimization of 'resource'.
If 'optimize_already_mapped_nfs' is set to false, 'request' should contain
  only NF-s which are not yet mapped to resource.
:param mip_gap_limit: termination optimality condition for the MILP
:param time_limit: termination execution time condition for the MILP
:param node_limit:
:param optimize_already_mapped_nfs:
:param request:
:param resource:
:param migration_handler_name:
:param migration_handler_kwargs:
:return:
"""
# Make heuristic and MILP even in number of large object deepcopies
# This would also be required for correct behaviour (Maybe the mapping
# shouldn't change the input NFFG)
request = copy.deepcopy(request)
resource = copy.deepcopy(resource)
# overwrite logger object if we got one from the caller!
if logger is not None:
global log
log = logger
migration_handler = None
req_nf_ids = [nf.id for nf in request.nfs]
if optimize_already_mapped_nfs:
# This is a full reoptimization, add VNFs and everything from resource to
# request for reoptimization!
for vnf in resource.nfs:
if vnf.id not in req_nf_ids:
# log.debug("Adding NF %s to request for reoptimization."%vnf.id)
request.add_nf(vnf)
NFFGToolBox.recreate_all_sghops(resource)
for sg in resource.sg_hops:
if not request.network.has_edge(sg.src.node.id, sg.dst.node.id,
key=sg.id):
# log.debug("Adding SGHop %s to request from resource."%sg.id)
add_saps_if_needed_for_link(sg, request)
request.add_sglink(sg.src, sg.dst, hop=sg)
    # reqs in the substrate (requirements satisfied by earlier mapping) need
    # to be respected by the reoptimization, and migration can only be done
    # if they are not violated!
log.debug("e2e reqs in request:%s, e2e reqs in resource, e.g: %s"%
([r.sg_path for r in request.reqs],
[r.sg_path for r in resource.reqs][:20]))
# log.debug("SAPs in resource: %s" % [s for s in resource.saps])
for req in resource.reqs:
# all possible SAPs should be added already!
if not request.network.has_edge(req.src.node.id, req.dst.node.id,
key=req.id):
# log.debug("Adding requirement link on path %s between %s and %s to request to preserve it "
# "during reoptimization"%(req.sg_path, req.src, req.dst))
add_saps_if_needed_for_link(req, request)
# bandwidth requirement of the already mapped SGHops are stored by
# the resource graph!
req.bandwidth = 0.0
request.add_req(req.src, req.dst, req=req)
# We have to deal with migration in this case only.
if migration_handler_name is not None and type(
migration_handler_name) is str:
migration_cls = eval("migration_costs." + migration_handler_name)
# This resource NFFG needs to include all VNFs, which may play any role in
# migration or mapping. Migration need to know about all of them for
# setting zero cost for not yet mapped VNFs
migration_handler = migration_cls(request, resource,
**migration_handler_kwargs)
else:
# No migration can happen! We just map the given request and resource
# with MILP.
# Fail if there is VNF which is mapped already!
for vnf in resource.nfs:
if vnf.id in req_nf_ids:
raise uet.BadInputException("If 'optimize_already_mapped_nfs' is set to "
"False, request shouldn't contain VNFs "
"from resource", "VNF %s is both in request "
"and resource!"%vnf.id)
mappedNFFG = convert_mip_solution_to_nffg([request], resource,
migration_handler=migration_handler,
migration_coeff=migration_coeff,
load_balance_coeff=load_balance_coeff,
edge_cost_coeff=edge_cost_coeff,
reopt=optimize_already_mapped_nfs,
time_limit=time_limit, mip_gap_limit=mip_gap_limit,
node_limit=node_limit)
if mappedNFFG is not None:
try:
mappedNFFG.calculate_available_node_res()
mappedNFFG.calculate_available_link_res([])
except RuntimeError as re:
log.error("MILP's resulting NFFG is invalid: %s"%re.message)
raise uet.InternalAlgorithmException("MILP's mapping is invalid!!")
return mappedNFFG
else:
raise uet.MappingException("MILP couldn't map the given service request.",
False) | 28d74396d14e75fc3c2865461de40aa9cf2586e7 | 3,651,870 |
def _parcel_profile_helper(pressure, temperature, dewpt):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpt)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure.m), press_lcl.m):
return (press_lower[:-1], press_lcl, np.array([]) * press_lower.units,
temp_lower[:-1], temp_lcl, np.array([]) * temp_lower.units)
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:]) | 2e9abd03dbf4617e53ea19ac7a415025567195e8 | 3,651,871 |
import os
import os.path as op
import hashlib
from glob import glob
import numpy as np
import nibabel as nib
def extract_subject(subj, mask_name, summary_func=np.mean,
residual=False, exp_name=None):
"""Extract timeseries from within a mask, summarizing flexibly.
Parameters
----------
subj : string
subject name
mask_name : string
name of mask in data hierarchy
summary_func : callable or None
callable to reduce data over voxel dimensions. can take an
``axis`` argument to operate over each frame, if this
argument does not exist the function will be called on the
n_tr x n_voxel array. if None, simply returns all voxels.
residual : boolean
If True, extract from the registered residual timecourse.
exp_name : string
experiment name, if not using the default experiment
Returns
-------
data : dict with ndarray
        data array is n_runs x n_timepoint x n_dimension,
data are not otherwise altered
"""
project = gather_project_info()
if exp_name is None:
exp_name = project["default_exp"]
    # Get a path to the file where the extracted data will be cached
cache_dir = op.join(project["analysis_dir"],
exp_name, subj, "evoked")
try:
os.makedirs(cache_dir)
except OSError:
pass
if summary_func is None:
func_name = ""
else:
func_name = summary_func.__name__
cache_fname = mask_name + "_" + func_name
cache_fname = cache_fname.strip("_") + ".npz"
cache_file = op.join(cache_dir, cache_fname)
# Get paths to the relevant files
mask_file = op.join(project["data_dir"], subj, "masks",
"%s.nii.gz" % mask_name)
ts_dir = op.join(project["analysis_dir"], exp_name, subj,
"reg", "epi", "unsmoothed")
n_runs = len(glob(op.join(ts_dir, "run_*")))
ftemp = op.join(ts_dir, "run_{:d}/{}_xfm.nii.gz")
fstem = "res4d" if residual else "timeseries"
ts_files = [ftemp.format(r_i, fstem) for r_i in range(1, n_runs+1)]
# Get the hash value for this extraction
cache_hash = hashlib.sha1()
cache_hash.update(mask_name)
cache_hash.update(str(op.getmtime(mask_file)))
for ts_file in ts_files:
cache_hash.update(str(op.getmtime(ts_file)))
cache_hash = cache_hash.hexdigest()
# If the file exists and the hash matches, return the data
if op.exists(cache_file):
with np.load(cache_file) as cache_obj:
if cache_hash == str(cache_obj["hash"]):
return dict(cache_obj.items())
# Otherwise, do the extraction
data = []
mask = nib.load(mask_file).get_data().astype(bool)
for run, ts_file in enumerate(ts_files):
ts_data = nib.load(ts_file).get_data()
roi_data = ts_data[mask].T
if summary_func is None:
data.append(roi_data)
continue
# Try to use the axis argument to summarize over voxels
try:
roi_data = summary_func(roi_data, axis=1)
# Catch a TypeError and just call the function
# This lets us do e.g. a PCA
except TypeError:
roi_data = summary_func(roi_data)
data.append(roi_data)
data = np.array(list(map(np.squeeze, data)))
# Save the results and return them
data_dict = dict(data=data, subj=subj, hash=cache_hash)
np.savez(cache_file, **data_dict)
return data_dict | be9759e6cde757fc55bd70d10cb8b784e312b0d2 | 3,651,872 |
def best_param_search(low=1, margin=1, func=None):
"""
Perform a binary search to determine the best parameter value.
In this specific context, the best
parameter is (the highest) value of the parameter (e.g. batch size)
that can be used to run a func(tion)
(e.g., training) successfully. Beyond a certain value,
the function fails to run for reasons such as out-of-memory.
    :param low: a starting low value to start searching from (defaults to 1).
    :param margin: denotes the margin allowed when choosing the
        configuration parameter (and the optimal parameter).
    :param func: the function that is required to be run with the
        configuration parameter.
"""
assert low > 0
assert margin > 0
assert func is not None
# Determine if the function succeeds to run at the starting (low) value.
# If not, keep lowering the value of low until the run succeeds.
try:
print(f"Trying with a parameter value of {low}.")
func(low)
success = True
except Exception:
success = False
print("Run failed! The starting value of the parameter is itself too high!\n")
while not success and low > 0:
try:
low = low // 2
print(f"Trying with a parameter value of {low}.")
func(low)
success = True
except Exception:
print("Run failed! Lowering the parameter value.\n")
if not success:
print("The function failed to run even at the lowest parameter value !")
return
# Set coarse limits on low (function succeeds to run) and
# high (function does not succeed running).
while success:
high = 2 * low
try:
print(f"Trying with a parameter value of {high}.")
func(high)
low = high
except Exception:
success = False
print("Run failed!\n")
print(
f"Low and high parameter values set to {low} and {high} respectively."
)
# Binary search to find the optimal value of low (within the margin).
current_margin = high - low
while current_margin > margin:
mid = (low + high) // 2
try:
print(f"Trying with a parameter value of {mid}.")
func(mid)
low = mid
except Exception:
high = mid
print("Run failed!\n")
print(f"Low and high parameter values set to {low} and {high} respectively.")
current_margin = high - low
print(f"Setting the parameter value to {low}\n")
return low | 6392d8c019ebb50a49c46e724e62fd63671a00df | 3,651,873 |
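# Hedged usage sketch for best_param_search above: a dummy "training" function
# that fails above a pretend memory limit of 100, so the search should settle
# at or just below it. The limit and margin are made up for illustration.
def fake_training_run(batch_size):
    if batch_size > 100:
        raise RuntimeError("out of memory")

best = best_param_search(low=1, margin=4, func=fake_training_run)
print(best)  # a value close to (and not above) 100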
def configure_plugins_plugin_uninstall(request, pk):
"""
Disables a plugin from the system
:param request:
:param pk: The primary key of the plugin to be disabled
:return:
"""
# TODO: See about pulling this out into a common methods
plugin = get_object_or_404(Plugin, pk=pk)
action = reverse(
"api_dispatch_uninstall",
kwargs={"api_name": "v1", "resource_name": "plugin", "pk": pk},
)
ctx = RequestContext(
request,
{
"method": "DELETE",
"action": action,
"i18n": {
"title": ugettext_lazy(
"configure_plugins_plugin_uninstall.title"
), # 'Confirm Uninstall Plugin'
"confirmmsg": ugettext_lazy(
"configure_plugins_plugin_uninstall.messages.confirmmsg.singular"
)
% { # 'Are you sure you want to uninstall %(versionedName)s Plugin (%(id)s)?'
"id": str(pk),
"versionedName": plugin.versionedName(),
},
"submit": ugettext_lazy(
"configure_plugins_plugin_uninstall.action.submit"
), # 'Yes, Uninstall!'
"cancel": ugettext_lazy("global.action.modal.cancel"),
"submitmsg": ugettext_lazy(
"configure_plugins_plugin_uninstall.messages.submitmsg"
), # 'Now uninstalling, please wait.'
},
},
)
return render_to_response(
"rundb/configure/modal_confirm_plugin_uninstall.html", context_instance=ctx
) | f6c849a2dc4fe63ec43119e9e135c4c6385accd0 | 3,651,874 |
import numpy as np
import pandas as pd
def get_supervised_timeseries_data_set(data, input_steps):
"""This function transforms a univariate timeseries into a supervised learning problem where the input consists
of sequences of length input_steps and the output is the prediction of the next step
"""
series = pd.Series(data)
data_set = pd.DataFrame({'t' : series, 't+1' : series.shift(-1)})
if input_steps > 1:
x_values = np.concatenate([data[i:i+input_steps]
.reshape(1, input_steps) for i in range(len(series) - input_steps)])
timesteps_df = pd.DataFrame(x_values[:,:-1], index=np.arange(input_steps - 1, input_steps - 1 + len(x_values)),
columns = ['t-' + str(input_steps - i) for i in range(1, input_steps)])
data_set = pd.concat([timesteps_df, data_set], axis=1, join='inner')
data_set = data_set.dropna()
X = data_set.drop('t+1', axis=1)
y = data_set.loc[:,'t+1']
return (X, y) | 0fce866ea266c15e83e57795f86fcfe4fee4a54e | 3,651,875 |
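# Hedged usage sketch for get_supervised_timeseries_data_set above (relies on
# the numpy/pandas imports added with it); the toy series is illustrative.
ts = np.array([10.0, 20.0, 30.0, 40.0, 50.0])
X, y = get_supervised_timeseries_data_set(ts, input_steps=2)
print(X)  # 't-1' and 't' columns: the two most recent observations per row
print(y)  # the 't+1' column: the next value to predict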
import os
import pandas as pd
def load_df(name):
"""Load a pandas dataframe from csv file at results/name."""
load_name = os.path.join(here, "..", "results", name)
df = pd.read_csv(load_name)
return df | 6b96b8368c2afcfa552b98ee65d0ee9d889ee0bc | 3,651,876 |
import numpy as np
def load_template_spectra_from_folder(parent_folder,
spectrum_identifier,
normalization=None):
"""
Load template spectrum data into a dictionary. This allows templates from
different folders to be loaded into different dictionaries.
Parameters:
-----------
parent_folder : string
Name of folder or path
spectrum_identifier : string
Radioactive source identifier. Ex: '235U'
normalization : string or boolean
Default = None
Accepts: 'normalheight', 'normalarea', None
How the dataset should be normalized.
Returns:
--------
temp_dict : Dictionary containing all template spectra from a folder.
"""
temp_dict = {}
def normalize_spectrum(ID):
"""
Normalizes the spectrum data.
Parameters:
-----------
ID : string
The ID key for the radioactive source in your spectrum.
Returns:
--------
temp_dict : Dictionary
Contains all normalized datasets.
"""
temp_spectrum = an.read_spectrum(
parent_folder + ID + spectrum_identifier)
if np.max(temp_spectrum) == 0:
print(ID + ' Contains no values')
if normalization is None:
return temp_spectrum
elif normalization == 'normalheight':
return temp_spectrum / np.max(temp_spectrum)
elif normalization == 'normalarea':
return temp_spectrum / np.sum(temp_spectrum)
for i in range(len(an.isotopes) - 3):
temp_dict[an.isotopes[i]] = normalize_spectrum(
an.isotopes_sources_GADRAS_ID[i])
return temp_dict | 466d0eb74de197ddb18e289f072d9451bc7ea2d8 | 3,651,877 |
import json
def remove_screenshot_from_object(request):
"""
Removes the screenshot from being associated with a top-level object.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
analyst = request.user.username
obj = request.POST.get('obj', None)
oid = request.POST.get('oid', None)
sid = request.POST.get('sid', None)
result = delete_screenshot_from_object(obj, oid, sid, analyst)
return HttpResponse(json.dumps(result),
mimetype="application/json") | 36836120c79dc8d825c91370074da09fd2255c6d | 3,651,878 |
import csv
import json
def read_csv_as_dicts(
filename,
newline="",
delimiter=",",
quotechar='"',
encoding="utf-8",
remove_prefix=True,
prefix="dv.",
json_cols=CSV_JSON_COLS,
false_values=["FALSE"],
true_values=["TRUE"],
):
"""Read in CSV file into a list of :class:`dict`.
This offers an easy import functionality of your data from CSV files.
See more at
`csv <https://docs.python.org/3/library/csv.html>`_.
CSV file structure:
1) The header row contains the column names.
2) A row contains one dataset
3) A column contains one specific attribute.
Recommendation: Name the column name the way you want the attribute to be
named later in your Dataverse object. See the
`pyDataverse templates <https://github.com/GDCC/pyDataverse_templates>`_
for this. The created :class:`dict` can later be used for the `set()`
function to create Dataverse objects.
Parameters
----------
filename : str
Filename with full path.
newline : str
Newline character.
delimiter : str
        Cell delimiter of the CSV file. Defaults to ','.
quotechar : str
Quote-character of CSV file. Defaults to '"'.
encoding : str
Character encoding of file. Defaults to 'utf-8'.
Returns
-------
list
List with one :class:`dict` each row. The keys of a :class:`dict` are
        named after the column names.
"""
assert isinstance(filename, str)
assert isinstance(newline, str)
assert isinstance(delimiter, str)
assert isinstance(quotechar, str)
assert isinstance(encoding, str)
with open(filename, "r", newline=newline, encoding=encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)
data = []
for row in reader:
data.append(dict(row))
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if val in false_values:
ds_tmp[key] = False
elif val in true_values:
ds_tmp[key] = True
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
if remove_prefix:
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if key.startswith(prefix):
ds_tmp[key[len(prefix) :]] = val
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
if len(json_cols) > 0:
data_tmp = []
for ds in data:
ds_tmp = {}
for key, val in ds.items():
if key in json_cols:
ds_tmp[key] = json.loads(val)
else:
ds_tmp[key] = val
data_tmp.append(ds_tmp)
data = data_tmp
return data | bf15b684120445adf7e6ba8ae18befd64ad6a99f | 3,651,879 |
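# Hedged usage sketch for read_csv_as_dicts above (assumes the module-level
# CSV_JSON_COLS constant from the original package); the file name, columns
# and values are made up for illustration.
with open("example_datasets.csv", "w", encoding="utf-8", newline="") as f:
    f.write("dv.title,dv.published\n")
    f.write("My dataset,TRUE\n")

rows = read_csv_as_dicts("example_datasets.csv")
print(rows)  # e.g. [{'title': 'My dataset', 'published': True}]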
from typing import Dict
from typing import Union
from typing import Tuple
from typing import List
import torch
from typing import Any
def get_rewrite_outputs(wrapped_model: nn.Module,
model_inputs: Dict[str, Union[Tuple, List,
torch.Tensor]],
deploy_cfg: mmcv.Config,
run_with_backend: bool = True) -> Tuple[Any, bool]:
"""To get outputs of generated onnx model after rewrite.
Args:
wrapped_model (nn.Module): The input model.
model_inputs (dict): Inputs for model.
deploy_cfg (mmcv.Config): Deployment config.
run_with_backend (bool): Whether to run inference with backend.
Default is True.
Returns:
List[torch.Tensor]: The outputs of model.
bool: A flag indicate the type of outputs. If the flag is True, then
the outputs are backend output, otherwise they are outputs of wrapped
pytorch model.
"""
backend = get_backend(deploy_cfg)
with RewriterContext(
cfg=deploy_cfg, backend=backend.value, opset=11), torch.no_grad():
ctx_outputs = wrapped_model(**model_inputs)
ir_type = get_ir_config(deploy_cfg).get('type', None)
if ir_type == IR.TORCHSCRIPT.value:
ir_file_path = get_ts_model(wrapped_model, model_inputs, deploy_cfg)
else: # TODO onnx as default, make it strict when more IR types involved
ir_file_path = get_onnx_model(wrapped_model, model_inputs, deploy_cfg)
backend_outputs = None
if run_with_backend:
backend_outputs = get_backend_outputs(ir_file_path, model_inputs,
deploy_cfg)
if backend_outputs is None:
return ctx_outputs, False
else:
return backend_outputs, True | 147276df37fcb7df604ec14454638605266fbdc1 | 3,651,880 |
import tempfile
import os
def _get_implied_dependencies(path: str) -> list:
""" Attempt to replace _get_requirements_from_file
Extracts import statements via regex.
Does not catch all import statements and its
use was rolled back.
Might still be overhauled and integrated again.
"""
_python_files = search_filename(
base_folder=path,
file_name="**/*.py",
recursive_flag=True
)
_tmp_project_path = tempfile.mkdtemp()
_tmp_file_path = _tmp_project_path + "/dependencies.py"
_tmp_file = open(_tmp_file_path, 'w')
for file in _python_files:
for _import in _get_imports(file):
_tmp_file.write(_import.strip()+'\n')
_tmp_file.close()
try:
_all_imports = pipreqs.get_all_imports(
path=_tmp_project_path,
encoding='utf-8'
)
except (IndentationError, SyntaxError):
return None
# Clean up tmp folder
if os.path.isfile(_tmp_file_path):
os.remove(_tmp_file_path)
if os.path.isdir(_tmp_project_path):
os.rmdir(_tmp_project_path)
_imports = _remove_local_dependencies(path, _all_imports)
return pipreqs.get_pkg_names(_imports) | 5ddd0b00e769f5d529053f00114ef3e5fb33e6da | 3,651,881 |
def get_interface_breakout_param(dut,**kwargs):
"""
Author: Naveen Nag
email : [email protected]
:param dut:
:param interface:
:param fields:
:return: interface breakout speed
Usage:
port.get_interface_breakout_param(dut1, 'Ethernet4')
:return - ['4x10G', 'Completed']
"""
param_breakout = []
if 'interface' not in kwargs :
st.error("Mandatory argument \'interface\' is missing")
return False
if 'Eth' in kwargs['interface']:
st.log('Physical interface name is provided, mapping it to a port group')
res1 = get_interface_breakout_mode(dut, kwargs['interface'], 'port')
if res1:
kwargs['interface'] = 'port ' + res1[0]['port']
else:
st.error('Invalid interface, cannot get the status')
return False
output = st.show(dut, "show interface breakout {}".format(kwargs['interface']), type='klish')
if len(output) == 0:
st.error("Provided interface is not a breakout port")
return False
else:
param_breakout.append(str(output[0]['breakout_mode'].strip('G')))
param_breakout.append(output[0]['status'])
return param_breakout | 43286c8dbc29fef096d34c567f3f7c4ff2a06691 | 3,651,882 |
def get_process_basic_window_enriched(cb, print_detail, window):
"""
    Search for enriched processes seen within the given time window and return a process GUID for later use.
Args:
cb (CBCloudAPI): API object
print_detail (bool): whether to print full info to the console, useful for debugging
window (str): period to search
Returns:
        process_guid of one of the matching processes in the returned list
"""
print("\n----------------------------------------------------------")
print("API Calls:")
print("Start a Process Search (v2)")
print("Get the Status of a Process Search (v1)")
print("Retrieve Results for a Process Search (v2)\n")
process_query = cb.select(Process).where("enriched:true")
process_query.set_time_range(window=window)
matching_processes = [process for process in process_query]
print(f"There are {len(matching_processes)} found in {window} of processes")
if print_detail:
for process in matching_processes:
print("{0:16} {1:5} {2:20}".format(process.device_name, process.process_pids[0], process.process_guid))
try:
print(f"process guid being used is {matching_processes[6].process_guid}")
print("Test PASSED")
except IndexError:
print("Test FAILED")
print("----------------------------------------------------------")
return matching_processes[6].process_guid | 97a9a3df0570ee4f1777faa6385c7ddd8a013594 | 3,651,883 |
def home():
"""Home view"""
if flask.session.get('userid'):
leaderboard_players = rankedlist(
member=db.web.session.query(
models.Member).get(
flask.session['userid']))
member = db.web.session.query(
models.Member).get(
flask.session.get('userid'))
else:
leaderboard_players = rankedlist(
member=db.web.session.query(
models.Member).filter_by(
rank=3).first())
member = None
news = db.web.session.query(models.NewsArticle).order_by(
desc("date"))
return render_template('content_home.html', news=news, member=member,
leaderboard_players=leaderboard_players) | 89211f6b79eae71b757201a2d8b234000a3e42bf | 3,651,884 |
def is_number(string):
""" Tests if a string is valid float. """
try:
float(string)
return True
except ValueError:
return False | 1c46820de59b932ec565af55c565d175eef58c3c | 3,651,885 |
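# Hedged usage sketch for is_number above; plain illustrative inputs.
assert is_number("3.14") is True
assert is_number("abc") is False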
import os
import pygame
def recursive_load_gfx(path, accept=(".png", ".bmp", ".svg")):
"""
Load graphics files.
This operates on a one folder at a time basis.
Note: An empty string doesn't count as invalid,
since that represents a folder name.
"""
colorkey = c.UGLY_PURPLE
graphics = {}
for pic in os.listdir(path):
pic_path = os.path.join(path, pic)
name, ext = os.path.splitext(pic)
if ext.lower() in accept:
img = pygame.image.load(pic_path)
if img.get_alpha():
                img = img.convert_alpha()
else:
img = img.convert()
img.set_colorkey(colorkey)
graphics[name] = img
elif not ext:
pass
else:
print("Got unexpected gfx format\n" \
"Path: {}\n" \
"Name: {}\n" \
"Ext: {}\n".format(pic_path, name, ext))
return graphics | 5daff8707aeeb2837e0a00be7a54b3d70ec3f917 | 3,651,886 |
import tensorflow as tf
def inputs(filename, batch_size, n_read_threads = 3, num_epochs = None, image_width = 200, image_height=290):
"""
reads the paired images for comparison
input: name of the file to load from, parameters of the loading process
output: the two images and the label (a logit classifier for 2 class - yes or no)
"""
with tf.device('/cpu:0'): #we need to load using the CPU or it allocated a stupid amount of memory
x1, x2, y_ = pc.input_pipeline([filename], batch_size, n_read_threads, num_epochs=num_epochs, imgwidth = image_width, imgheight = image_height)
return x1, x2, y_ | 1b22a3f5b28513a2f65312981205b2df40acd2b3 | 3,651,887 |
import logging
import logging.handlers
from logging.handlers import SysLogHandler
import os
def combinedlogger(
log_name,
log_level=logging.WARN,
syslogger_format="%(levelname)s %(message)s",
consolelogger_format="%(asctime)s %(levelname)s %(message)s",
):
"""
Returns a combined SysLogHandler/StreamHandler logging instance
with formatters
"""
if "LOGLEVEL" in os.environ:
log_level = os.environ["LOGLEVEL"]
try:
log_level = int(log_level)
except ValueError:
pass
# for writing to syslog
newlogger = logging.getLogger(log_name)
if syslogger_format and os.path.exists("/dev/log"):
my_syslog_formatter = logging.Formatter(
fmt=" ".join((log_name, syslogger_format))
)
my_syslog_handler = logging.handlers.SysLogHandler(
address="/dev/log",
facility=SysLogHandler.LOG_DAEMON,
)
my_syslog_handler.setFormatter(my_syslog_formatter)
newlogger.addHandler(my_syslog_handler)
if consolelogger_format:
my_stream_formatter = logging.Formatter(fmt=consolelogger_format)
my_stream_handler = logging.StreamHandler()
my_stream_handler.setFormatter(my_stream_formatter)
newlogger.addHandler(my_stream_handler)
newlogger.setLevel(log_level)
return newlogger | 45b6b63c1912f115a57afbf7ad2ba5478f12d548 | 3,651,888 |
def abs_p_diff(predict_table, categA='sandwich', categB='sushi'):
"""Calculates the absolute distance between two category predictions
:param predict_table: as returned by `predict_table`
:param categA: the first of two categories to compare
:param categB: the second of two categoreis to compare
:returns: series with the absolute difference between the predictions
:rtype: pandas Series
"""
return abs(predict_table['p_%s' % categA] - predict_table['p_%s' % categB]) | 235bfc7df29ac4a2b67baff9dfa3ee62204a9aed | 3,651,889 |
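# Hedged usage sketch for abs_p_diff above: a toy prediction table using the
# 'p_<category>' column naming the function expects; values are made up.
import pandas as pd

toy_predictions = pd.DataFrame({"p_sandwich": [0.9, 0.4], "p_sushi": [0.1, 0.6]})
print(abs_p_diff(toy_predictions))  # absolute differences 0.8 and 0.2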
def _is_target_feature(column_names, column_mapping):
"""Assert that a feature only contains target columns if it contains any."""
column_names_set = set(column_names)
column_types = set(column['type']
for column_name, column in column_mapping.iteritems()
if column_name in column_names_set)
if 'target' in column_types:
assert len(column_types) == 1, (
'Features with target columns can only contain target columns.'
'Found column_types: %s for columns %s' % (column_types,
column_names))
return True
else:
return False | 098af45938c616dd0ff2483a27131f15ba50797b | 3,651,890 |
import typing
import json
import requests
def server_upload_document(path: str, title: str, peer: int, document_type: str = "doc") -> \
typing.Tuple[bool, typing.Union[str, typing.Any]]:
""" Uploads document to the server and returns it (as document string). """
try:
# Trying to upload document.
# Getting api for the uploader.
server_docs_api = SERVER_API.get_api().docs # noqa
# Getting upload url.
if "upload_url" in (upload_server := server_docs_api.getMessagesUploadServer(type=document_type, peer_id=peer)):
# If there is our URL.
# Get URL.
upload_url = upload_server["upload_url"]
else:
# If no.
# Error.
return False, "Upload Server Error" + str(upload_server)
# Posting file on the server.
request = json.loads(requests.post(upload_url, files={
"file": open(path, "rb")
}).text)
if "file" in request:
            # If all required fields are in the response.
# Saving file to the docs.
request = server_docs_api.save(file=request["file"], title=title, tags=[])
# Get fields.
document_id = request[document_type]["id"]
document_owner_id = request[document_type]["owner_id"]
# Returning document.
return True, f"doc{document_owner_id}_{document_id}"
        # If not all required fields were returned.
# Debug message.
debug_message(f"[Server] Error when uploading document (Request)! Request - {request}")
# Returning request as error.
return False, "Request Error" + str(request)
except Exception as exception: # noqa, pylint: disable=broad-except, redefined-outer-name
# If there is error.
# Debug message.
debug_message(f"Error when uploading document (Exception)! Exception: {exception}")
# Returning exception.
return False, "Exception Error" + str(exception) | 2387dabd84c590a0eb3f6406d0b6742ccad4cb14 | 3,651,891 |
def _default_mono_text_dataset_hparams():
"""Returns hyperparameters of a mono text dataset with default values.
See :meth:`texar.MonoTextData.default_hparams` for details.
"""
return {
"files": [],
"compression_type": None,
"vocab_file": "",
"embedding_init": Embedding.default_hparams(),
"delimiter": " ",
"max_seq_length": None,
"length_filter_mode": "truncate",
"pad_to_max_seq_length": False,
"bos_token": SpecialTokens.BOS,
"eos_token": SpecialTokens.EOS,
"other_transformations": [],
"variable_utterance": False,
"utterance_delimiter": "|||",
"max_utterance_cnt": 5,
"data_name": None,
"@no_typecheck": ["files"]
} | bfd015cc93bd974b6486cf07cf72f1dfb7443b61 | 3,651,892 |
def validate_engine_mode(engine_mode):
"""
Validate database EngineMode for DBCluster
Property: DBCluster.EngineMode
"""
VALID_DB_ENGINE_MODES = (
"provisioned",
"serverless",
"parallelquery",
"global",
"multimaster",
)
if engine_mode not in VALID_DB_ENGINE_MODES:
raise ValueError(
"DBCluster EngineMode must be one of: %s" % ", ".join(VALID_DB_ENGINE_MODES)
)
return engine_mode | 69f7952a998b6ca593106c92710909104e21f55f | 3,651,893 |
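# Hedged usage sketch for validate_engine_mode above; the values come from the
# list the validator itself accepts.
print(validate_engine_mode("serverless"))  # returns the value unchanged
try:
    validate_engine_mode("bogus-mode")
except ValueError as err:
    print(err)  # lists the accepted engine modes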
import logging
def GetBlameListForV2Build(build):
""" Uses gitiles_commit from the previous build and current build to get
blame_list.
Args:
build (build_pb2.Build): All info about the build.
Returns:
(list of str): Blame_list of the build.
"""
search_builds_response = buildbucket_client.SearchV2BuildsOnBuilder(
build.builder, build_range=(None, build.id), page_size=2)
previous_build = None
for search_build in search_builds_response.builds:
# TODO(crbug.com/969124): remove the loop when SearchBuilds RPC works as
# expected.
if search_build.id != build.id:
previous_build = search_build
break
if not previous_build:
logging.error(
'No previous build found for build %d, cannot get blame list.',
build.id)
return []
repo_url = git.GetRepoUrlFromV2Build(build)
return git.GetCommitsBetweenRevisionsInOrder(
previous_build.input.gitiles_commit.id, build.input.gitiles_commit.id,
repo_url) | 622fb540d4cce2dc2f57c7a7276ebef546ebc766 | 3,651,894 |
from datetime import datetime
import subprocess as sp
def run_command(cmd, log_method=log.info):
"""Subprocess wrapper for capturing output of processes to logs
"""
if isinstance(cmd, str):
cmd = cmd.split(" ")
start = datetime.utcnow()
log_method("Starting run_command for: {}".format(" ".join([str(x) for x in cmd])))
p = sp.Popen(cmd, bufsize=0, stdout=sp.PIPE, stderr=sp.STDOUT)
ret_val = None
while True:
line = p.stdout.readline()
ret_val = p.poll()
        if not line and ret_val is not None:
break
log_method(line.decode())
log_method("Completed run_command in {} for: {}".format((datetime.utcnow() - start).total_seconds(), " ".join(cmd)))
return ret_val | 8366c9306810d927daf82b473db86dc67b0d84c6 | 3,651,895 |
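# Hedged usage sketch for run_command above (relies on the subprocess import
# added with it and on the module-level `log` logger from the original
# module); the echo command is purely illustrative.
import logging

logging.basicConfig(level=logging.INFO)
exit_code = run_command("echo hello", log_method=logging.getLogger(__name__).info)
print(exit_code)  # 0 on success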
def jar(state: State, fail: Fail):
"""
Store a function by a name
"""
(identifier, (code, rest)) = state.infinite_stack()
if identifier.tag != "atom":
fail(f"{identifier} is not an atom")
if code.tag not in ["code", "native"]:
fail(f"{code} is not code")
if code.tag == "code":
code = code.with_name(identifier.value)
return state.with_stack(rest).set_name(identifier.value, code) | 5f60a30ff7bed1a453bfe6ff354dc34a6fabee4f | 3,651,896 |
import os
import argparse
def existant_file(filepath:str):
"""Argparse type, raising an error if given file does not exists"""
if not os.path.exists(filepath):
raise argparse.ArgumentTypeError(
" file {} doesn't exists".format(C_FILE + filepath + C_ENDC)
)
return filepath | dacf94aaeccb6b70974eafc87dff96609408ccb6 | 3,651,897 |
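# Hedged usage sketch for existant_file above as an argparse type; the
# argument name and path are illustrative (C_FILE/C_ENDC colour constants are
# assumed to be defined in the original module and only used on failure).
parser = argparse.ArgumentParser()
parser.add_argument("config", type=existant_file, help="path to an existing file")
args = parser.parse_args(["/etc/hosts"])  # an existing file on most Unix systems
print(args.config)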
import requests
import json
from requests.auth import HTTPBasicAuth
def get_daily_activity(p_sap_date: str) -> dict:
""" Returns activities on the given date """
fiori_url = config.CONSTANTS["ECZ_DAHA_DAILY_URL"] + "?date=" + p_sap_date
resp = requests.get(
fiori_url,
auth=HTTPBasicAuth(
config.CONSTANTS["ECZ_DAHA_USER"],
config.CONSTANTS["ECZ_DAHA_PASS"]))
resp_as_dict = json.loads(resp.text)
return resp_as_dict | 68da0af50b0fc828d6eae3d1685911f039bd9732 | 3,651,898 |
import random
from typing import Optional
def load_example_abc(title: Optional[str] = None) -> str:
"""Load a random example ABC if `title` not provided.
Case ignored in the title.
"""
if title is None:
k = random.choice(list(examples))
else:
k = title.lower()
abc = examples.get(k)
if abc is None:
example_list = "\n".join(f" {t!r}" for t in examples)
raise ValueError("invalid tune title. Valid options are:\n" f"{example_list}")
return abc | d1deba6a03814da68c5d47a7018a6768059fef62 | 3,651,899 |