| content | sha1 | id |
|---|---|---|
| stringlengths 35 to 762k | stringlengths 40 | int64 0 to 3.66M |
import tkinter
def _colorvar_patch_destroy(fn):
"""Internal function.\n
Deletes the traces if any when widget is destroy."""
def _patch(self):
"""Interanl function."""
if self._tclCommands is not None:
# Deletes the widget from the _all_traces_colorvar
# and deletes the traces too.
for key, value in dict(_all_traces_colorvar).items():
if self == key[0]:
var, cbname = value
try:
var.trace_vdelete('w', cbname)
except tkinter.TclError:
pass
_all_traces_colorvar.pop(key)
return fn(self)
return _patch | d38380316932d8ff2fee8bed8b931b5567588774 | 3,654,800 |
import os
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
) | 154fa2158dc6e980c57c8b8bcea339fcadca772e | 3,654,801 |
def pres_from_hybrid(psfc, hya, hyb, p0=100000.):
"""Return pressure field on hybrid-sigma coordinates,
assuming formula is
p = a(k)*p0 + b(k)*ps.
"""
return hya*p0 + hyb*psfc | 4ebd90fb807ab9ea4c2b45d27da6f8b420c107f7 | 3,654,802 |
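# Usage sketch for pres_from_hybrid (illustrative values only): the hybrid
# coefficients hya/hyb are assumed to be NumPy arrays, one entry per level.
import numpy as np

hya = np.array([0.00252, 0.00710, 0.01904])   # hybrid A coefficients
hyb = np.array([0.0, 0.0, 0.00752])           # hybrid B coefficients
psfc = 101325.0                               # surface pressure in Pa
p = pres_from_hybrid(psfc, hya, hyb)          # p = hya*p0 + hyb*psfc
print(p)                                      # pressure at each level, in Pa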
import urllib.request
def url_exist(file_url):
""" Check if an url exist
Parameters
----------
file_url : string
url of www location
Returns
-------
verdict : dtype=boolean
verdict if present
"""
try:
urllib.request.urlopen(file_url).code == 200
return True
except:
return False | 717ee7073ab56e8611eb46f042ab7c18f2db0f33 | 3,654,803 |
from scipy import stats
def chi_square(observed, expected):
"""
Compute the chi square test
"""
# glen cowan pp61
temp = []
for (n, nu) in zip(observed, expected):
if nu != 0:
temp += [((n - nu) ** 2) / nu]
# compute p value
mychi = sum(temp)
p = stats.chi2.sf(mychi, len(temp))
return mychi, p | 4b0577ec4e4b4dc6b99b00a54e78f1014b9cf93a | 3,654,804 |
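# Minimal usage sketch for chi_square: compare observed counts to expected counts.
# The numbers below are made up for illustration.
observed = [12, 25, 31, 18, 9]
expected = [10.0, 24.0, 33.0, 20.0, 8.0]
chi2_stat, p_value = chi_square(observed, expected)
print(chi2_stat, p_value)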
def fix_deform_for_children(pmx: pmxstruct.Pmx, me: int, already_visited=None) -> int:
"""
Recursively ensure everything that inherits from the specified bone will deform after it.
    Only cares about parent and partial-inherit; doesn't try to understand IK groups.
Return the number of bones that were changed.
:param pmx: full PMX object
:param me: int index of current bone
:param already_visited: leave empty, used to prevent recursive looping
:return: number of bones that were changed
"""
def guarantee_good_relationship(parent: int, child: int) -> bool:
# check & fix the relationship between these two bones
# if the deform layers are improper, then fix them and return True
# if the deform layers are proper, then return False
# todo: do i care about deform_after_phys?
child_deform = pmx.bones[child].deform_layer
parent_deform = pmx.bones[parent].deform_layer
if child < parent:
# if the child has lower index than parent, then the child MUST have greater deform_layer
if child_deform <= parent_deform:
pmx.bones[child].deform_layer = pmx.bones[parent].deform_layer + 1
return True
else:
return False
elif child > parent:
# if the child has greater index than parent, then the child MUST have greater (or equal) deform_layer
if child_deform < parent_deform:
pmx.bones[child].deform_layer = pmx.bones[parent].deform_layer
return True
else:
return False
else:
# if child == parent, idk? don't change anything tho
return False
retme = 0
# safety system to prevent infinite recursion:
if already_visited is None: already_visited = set()
if me in already_visited: return 0
else: already_visited.add(me)
# check every single bone to find the ones that inherit from "me"
for d,bone in enumerate(pmx.bones):
# if bone d is inheriting from "me",
if (bone.parent_idx == me) or ((bone.inherit_rot or bone.inherit_trans) and (bone.inherit_parent_idx == me)):
# check/fix their relationship. return True if something was changed.
if guarantee_good_relationship(me, d):
                # the check also fixes it, all that's left is to recurse
print(d)
retme += 1
retme += fix_deform_for_children(pmx, d, already_visited)
return retme | 1757e8f4c39c2ac93f9ef63397b194412342fbae | 3,654,805 |
import numpy as np
def theta_8(p, q, h, phi, a, b):
"""Lower limit of integration for the case rho > a, rho > b."""
result = np.arctan(r_8(p, q, phi, a, b)/h)
return(result) | 913ceb462885fba93cbdb6bddaa5523c119821bc | 3,654,806 |
def collect_genewise(fst_file, file_name, gene_names, gene_to_fst):
"""take in the file name, opens it.
populates a dictionary to [gene] = fst
file_name = defaultdict(str)
FBgn0031208 500000 16 0.002 21.0 1:2=0.05752690
"""
file_name = file_name.split("_gene")[0]
    with open(fst_file, "r") as f_in:
        for line in f_in:
            if test_line(line):
                data = line.split()
                if "1:2=" in line:
                    gene = data[0].strip()
                    gene_names.add(gene)
                    fst = data[5].strip()
                    fst = fst.split("=")[1]
                    data = "%s\t%s" % (gene, fst)
                    gene_to_fst[file_name].append(data)
return gene_to_fst, gene_names | ea62da67a7084103859244bf7f192c2f4433124c | 3,654,807 |
import torch
def bbox_overlaps_batch(anchors, gt_boxes):
"""
    :param anchors: (N, 4) or (b, N, 4/5) float tensor
    :param gt_boxes: (b, K, 5) float tensor
    :return: (b, N, K) tensor of overlap between boxes and query_boxes
"""
batch_size = gt_boxes.size(0)
if anchors.dim() == 2:
N = anchors.size(0)
K = gt_boxes.size(1)
anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) - torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) - torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
elif anchors.dim() == 3:
N = anchors.size(1)
K = gt_boxes.size(1)
if anchors.size(2) == 4:
anchors = anchors[:,:,:4].contiguous()
else:
anchors = anchors[:,:,1:5].contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) - torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) - torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
else:
raise ValueError('anchors input dimension is not correct.')
return overlaps | cc5a88e6a1d5cd42b1827091cbee311e4f33bbb6 | 3,654,808 |
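# Minimal usage sketch for bbox_overlaps_batch with a batch of one, two anchors and
# one ground-truth box (coordinates are made up; the last gt column is a class id).
import torch

anchors = torch.tensor([[0., 0., 10., 10.],
                        [5., 5., 15., 15.]])         # (N, 4)
gt_boxes = torch.tensor([[[0., 0., 10., 10., 1.]]])  # (b=1, K=1, 5)
overlaps = bbox_overlaps_batch(anchors, gt_boxes)    # -> shape (1, 2, 1)
print(overlaps)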
from typing import Tuple
from typing import Callable
from typing import Any
import re
from math import inf
def extract_curve_and_test(curve_names: str, name: str) -> Tuple[str, Callable[[Any], bool]]:
"""Return a curve and a test to apply for which of it's components to twist."""
twist_match = re.match(rf"(?P<curve>[{curve_names}])_(?P<n>-?\d+)$", name)
twist_index_match = re.match(rf"(?P<curve>[{curve_names}])\[ *(?P<n>-?\d+) *\]$", name)
twist_slice_match = re.match(rf"(?P<curve>[{curve_names}])(\[ *(?P<start>-?\d*) *: *(?P<stop>-?\d*) *(: *(?P<step>-?\d*) *)?\])?$", name)
twist_expr_match = re.match(rf"(?P<curve>[{curve_names}])\{{(?P<expr>.*)\}}$", name)
if twist_match is not None:
parameters = twist_match.groupdict()
curve = parameters["curve"]
n = int(parameters["n"])
test = lambda edge: edge == n
elif twist_index_match is not None:
parameters = twist_index_match.groupdict()
curve = parameters["curve"]
n = int(parameters["n"])
test = lambda edge: edge == n
elif twist_slice_match is not None:
parameters = twist_slice_match.groupdict()
curve = parameters["curve"]
start = int(parameters["start"]) if parameters["start"] else -inf
stop = int(parameters["stop"]) if parameters["stop"] else inf
step = int(parameters["step"]) if parameters["step"] else 1
test = lambda edge: start <= edge < stop and (edge % step == (0 if start == -inf else start % step))
elif twist_expr_match is not None:
parameters = twist_expr_match.groupdict()
curve = parameters["curve"]
test = lambda n: eval(parameters["expr"], {"n": n, **globals()}) # pylint: disable=eval-used
else:
raise ValueError(f"Unknown mapping class {name}")
return curve, test | e4849ff7145bae0c2c900d0aa747ec7a14fb96ac | 3,654,809 |
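# Usage sketch for extract_curve_and_test: parse a twist name such as "a[2:5]" into
# the curve label and a predicate over component indices. The curve names are assumed
# to be single characters (here "ab").
curve, test = extract_curve_and_test("ab", "a[2:5]")
print(curve)                              # "a"
print([n for n in range(8) if test(n)])   # components 2, 3 and 4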
import numpy
def psf_gaussian(psf_shape, psf_waist, psf_physical_size=1, psf_nphoton=2):
"""Return 3D gaussian approximation of PSF."""
def f(index):
s = psf_shape[index] // 2 * psf_physical_size
c = numpy.linspace(-s, s, psf_shape[index])
c *= c
c *= -2.0 / (psf_waist[index] * psf_waist[index])
return c
psf = numpy.exp(
numpy.sum(
numpy.meshgrid(f(0), f(1), f(2), indexing='ij', sparse=False),
axis=0,
)
)
if psf_nphoton != 1:
numpy.power(psf, psf_nphoton, out=psf)
return psf | 77ccab6aaa141564751a0eafd13398f904673006 | 3,654,810 |
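# Usage sketch for psf_gaussian: a small 3D two-photon PSF on a 33x33x33 grid, with
# waists given in the same (arbitrary) units as the grid spacing.
psf = psf_gaussian(psf_shape=(33, 33, 33), psf_waist=(1.5, 0.5, 0.5))
print(psf.shape)   # (33, 33, 33)
print(psf.max())   # 1.0 at the centre voxel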
def get_employee_record(id_num):
"""Gets an employee's details if record exists.
Arguments:
id_num -- ID of employee record to fetch
"""
    if id_num not in names or id_num not in cities:
return 'Error viewing record'
return f'{id_num} {names[id_num]} {cities[id_num]}' | 108b6e3482022e8e65e09bda1dd8a78ca7850cfe | 3,654,811 |
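# Usage sketch for get_employee_record. The function reads module-level `names` and
# `cities` dicts defined elsewhere; the two dicts below are stand-ins for
# illustration only.
names = {101: "Ada Lovelace"}
cities = {101: "London"}
print(get_employee_record(101))   # "101 Ada Lovelace London"
print(get_employee_record(999))   # "Error viewing record"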
import logging
import boto3
from botocore.exceptions import ClientError

logger = logging.getLogger(__name__)
iam = boto3.resource('iam')

def list_aliases():
"""
Gets the list of aliases for the current account. An account has at most one alias.
:return: The list of aliases for the account.
"""
try:
response = iam.meta.client.list_account_aliases()
aliases = response['AccountAliases']
if len(aliases) > 0:
logger.info("Got aliases for your account: %s.", ','.join(aliases))
else:
logger.info("Got no aliases for your account.")
except ClientError:
logger.exception("Couldn't list aliases for your account.")
raise
else:
return response['AccountAliases'] | 13fa5d4ded6811bbcbd6062cf7f690b08c41354e | 3,654,812 |
def MapToSingleIncrease(val):
"""
    Some tools need the 30-minute values to be sequential (i.e. 1, 2, 3, 4), so a
    format like 5, 10, 15, 20 won't work.
"""
return val/5 | fe89d7ccb8bef511e2ad90a07ad0346c58ba894d | 3,654,813 |
def get_columns_for_table(instance, db, table):
""" Get a list of columns in a table
Args:
instance - a hostAddr object
db - a string which contains a name of a db
table - the name of the table to fetch columns
Returns
A list of columns
"""
conn = connect_mysql(instance)
cursor = conn.cursor()
ret = list()
param = {'db': db, 'table': table}
sql = ("SELECT COLUMN_NAME "
"FROM information_schema.columns "
"WHERE TABLE_SCHEMA=%(db)s AND"
" TABLE_NAME=%(table)s")
cursor.execute(sql, param)
for column in cursor.fetchall():
ret.append(column['COLUMN_NAME'])
return ret | 567a7e3e6ebbf33cee3cb088e1725bd2b11edcef | 3,654,814 |
def registra_aluno(nome, ano_entrada, ano_nascimento, **misc):
"""Cria a entrada do registro de um aluno."""
registro = {'nome': nome,
'ano_entrada': ano_entrada,
'ano_nascimento': ano_nascimento}
    registro.update(misc)
return registro | e56da99ec90de9ebca204ccc3c3f3555b9bbbc64 | 3,654,815 |
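# Usage sketch for registra_aluno: extra keyword arguments end up as additional keys
# in the returned record (the values below are illustrative).
aluno = registra_aluno("Maria", 2021, 2003, curso="Fisica", bolsista=True)
print(aluno["nome"], aluno["curso"])   # Maria Fisica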
def define_output_ports(docstring, short_description_word_count=4):
"""
Turn the 'Returns' fields into VisTrails output ports
Parameters
----------
docstring : NumpyDocString #List of strings?
The scraped docstring from the function being autowrapped into
vistrails
Returns
-------
    output_ports : list
        List of output port specifications
"""
output_ports = []
idx = 0
# now look at the return Returns section
for (the_name, the_type, the_description) in docstring['Returns']:
# when the return parameter has no name but only type and description
if the_type == '':
the_type = the_name
the_name = 'def_output' + str(idx)
idx += 1
base_type, is_optional = _type_optional(the_type)
if is_optional:
continue
type_base, is_enum, enum_list = _enum_type(the_type)
normed_type = None
# this is to deal with malformed docstrings like {array, scalar}
if is_enum and type_base == 'str':
try_norm = _normalize_type(' or '.join(enum_list))
if try_norm is not None:
is_enum = False
enum_list = []
logger.warning("abuse of enum %s | <%s>|",
docstring['Signature'], the_type)
normed_type = try_norm
# first try to parse
if normed_type is None:
normed_type = _normalize_type(type_base)
# deal with if we fail to parse
if normed_type is None:
raise AutowrapError("Malformed output type |{}: <{}>|".format(
the_name, the_type))
for port_name in (_.strip() for _ in the_name.split(',')):
if not port_name:
raise AutowrapError("A Port with no name")
pdict = {'name': port_name,
'signature': sig_map[normed_type]}
output_ports.append(pdict)
# some numpy functions lack a Returns section and have and 'output'
# optional input (mostly for in-place operations)
if len(output_ports) < 1:
for (the_name, the_type, the_description) in docstring['Parameters']:
if the_name.lower() in ['output', 'out']:
the_type, _ = _type_optional(the_type)
the_type = _normalize_type(the_type)
if the_type is None:
# TODO dillify
raise AutowrapError("Malformed type")
output_ports.append(dict(name=the_name,
signature=sig_map[the_type]))
return output_ports | c02737fd1eb29cc1570e47df32ecdb34e1467cea | 3,654,816 |
import signal
import errno
import os
def kill(pidfile, logger, signum=signal.SIGTERM):
"""Sends `signum` to the pid specified by `pidfile`.
Logs messages to `logger`. Returns True if the process is not running,
or signal was sent successfully. Returns False if the process for the
pidfile was running and there was an error sending the signal."""
daemon_pid = read_pid(pidfile, logger)
if daemon_pid is None:
return True
try:
send_signal(daemon_pid, signum, logger)
return True
except OSError as e:
if e.errno == errno.ESRCH:
logger.warning("Daemon not running (Stale lockfile)")
os.remove(pidfile)
return True
elif e.errno == errno.EPERM:
logger.error("Unable to kill %d (EPERM)", daemon_pid)
return False
raise | a2dd0ea54bb6e23dd0446646f41f3e34240d713e | 3,654,817 |
from tabulate import tabulate
def create_small_table(small_dict):
"""
Create a small table using the keys of small_dict as headers. This is only
suitable for small dictionaries.
Args:
small_dict (dict): a result dictionary of only a few items.
Returns:
str: the table as a string.
"""
keys, values = tuple(zip(*small_dict.items()))
table = tabulate(
[values],
headers=keys,
tablefmt="pipe",
floatfmt=".3f",
stralign="center",
numalign="center",
)
return table | 08da78580fbf4cee8c30acb21ce7fa928a9c17b1 | 3,654,818 |
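# Usage sketch for create_small_table with a small metrics dictionary.
metrics = {"AP": 41.253, "AP50": 61.8, "AP75": 44.9}
print(create_small_table(metrics))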
def get_normalized_list_for_every_month(variable_r, list_of_ranges_r, tags_r):
"""
:param variable_r: big list with all the data [sizes][months]
:param list_of_ranges_r: sorted list of range (sizes...Enormous, etc.)
    :return: normalized list for each month (numbers are percentages with respect to the total bytes/requests in a given month)
"""
number_of_months = len(tags_r)
temp_list = [[] for lil in range(0, number_of_months)]
total_requests_in_each_month = [[] for lil in range(0, number_of_months)]
maxima_each_month = [[] for lil in range(0, number_of_months)]
new_list_normalized = [[] for lil in range(0, number_of_months)]
for month in range(0, number_of_months):
for ciao in range(0, len(list_of_ranges_r), 1):
temp_list[month].append(variable_r[ciao][month]) # change second index to change the month: 0,1,2,...23
for month in range(0, number_of_months):
total_requests_in_each_month[month] = float(sum(temp_list[month]))
#print("total bytes requested in month 0: %f" % total_requests_in_each_month[0])
# list of maxima for each month
for month in range(0, number_of_months):
maxima_each_month[month] = max(temp_list[month])
#print("maxima for the first month: %d", maxima_each_month[0])
for month in range(0, number_of_months):
for zeta in temp_list[month]:
new_list_normalized[month].append((zeta/total_requests_in_each_month[month])*100)
return new_list_normalized | 4a3b837e6bf254dbd3255a8ca0a5d103d34bd2a9 | 3,654,819 |
def mark_as_possible_cluster_member(g, possible_cluster_member, cluster, confidence, system, uri_ref=None):
"""
Mark an entity or event as a possible member of a cluster.
:param rdflib.graph.Graph g: The underlying RDF model
:param rdflib.term.URIRef possible_cluster_member: The entity or event to mark as a possible
member of the specified cluster
:param rdflib.term.URIRef cluster: The cluster to associate with the possible cluster member
:param float confidence: The confidence with which to mark the cluster membership
:param rdflib.term.URIRef system: The system object for the system which marked the specified cluster
:param str uri_ref: A string URI representation of the cluster member (Default is None)
:returns: The cluster membership assertion
:rtype: rdflib.term.BNode
"""
cluster_member_assertion = _make_aif_resource(g, uri_ref, AIDA_ANNOTATION.ClusterMembership, system)
g.add((cluster_member_assertion, AIDA_ANNOTATION.cluster, cluster))
g.add((cluster_member_assertion, AIDA_ANNOTATION.clusterMember, possible_cluster_member))
mark_confidence(g, cluster_member_assertion, confidence, system)
return cluster_member_assertion | 851da7d12781723c7c2fb4bc13ac14172c890daf | 3,654,820 |
import numpy as np
def twodcontourplot(tadata_nm, tadata_timedelay, tadata_z_corr):
"""
make contour plot
Args:
tadata_nm: wavelength array
tadata_timedelay: time delay array
tadata_z_corr: matrix of z values
"""
timedelayi, nmi = np.meshgrid(tadata_timedelay, tadata_nm)
# find the maximum and minimum
# these are used for color bar
z_min = np.amin(np.amin(tadata_z_corr, axis=1))
z_max = np.amax(np.amax(tadata_z_corr, axis=1))
return [nmi, timedelayi, z_min, z_max] | 2e8850e1c8153c9c307ff785ddd1d1d127163190 | 3,654,821 |
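# Usage sketch for twodcontourplot: build the meshgrid and colour limits for a small
# synthetic transient-absorption dataset, then hand them to matplotlib (matplotlib is
# assumed here; it is not part of the function itself).
import numpy as np
import matplotlib.pyplot as plt

nm = np.linspace(400, 700, 50)      # wavelengths
delays = np.linspace(0, 10, 40)     # time delays
z = np.random.randn(50, 40)         # z values, shape (len(nm), len(delays))
nmi, delayi, z_min, z_max = twodcontourplot(nm, delays, z)
plt.contourf(nmi, delayi, z, vmin=z_min, vmax=z_max)
plt.show()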
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags | ffc2fd47bbee7d2124da199d6b5101103500fbf2 | 3,654,822 |
def count_good_deals(df):
"""
    7. Count the number of profitable deals.
    :param df: DataFrame with a '<DEAL_RESULT>' column
    :return: number of profitable deals
"""
# http://stackoverflow.com/questions/27140860/count-occurrences-of-number-by-column-in-pandas-data-frame?rq=1
return (df['<DEAL_RESULT>'] > 0).sum() | 1f3ef9b9e0f7924d45d5ce84a77938f19386b6bc | 3,654,823 |
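# Usage sketch for count_good_deals on a tiny DataFrame (column name as in the function).
import pandas as pd

df = pd.DataFrame({'<DEAL_RESULT>': [120, -40, 0, 75]})
print(count_good_deals(df))   # 2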
from time import time
import itertools
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame
from shapely.geometry import LineString
import shapely as sh
import shapely.ops
def match_lines_by_hausdorff(target_features, match_features, distance_tolerance,
azimuth_tolerance=None, length_tolerance=0, match_features_sindex=None, match_fields=False,
match_stats=False, field_suffixes=('', '_match'), match_strings=None, constrain_target_features=False,
target_features_sindex=None, match_vectors=False, expand_target_features=False,
closest_match=False, closest_target=False, verbose=False):
"""Conflate attributes between line features based on Hausdorff distance.
target_features : :class:`geopandas.GeoDataFrame`
Features to which ``match_features`` will be matched.
Must have LineString geometries.
All ``target_features`` will be included in output, with or without a match.
match_features : :class:`geopandas.GeoDataFrame`
Features to be matched to ``target_features``.
Must have LineString geometries.
Only successfully matched features will be included in output.
Multiple ``match_features`` may be matched to a single target feature.
Must have the same spatial reference as ``target_features``.
distance_tolerance : :obj:`float`
Maximum Hausdorff distance between each target feature and candidate ``match_features``
Because directed Hausdorff distances are calculated from target to match
and match to target, ``distance_tolerance`` will be assessed based on
the smaller of these two values.
If feature segments are matched (e.g., 1:n, m:1, or m:n),
Hausdorff distances are calculated for each segment.
In spatial unit of ``target_features``.
azimuth_tolerance : :obj:`float`, optional, default = ``None``
Maximum azimuth difference (in degrees) between target feature and potential match features.
Feature azimuths are calculated as the azimuth of the feature's "major axis"
(the longest axis of the feature's minimum bounding rectangle).
If feature segments are matched (e.g., 1:n, m:1, or m:n),
azimuths are calculated for each segment.
length_tolerance : :obj:`float`, optional, default = 0
Proportion of target feature length required for potential match features.
        For example, 0.25 specifies that a match candidate must be at least 25% as long as
target features to be viable matches.
Must be between 0 and 1. If target and match features are split, length proportions
are calculated between split segments, not original features.
match_features_sindex : :class:`rtree.index.Index`, optional, default = ``None``
Spatial index for ``match_features``.
If provided, will not have to be constructed for each function call.
match_fields : :obj:`bool`, optional, default = ``False``
* ``True``: Fields from match features will be included in output.
* ``False``: Only row indices for match features will be included in output.
match_stats : :obj:`bool`, optional, default = ``False``
* ``True``: Statistics related to tolerances will be included in output.
* ``False``: No match statistics will be included in ouput.
field_suffixes : :obj:`tuple`, optional, default = ``('', '_match')``
Suffixes to be appended to output field names for ``target_features``
and ``match_features``, respectively.
Only used if ``match_stats=True``.
match_strings : :obj:`tuple`, optional, default = ``None``
Fields used to compute fuzzy string comparisions.
Typically, these are street name fields for the ``target_features``
and ``match_features``, respectively.
String comparisions do not affect matches, but can be post-processed to
help assess match quality.
constrain_target_features : :obj:`bool`, optional, default = ``False``
* ``True``: Extents of ``match_features``, plus a ``distance_tolerance`` buffer,
will be used to select relevent ``target_features`` prior to matching.
When the extent or number of ``match_features`` is small relative to
``target_features``, this dramatically improves performance because fewer
``target_features`` are analyzed for potential matches.
* ``False``: All ``target_features`` are analyzed for potential matches.
target_features_sindex : :class:`rtree.index.Index`, optional, default = ``None``
If ``constrain_target_features=True``, a spatial index for the ``target_features``
will be computed unless one is provided. If the same ``target_features`` are specified
in multiple function calls, pre-computing a spatial index will improve performance.
If ``constrain_target_features=False``, ``target_features_sindex`` is unnecessary.
match_vectors : :obj:`bool`, optional, default = ``False``
* ``True``: Constructs LineStrings between midpoint of ``target_features`` and the
        closest points along matched ``match_features``. Useful for visualizing match results.
expand_target_features : :obj:`bool`, optional, default = ``False``
* ``True`` : Target features that match to multiple ``match_features`` will be expanded
into multiple segments, each corresponding to a single match feature. Each target feature
segment will be output as a seperate record with an index field identifying original
row-wise indices from ``target_features``.
closest_match : :obj:`bool`, optional, default = ``False``
* ``True`` : Only the closest available match feature will be matched to each target
feature, based on Hausdorff distance
* ``False`` : All available match features will match to each target feature
closest_target : :obj:`bool`, optional, default = ``False``
* ``True`` : A target feature will only match with a match feature if it is the closest
available target, based on Hausdorff distance
* ``False`` : A target feature will match with all available match features, regardless
of whether it has also matched with other target features
verbose : :obj:`bool`, optional, default = ``False``
* ``True`` : Reports status by printing to standard output
"""
    # Copy input features so the function doesn't modify the originals
target_features = target_features.copy()
match_features = match_features.copy()
original_target_feature_columns = target_features.columns
original_crs = target_features.crs
if verbose:
start = time()
length = len(target_features)
counter = 0
# Constrain target features to those near available match features
if constrain_target_features:
if not target_features_sindex:
target_features_sindex = target_features.sindex
nearby_target_idx = []
for match_feature in match_features.geometry:
nearby_target_idx.extend(
list(target_features_sindex.intersection(
match_feature.buffer(distance_tolerance).bounds)))
nearby_target_idx = list(set(nearby_target_idx))
operating_target_features = target_features[['geometry']].iloc[nearby_target_idx].copy()
else:
operating_target_features = target_features[['geometry']].copy()
# Make a spatial index for match features, if one isn't supplied
if not match_features_sindex:
match_features_sindex = match_features.sindex
# Initiate lists to store match results
match_indices = []
match_types = []
h_tms_matches = []
t_props_matches = []
t_segs_matches = []
t_linrefs_matches = []
h_mts_matches = []
m_props_matches = []
m_segs_matches = []
m_linrefs_matches = []
if match_vectors:
match_vectors = []
# Iterate through target features:
for i, target in enumerate(operating_target_features.geometry):
# Initiate lists to store matches
m_ids = []
m_types = []
h_tms = []
t_props = []
t_segs = []
t_linrefs = []
h_mts = []
m_props = []
m_segs = []
m_linrefs = []
# Only analyze targets with length
if target.length > 0:
# Roughly filter candidates with a spatial index
search_area = target.buffer(distance_tolerance).bounds
candidate_IDs = list(match_features_sindex.intersection(search_area))
candidates = match_features[['geometry']].iloc[candidate_IDs].reset_index()
# Calculate Hausdorff distances from feature to each candidate (h_fc)
h_tm_list = [directed_hausdorff(target, candidate) for candidate in candidates.geometry]
candidates['h_tm'] = pd.Series(h_tm_list)
# Calculate Hausdorff distances from each candidate to feature (h_cf)
h_mt_list = [directed_hausdorff(candidate, target) for candidate in candidates.geometry]
candidates['h_mt'] = pd.Series(h_mt_list)
# Define function to compare major axis azimuths
def azimuth_match(target, candidate, azimuth_tolerance):
if azimuth_tolerance:
target_azimuth = major_axis_azimuth(target)
candidate_azimuth = major_axis_azimuth(candidate)
azimuth_difference_ = azimuth_difference(target_azimuth, candidate_azimuth, directional=False)
if azimuth_difference_ <= azimuth_tolerance:
return True
else:
return False
else:
return True
# Examine each candidate's relationship to the target feature
for candidate in candidates.itertuples():
# Only analyze candidates with length
if candidate.geometry.length > 0:
# Initialize default match values
m_type = None
h_tm = None
t_prop = None
t_seg = None
t_linref = None
h_mt = None
m_prop = None
m_seg = None
m_linref = None
# 1:1
if (
(candidate.h_tm <= distance_tolerance) and
(candidate.h_mt <= distance_tolerance) and
# Check that azimuth is acceptable
azimuth_match(target, candidate.geometry, azimuth_tolerance) and
# Check relative length
(abs(candidate.geometry.length - target.length) <
(1- length_tolerance) * target.length)):
# Whole target matches candidate
h_tm = candidate.h_tm
t_prop = 1
t_seg = target
t_linref = (0, target.length)
# Whole candidate matches target
h_mt = candidate.h_mt
m_prop = 1
m_seg = candidate.geometry
m_linref = (0, candidate.geometry.length)
m_type = '1:1'
# m:1
elif (
(candidate.h_tm <= distance_tolerance) and
(candidate.h_mt > distance_tolerance)):
# Find the candidate segment matching the target
candidate_seg = find_parallel_segment(target, candidate.geometry)
if (candidate_seg and
candidate_seg.length > 0 and
azimuth_match(target, candidate_seg, azimuth_tolerance) and
# Check relative length
(abs(candidate_seg.length - target.length) <
(1- length_tolerance) * target.length)):
# Whole target matches candidate
h_tm = directed_hausdorff(target, candidate_seg)
t_prop = 1
t_seg = target
t_linref = (0, target.length)
# Calculate proportion of candidate included in segment
h_mt = directed_hausdorff(candidate_seg, target)
m_prop = candidate_seg.length / candidate.geometry.length
m_seg = candidate_seg
m_linref = segment_linear_reference(candidate.geometry, candidate_seg)
m_type = 'm:1'
# 1:n
elif (
(candidate.h_tm > distance_tolerance) and
(candidate.h_mt <= distance_tolerance)):
# Find the target segment matching the candidate
target_seg = find_parallel_segment(
candidate.geometry, target, snap_distance=distance_tolerance)
if (target_seg and
target_seg.length > 0 and
azimuth_match(target_seg, candidate.geometry, azimuth_tolerance) and
# Check relative length
(abs(candidate.geometry.length - target_seg.length) <
(1- length_tolerance) * target_seg.length)):
# Calculate proportion of target included in segment
h_tm = directed_hausdorff(target_seg, candidate.geometry)
t_prop = target_seg.length / target.length
t_seg = target_seg
t_linref = segment_linear_reference(target, target_seg)
# Whole candidate matches target
h_mt = directed_hausdorff(candidate.geometry, target_seg)
m_prop = 1
m_seg = candidate.geometry
m_linref = (0, candidate.geometry.length)
m_type = '1:n'
# potential m:n
elif (
(candidate.h_tm > distance_tolerance) and
(candidate.h_mt > distance_tolerance)):
# See if parallel segments can be identified
target_seg = find_parallel_segment(
candidate.geometry, target, snap_distance=distance_tolerance)
candidate_seg = find_parallel_segment(
target, candidate.geometry)
# Measure hausdorff distance (non-directed) between parallel segments
if target_seg and candidate_seg:
h_tm_seg = directed_hausdorff(target_seg, candidate_seg)
h_mt_seg = directed_hausdorff(candidate_seg, target_seg)
if ((h_tm_seg <= distance_tolerance) and
(h_mt_seg <= distance_tolerance) and
target_seg.length > 0 and
candidate_seg.length > 0 and
azimuth_match(target_seg, candidate_seg, azimuth_tolerance) and
# Check relative length
(abs(candidate_seg.length - target_seg.length) <
(1- length_tolerance) * target_seg.length)):
h_tm = h_tm_seg
t_prop = target_seg.length / target.length
t_seg = target_seg
t_linref = segment_linear_reference(target, target_seg)
h_mt = h_mt_seg
m_prop = candidate_seg.length / candidate.geometry.length
m_seg = candidate_seg
m_linref = segment_linear_reference(candidate.geometry, candidate_seg)
m_type = 'm:n'
if t_prop is not None:
m_ids.append(candidate.index)
m_types.append(m_type)
h_tms.append(h_tm)
t_props.append(t_prop)
t_segs.append(t_seg)
t_linrefs.append(t_linref)
h_mts.append(h_mt)
m_props.append(m_prop)
m_segs.append(m_seg)
m_linrefs.append(m_linref)
# Record match stats
match_indices.append(m_ids)
match_types.append(m_types)
h_tms_matches.append(h_tms)
t_props_matches.append(t_props)
t_segs_matches.append(t_segs)
t_linrefs_matches.append(t_linrefs)
h_mts_matches.append(h_mts)
m_props_matches.append(m_props)
m_segs_matches.append(m_segs)
m_linrefs_matches.append(m_linrefs)
# Construct match vector
if isinstance(match_vectors, list):
vectors = []
for t_seg, m_seg in zip(t_segs_matches, m_segs_matches):
if t_seg and m_seg:
vectors.append(LineString([midpoint(t_seg), midpoint(m_seg)]))
match_vectors.append(vectors)
# Report status
if verbose:
if counter % round(length / 10) == 0 and counter > 0:
percent_complete = (counter // round(length / 10)) * 10
minutes = (time()-start) / 60
print('{}% ({} segments) complete after {:04.2f} minutes'.format(percent_complete, counter, minutes))
counter += 1
# Merge joined data with target features
operating_target_features['match_index'] = pd.Series(
match_indices, index=operating_target_features.index)
operating_target_features['match_type'] = pd.Series(
match_types, index=operating_target_features.index)
operating_target_features['h_tm'] = pd.Series(
h_tms_matches, index=operating_target_features.index)
operating_target_features['t_prop'] = pd.Series(
t_props_matches, index=operating_target_features.index)
operating_target_features['t_seg'] = pd.Series(
t_segs_matches, index=operating_target_features.index)
operating_target_features['t_linref'] = pd.Series(
t_linrefs_matches, index=operating_target_features.index)
operating_target_features['h_mt'] = pd.Series(
h_mts_matches, index=operating_target_features.index)
operating_target_features['m_prop'] = pd.Series(
m_props_matches, index=operating_target_features.index)
operating_target_features['m_seg'] = pd.Series(
m_segs_matches, index=operating_target_features.index)
operating_target_features['m_linref'] = pd.Series(
m_linrefs_matches, index=operating_target_features.index)
if isinstance(match_vectors, list):
operating_target_features['match_vectors'] = pd.Series(
match_vectors, index=operating_target_features.index)
# Store original target feature IDs
operating_target_features = operating_target_features.reset_index().rename(columns={'index': 'target_index'})
# Expand targets with more than one match
# Look for lists of match IDs in each row
expanded_targets = []
for i, target in enumerate(operating_target_features.itertuples()):
if isinstance(target.match_index, list):
# Make duplicate rows for each match ID with respective attributes
for j, match in enumerate(target.match_index):
new_row = target._asdict()
new_row.pop('Index', None)
for key, value in target._asdict().items():
if isinstance(value, list):
new_row[key] = value[j]
# Append new row to end of dataframe
operating_target_features = operating_target_features.append(new_row, ignore_index=True)
# Mark original row for deletion
expanded_targets.append(i)
# Delete expanded targets
operating_target_features = operating_target_features.drop(expanded_targets)
# Only analyze matches if there are any
if len(operating_target_features) > 0:
# Identify and add records for unmatched portions of target features
# Get target records that have unmatched portions
unmatched_segments = operating_target_features.copy()
unmatched_segments = unmatched_segments[
(unmatched_segments['t_prop'].notnull()) &
(unmatched_segments['t_prop'] < 1)]
new_target_records = []
# Iterate through groups of target records
for target_index, target_group in unmatched_segments.groupby('target_index'):
# Get the linref intervals associated with each of the matched segments
matched_linrefs = target_group['t_linref'].tolist()
# Combine the intervals
matched_linrefs_merged = merge_intervals(matched_linrefs)
# Get the original target geometry
orig_target_geometry = target_group.iloc[0]['geometry']
# Construct linref intervals for the unmatched parts
geometry_extents = [0, orig_target_geometry.length]
matched_linrefs_list = [linref for tup in matched_linrefs_merged for linref in tup]
all_linrefs_list = sorted(geometry_extents + matched_linrefs_list)
unmatched_linrefs = [
(all_linrefs_list[i], all_linrefs_list[i + 1])
for i in range(0, len(all_linrefs_list), 2)]
unmatched_lines = [
split_line_at_dists(orig_target_geometry, pair)[1]
for pair in unmatched_linrefs]
# For each unmatched line, make a new target record
for unmatched_line, unmatched_linref in zip(unmatched_lines, unmatched_linrefs):
if unmatched_line.length > 1:
# Get all the attributes associated with the original target record
new_target_record = target_group.iloc[0].to_dict()
# Modify the match attributes
new_target_record['match_index'] = np.nan
new_target_record['match_type'] = np.nan
new_target_record['h_tm'] = np.nan
new_target_record['t_prop'] = np.nan
new_target_record['t_seg'] = unmatched_line
new_target_record['t_linref'] = unmatched_linref
new_target_record['h_mt'] = np.nan
new_target_record['m_prop'] = np.nan
new_target_record['m_seg'] = np.nan
new_target_record['m_linref'] = np.nan
new_target_record['geometry'] = orig_target_geometry
new_target_records.append(new_target_record)
# Add new target records to operating features
new_target_records = gpd.GeoDataFrame(new_target_records, geometry='geometry')
operating_target_features = pd.concat([operating_target_features, new_target_records])
# Replace target geometries with target segments (if not NaN)
##### This appears to be duplicated below; not sure if it needs to happen twice
operating_target_features['geometry'] = operating_target_features.apply(
lambda row: row['t_seg'] if isinstance(row['t_seg'], LineString) else row['geometry'], axis=1)
# For each unique target geometry, delete all matches except the closest one
# (expanded targets are deleted if they don't have the closest match)
# Required if 'closest_target'
if closest_match or closest_target:
# Identify sets of records with identical targets
equivalent_target_sets = [d for _, d in operating_target_features.groupby(
['target_index','t_linref']) if len(d) > 1]
# Identify which of these records has the closest match
equivalent_record_ids = []
closest_records = gpd.GeoDataFrame(crs=operating_target_features.crs)
for equivalent_target_set in equivalent_target_sets:
# Keep track of IDs for equivalent records
equivalent_record_ids.extend(equivalent_target_set.index.tolist())
# Identify minimum tc and ct distances and associated indices
h_tm_min_idx = equivalent_target_set['h_tm'].astype(float).idxmin()
h_tm_min = equivalent_target_set['h_tm'].astype(float).min()
h_mt_min_idx = equivalent_target_set['h_mt'].astype(float).idxmin()
h_mt_min = equivalent_target_set['h_mt'].astype(float).min()
# Identify overall closest match
min_idx = h_tm_min_idx if h_tm_min < h_mt_min else h_mt_min_idx
closest_records = closest_records.append(
operating_target_features.loc[[min_idx]], ignore_index=True)
# Drop equivalent records
operating_target_features = operating_target_features.drop(
equivalent_record_ids)
# Add back those with the closest match
operating_target_features = operating_target_features.append(
closest_records, ignore_index=True)
# Ensure that each match feature is only matched to one, closest target feature
# (No targets are deleted, but matches are removed if a given target isn't closest)
if closest_target:
# Identify sets of records with the same match id
match_id_sets = [d for _, d in operating_target_features.groupby(
'match_index') if len(d) > 1]
# Within these sets, identify sets with overlapping linear references
for match_id_set in match_id_sets:
# Get ID for match feature
match_id = match_id_set.iloc[0]['match_index']
# Get raw geometry for match feature
match_geom = match_features.loc[match_id]['geometry']
# Find overlapping linear reference ranges among the original matches
lin_ref_ranges = merge_intervals(match_id_set['m_linref'].tolist())
# Identify sets of records within each range
lin_ref_sets = [match_id_set[match_id_set['m_linref'].apply(
lambda x: True if (x[0] >= lower and x[1] <= upper) else False)]
for lower, upper in lin_ref_ranges]
# Analyze each set of targets with overlapping matches
for lin_ref_set, lin_ref_range in zip(lin_ref_sets, lin_ref_ranges):
# Get the portion of the raw match feature within the linear reference range
_, range_match_geom, _ = split_line_at_dists(match_geom, lin_ref_range)
# Split the linear reference feature into segments parallel to match features
t_seg_endpoints = [x for t_seg in lin_ref_set['t_seg'] for x in endpoints(t_seg)]
t_seg_endpoint_lin_refs = [range_match_geom.project(x) for x in t_seg_endpoints]
range_match_segments = split_line_at_dists(range_match_geom, t_seg_endpoint_lin_refs)
# For each segment, see which target feature is closest based on hausdorff distance
closest_targets = [
nearest_neighbor(
segment,
GeoDataFrame(geometry=lin_ref_set['t_seg']),
hausdorff_distance=True
).index[0]
for segment in range_match_segments]
# Group adjacent segments with the same target
groups = [list(group) for _, group in itertools.groupby(
zip(closest_targets, range_match_segments), key=lambda x: x[0])]
closest_targets = [group[0][0] for group in groups]
match_segments = [[x[1] for x in group] for group in groups]
match_segments = [sh.ops.linemerge(x) for x in match_segments]
# Only move forward if there are match LineString match segments to work with
if LineString in [type(x) for x in match_segments]:
# Remove any non-LineString geometries (e.g., GeometryCollection)
try:
match_segments, closest_targets = zip(
*[(segment, idx) for segment, idx
in zip(match_segments, closest_targets)
if isinstance(segment, LineString)])
except:
match_segment_types = [type(x) for x in match_segments]
closest_target_types = [type(x) for x in closest_targets]
print('match segments: {}, {}'.format(str(match_segment_types), str(match_segments)))
print('closest_targets: {}, {}'.format(str(closest_target_types), str(closest_targets)))
# Calculate the match prop and lin_ref bounds for the grouped match segments
match_props = [x.length/match_geom.length for x in match_segments]
match_lin_refs = [tuple([match_geom.project(point) for point in endpoints(segment)])
for segment in match_segments]
# Update match info for the chosen target
for idx, match_prop, match_segment, match_lin_ref in zip(
closest_targets, match_props, match_segments, match_lin_refs):
# lin_ref_set.at[idx, 'match_index'] = match_id
lin_ref_set.at[idx, 'm_prop'] = match_prop
lin_ref_set.at[idx, 'm_seg'] = match_segment
lin_ref_set.at[idx, 'm_linref'] = match_lin_ref
lin_ref_set.at[idx, 'h_tm'] = directed_hausdorff(
lin_ref_set.at[idx, 't_seg'], match_segment)
lin_ref_set.at[idx, 'h_mt'] = directed_hausdorff(
match_segment, lin_ref_set.at[idx, 't_seg'])
# Remove match info for other targets in set
not_closest_targets = [x for x in lin_ref_set.index
if x not in closest_targets]
for idx in not_closest_targets:
lin_ref_set.at[idx, 't_prop'] = np.nan
# lin_ref_set.at[lin_ref_set_idx, 't_seg'] = np.nan ########### Maybe don't get rid of the t_seg?
lin_ref_set.at[idx, 't_linref'] = np.nan
lin_ref_set.at[idx, 'm_prop'] = np.nan
lin_ref_set.at[idx, 'm_seg'] = np.nan
lin_ref_set.at[idx, 'm_linref'] = np.nan
lin_ref_set.at[idx, 'h_tm'] = np.nan
lin_ref_set.at[idx, 'h_mt'] = np.nan
lin_ref_set.at[idx, 'match_index'] = np.nan
# Remove original lin_ref_set rows from the operating_target_features
operating_target_features = operating_target_features.drop(lin_ref_set.index)
# Append rows from lin_ref_set back onto operating_target_features
operating_target_features = operating_target_features.append(lin_ref_set)
# Gather values from fields of match features
if match_fields and isinstance(match_fields, bool):
match_fields = match_features.columns.tolist()
match_fields.remove('geometry')
elif isinstance(match_fields, list):
match_fields = match_fields
else:
match_fields = []
if match_strings and (match_strings[1] not in match_fields):
match_fields.append(match_strings[1])
# Join fields for matches
operating_target_features = operating_target_features.merge(
match_features[match_fields], how='left', left_on='match_index', right_index=True)
# Join operating target features back onto all target features
target_features = target_features.merge(
operating_target_features.drop(columns=['geometry']),
how='outer', left_index=True, right_on='target_index', suffixes=field_suffixes)
# Sort by original index
target_features = target_features.sort_values(['target_index'])
# Convert empty lists to NaN
target_features = target_features.applymap(
lambda x: np.nan if x == [] else x)
# Convert single-element lists to their sole elements
target_features = target_features.applymap(
lambda x: x[0] if (isinstance(x, list) and len(x) == 1) else x)
# Calculate string matches, if specified
if match_strings:
def fuzzy_score(row, col_a, col_b):
a = row[col_a]
b = row[col_b]
def standardize_and_score(a, b):
a = standardize_streetname(str(a))
b = standardize_streetname(str(b))
return (fuzz.token_set_ratio(a, b) / 100)
# Inputs could be lists, so make them lists if they aren't
a_list = listify(a)
b_list = listify(b)
# Get fuzzy scores for each string combination
scores = []
for a in a_list:
for b in b_list:
if (pd.notnull(a) and pd.notnull(b)):
scores.append(standardize_and_score(a, b))
if len(scores) > 0:
return scores
else:
return np.nan
target_string, match_string = match_strings
if match_string in original_target_feature_columns:
target_string = target_string + field_suffixes[0]
match_string = match_string + field_suffixes[1]
target_features['match_strings'] = target_features.apply(
fuzzy_score, args=(target_string, match_string), axis=1)
# Replace geometry with t_seg if there is one available
target_features['geometry'] = target_features.apply(
lambda row: row['t_seg'] if isinstance(row['t_seg'], LineString) else row['geometry'], axis=1)
# Drop stats columns if not specifically requested
if not match_stats:
target_features = target_features.drop(
columns=['h_tm','t_prop','t_seg','t_linref','h_mt','m_prop','m_seg','m_linref'])
# Move target index to front
target_features = df_first_column(target_features, 'target_index')
# Move the geometry column to the end
target_features = df_last_column(target_features, 'geometry')
# Reset the index
target_features = target_features.reset_index(drop=True)
# Ensure that crs is the same as original
target_features.crs = original_crs
# Report done
if verbose:
print('100% ({} segments) complete after {:04.2f} minutes'.format(counter, (time()-start) / 60))
return target_features | d9c08e8e156a525495a03e5e9d6881c33ecdf0a2 | 3,654,824 |
import os
from typing import Callable
import stat
import shutil
def git_rmtree(path: os.PathLike) -> None:
"""Remove the given recursively.
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case"""
def onerror(func: Callable, path: os.PathLike, _) -> None:
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
try:
func(path) # Will scream if still not possible to delete.
except Exception:
raise
return shutil.rmtree(path, False, onerror) | 25471f9f791ef091106915cec2f5e16bf91b067c | 3,654,825 |
def get_num_streams():
"""Force an offset so high that the payload is small and quick.
In it, there will be a total number to base our reverse search from"""
result = get_streams()
logger.debug(result)
if "error" in result:
raise Exception("error in request: " + str(result))
total = int(result.get('_total', 0))
logger.info("Total live streams: %d", total)
return total | bae59352d4962b0916e8cf09215ea108fb3e8948 | 3,654,826 |
import os
import numpy as np
import pandas as pd
def compute_corr_active(params):
""" Compute correlation only for active positions, i.e. where
at least one of the two signal tracks is non-zero
:param params:
:return:
"""
with pd.HDFStore(params['inputfilea'], 'r') as hdf:
load_group = os.path.join(params['inputgroupa'], params['chrom'])
data = hdf[load_group].values
dataset1 = np.ma.masked_where(data > 0, data)
with pd.HDFStore(params['inputfileb'], 'r') as hdf:
load_group = os.path.join(params['inputgroupb'], params['chrom'])
data = hdf[load_group].values
dataset2 = np.ma.masked_where(data > 0, data)
comb_mask = np.ma.getmask(dataset1) & np.ma.getmask(dataset2)
dataset1 = np.ma.array(dataset1.data, mask=comb_mask)
dataset2 = np.ma.array(dataset2.data, mask=comb_mask)
results = dict()
for ms in params['measure']:
corr_fun = get_corr_fun(params[ms], masked=True)
res = corr_fun(dataset1, dataset2)
try:
corr, pv = res
except (ValueError, TypeError):
corr, pv = res, -1
infos = {'stat': corr, 'pv': pv.data}
results[ms] = infos
return params['chrom'], results | 4d26eaab548aa02521c2cb0341e93803315e46bf | 3,654,827 |
import numpy as np
def create_anchors_3d_stride(grid_size,
                             voxel_size=[0.16, 0.16, 0.5],
                             coordinates_offsets=[0, -19.84, -2.5],
                             dtype=np.float32):
    """
    Args:
        grid_size: [X, Y, Z] number of voxels along each axis
        voxel_size: [x, y, z] voxel dimensions
        coordinates_offsets: [x, y, z] grid origin offsets
    Returns:
        sizes: [N, 3] array of anchor center coordinates (xyz), N = prod(grid_size).
    """
# almost 2x faster than v1
x_stride, y_stride, z_stride = voxel_size
x_offset, y_offset, z_offset = coordinates_offsets
x_centers = np.arange(grid_size[0], dtype=dtype)
y_centers = np.arange(grid_size[1], dtype=dtype)
z_centers = np.arange(grid_size[2], dtype=dtype)
z_centers = z_centers * z_stride + z_offset + 0.25
y_centers = y_centers * y_stride + y_offset + 0.08
x_centers = x_centers * x_stride + x_offset + 0.08
xx, yy, zz = np.meshgrid(x_centers, y_centers, z_centers)
sizes = np.stack((xx, yy , zz), axis=-1)
sizes = np.reshape(sizes, [-1,3])
return sizes | 129e54a855bbacb2026eb08b5741ab70dd0374f4 | 3,654,828 |
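# Usage sketch for create_anchors_3d_stride: anchor centres for a tiny 4x4x2 grid.
centres = create_anchors_3d_stride([4, 4, 2])
print(centres.shape)   # (32, 3) -> one (x, y, z) centre per voxel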
def combined_roidb(imdb_names):
"""
Combine multiple roidbs
"""
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method("gt")
print('Set proposal method: {:s}'.format("gt"))
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = imdb2(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb | 4660a6ffff11511c449629c9fdb7f5d566a886f9 | 3,654,829 |
from gluon import A, DIV, P, URL
def render_locations_profile(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Profile Page
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = record["gis_location.name"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
# Placeholder to maintain style
#logo = DIV(IMG(_class="media-object"),
# _class="pull-left")
# We don't Edit Locations
# Edit Bar
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# vars = {"refresh": list_id,
# "record": record_id,
# }
# f = current.request.function
# if f == "organisation" and organisation_id:
# vars["(organisation)"] = organisation_id
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars=vars),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Render the item
item = DIV(DIV(DIV(#SPAN(A(name,
# _href=location_url,
# ),
# _class="location-title"),
#" ",
#edit_bar,
P(A(name,
_href=location_url,
),
_class="card_comments"),
_class="span5"), # card-details
_class="row",
),
)
return item | dbec0b41b16fa48996a735372bfb001b386c7300 | 3,654,830 |
import larch
import sys
def enable_plugins():
"""add all available Larch plugin paths
"""
if 'larch_plugins' not in sys.modules:
sys.modules['larch_plugins'] = larch
return sys.modules['larch_plugins'] | a58100359d749d876976e8bb13b6766edf6ea8b3 | 3,654,831 |
from flask import make_response  # make_response is assumed to be Flask's here
def exception_log_and_respond(exception, logger, message, status_code):
    """Log an error and send a jsonified response."""
logger.error(message, exc_info=True)
return make_response(
message,
status_code,
dict(exception_type=type(exception).__name__, exception_message=str(exception)),
) | c784efd4b8adbbc463ff1d2a499ffd598253349d | 3,654,832 |
import re
def parse_cdhit_clusters(cluster_file):
"""
Parses cdhit output into three collections in a named tuple:
clusters: list of lists of gene ids.
reps: list of representative gene for each cluster
lookup: dict mapping from gene names to cluster index
In this setup, cluster ids are the position in either of the
first two lists.
"""
# re-call with file-like object if we are give an path
if isinstance(cluster_file, str):
with open(cluster_file) as cluster_handle:
return parse_cdhit_clusters(cluster_handle)
# initialize final containers
clusters = []
cluster_reps = []
cluster_lookup = {}
# expression for parsing cluster line (captures gene name and alignment)
gene_expr = re.compile(r"\s>(\S+)\.\.\.\s\s*(.+)\s*$")
# loop over lines
for line in cluster_file:
if line.startswith(">"):
# create a new cluster
cluster = []
cluster_id = len(clusters)
clusters.append(cluster)
continue
# parse gene name from line
gene, alignment = gene_expr.search(line).groups()
if alignment.strip() == "*":
cluster_reps.append(gene)
cluster_lookup[gene] = cluster_id
cluster.append(gene)
# done
return CdhitClusters(clusters, cluster_reps, cluster_lookup) | fe0634c1991f0bd687f8be675ff15cb3290c919c | 3,654,833 |
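# Usage sketch for parse_cdhit_clusters on a tiny in-memory .clstr fragment.
# CdhitClusters is the named tuple the function returns; the stand-in definition
# below is only here so the sketch is self-contained.
from collections import namedtuple
from io import StringIO

CdhitClusters = namedtuple("CdhitClusters", ["clusters", "reps", "lookup"])
example = StringIO(
    ">Cluster 0\n"
    "0\t100aa, >geneA... *\n"
    "1\t95aa, >geneB... at 98.00%\n"
)
parsed = parse_cdhit_clusters(example)
print(parsed.reps)      # ['geneA']
print(parsed.clusters)  # [['geneA', 'geneB']]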
import torch
from torch import nn
from torch.utils.data import DataLoader
def evaluate(model: nn.Module, dataloader: DataLoader) -> Scores:
"""
Evaluate a model without gradient calculation
:param model: instance of a model
:param dataloader: dataloader to evaluate the model on
:return: tuple of (accuracy, loss) values
"""
score = 0
loss = 0
    device = "cuda" if torch.cuda.is_available() else "cpu"
    loss_func = nn.LogSoftmax(dim=1).to(device)
    for i, x in enumerate(dataloader):
        img = x[0].to(device)
        ans = x[1].to(device)
        ques = x[2].to(device)
y_hat = model((img, ques))
img = None
ques = None
nll = -loss_func(y_hat)
score += train_utils.batch_accuracy(y_hat, ans.data).sum()
ans = answer_norm(ans)
loss += (nll * ans).sum(dim=1).mean()
loss /= len(dataloader.dataset)
score /= len(dataloader.dataset)
score *= 100
print("val loss = ", loss)
return score, loss | f23fbd72a24122b3a665f29918c52bbd5515d204 | 3,654,834 |
from operator import and_
def remote_judge_get_problem_info(problem_id: str, contest_id: int = -1, contest_problem_id: int = -1):
"""
    {
        "code":0,
        "data":{
            "isContest":"whether the problem is being viewed as part of a contest",
            "problemData":{
                "title":"problem title",
                "content":"problem statement",
                "background":"problem background",
                "inputFormat":"input format",
                "outputFormat":"output format",
                "examples":[{"input":"sample input","output":"sample output"}],
                "createTime":"creation time",
                "uploaderProfile":{
                    "uid":"user ID",
                    "username":"username"
                },
                "remoteProblemID":"remote problem ID",
                "remoteOJ":{
                    "id":"remote OJ ID",
                    "display":"remote OJ display name",
                    "availableLanguages":[
                        {"id":"0","display":"C++"}
                    ]
                },
                "public":"whether the problem is public",
                "hint":"hint",
                "recentDiscussions":[
                    {
                        "id":123,
                        "title":"qw"
                    }
                ],
                "acceptedCount":"",
                "submissionCount":""
            },
            "userData":{
                "lastCode":"code of the last submission",
                "lastLanguage":"language of the last submission",
                "status":"qwq",
                "id":"",
                "accounts":{
                    "id":{
                        "username":"username",
                        "oj":"OJ",
                        "accountID":"ID"
                    }
                }
            }
        }
    }
"""
# in_contest = contest_id != -1
contest: Contest = Contest.by_id(contest_id)
if contest:
# pass
if not contest.running() and not permission_manager.has_permission(session.get("uid"), "contest.manage"):
return make_response(-1, message="你没有权限查看此题目")
print(contest_problem_id,"contest_problem_id")
problem: Problem = db.session.query(Problem).filter(
Problem.id == contest.problems[contest_problem_id]["id"]).one_or_none()
else:
problem: Problem = db.session.query(Problem).filter(
Problem.id == problem_id).one_or_none()
if not permission_manager.has_permission(session.get("uid"), "remote_judge.use") and problem.uploader_id != int(session.get("uid")):
return make_response(-1, message="你没有权限查看该题目")
if not problem:
return make_response(-1, message="未知题目ID")
if problem.problem_type != "remote_judge":
return make_response(-1, message="此题目非远程评测题目")
uploader: User = db.session.query(User.id, User.username).filter(
User.id == problem.uploader_id).one()
last_submission: Submission = db.session.query(Submission).filter(and_(
Submission.problem_id == problem.id,
Submission.uid == session.get("uid")
)).order_by(Submission.score.desc()).order_by(Submission.id.desc())
last_code, last_language, submission_id, status = "", next(iter(
config.REMOTE_JUDGE_OJS[problem.remote_judge_oj]["availableLanguages"].keys())), -1, None
if last_submission.count():
last_submission = last_submission.first()
last_code = last_submission.code
last_language = last_submission.language
status = last_submission.status
submission_id = last_submission.id
discussions = [
]
discussions_query = db.session.query(Discussion.id, Discussion.title).filter(
Discussion.path == f"discussion.problem.{problem.id}").order_by(Discussion.id.desc()).limit(5)
for item in discussions_query:
discussions.append({
"id": item.id,
"title": item.title
})
accounts = {}
for item in db.session.query(RemoteAccount.account_id, RemoteAccount.username, RemoteAccount.oj).filter(
and_(
RemoteAccount.uid == session.get("uid", -1),
RemoteAccount.oj == problem.remote_judge_oj
)
):
accounts[item.account_id] = {
"username": item.username,
"oj": config.REMOTE_JUDGE_OJS[item.oj]["display"],
"accountID": item.account_id
}
return make_response(0, data={
"isContest": contest is not None,
"problemData": {
"title": problem.title,
"content": problem.content,
"background": problem.background,
"inputFormat": problem.input_format,
"outputFormat": problem.output_format,
"examples": problem.example,
"createTime": problem.create_time,
"uploaderProfile": {
"uid": uploader.id,
"username": uploader.username
} if not contest else None,
"remoteProblemID": problem.remote_problem_id if not contest else None,
"remoteOJ": {
"id": problem.remote_judge_oj,
**config.REMOTE_JUDGE_OJS[problem.remote_judge_oj]
},
"public": problem.public if not contest else None,
"hint": problem.hint,
"recentDiscussions": discussions if not contest else None,
"acceptedCount": db.session.query(Submission).filter(Submission.problem_id == problem.id).filter(Submission.status == "accepted").count() if not contest else None,
"submissionCount": db.session.query(Submission).filter(Submission.problem_id == problem.id).count() if not contest else None,
"id": problem.id
},
"userData": {
"lastCode": last_code,
"lastLanguage": last_language,
"status": status,
"id": submission_id,
"managable": permission_manager.has_permission(
session.get("uid", None), "problem.manage"),
"accounts": accounts
}
}) | aa7f8100bc7516659cf535e0fa7222b6f7b1a065 | 3,654,835 |
def can_write(obj, user):
"""
    Takes an article, or a model related to an article.
    Checks whether the user can write the article.
"""
return obj.can_write(user) | 9cb7cc046b63fb82670c4667abe169d6a1a279e4 | 3,654,836 |
# unparse assumed to come from xmltodict, matching the full_document kwarg used below
from xmltodict import unparse
def create_external_question(url: str, height: int) -> str:
"""Create XML for an MTurk ExternalQuestion."""
return unparse({
'ExternalQuestion': {
'@xmlns': 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd',
'ExternalURL': url,
'FrameHeight': height
}
}, full_document=False) | d249e82225ab2c1546bd871c166e9b683622a15d | 3,654,837 |
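# Usage sketch (illustrative, not part of the original record): builds the
# ExternalQuestion XML for a hypothetical HIT page; the URL and height are made up.
example_question_xml = create_external_question("https://example.com/hit.html", 600)
print(example_question_xml)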
def credentials_batch_account_key_secret_id(config):
# type: (dict) -> str
"""Get Batch account key KeyVault Secret Id
:param dict config: configuration object
:rtype: str
:return: keyvault secret id
"""
try:
secid = config[
'credentials']['batch']['account_key_keyvault_secret_id']
if util.is_none_or_empty(secid):
raise KeyError()
except KeyError:
return None
return secid | 4e7cfb100c2d50ef13d47295ff0b5bb0e3351986 | 3,654,838 |
import re
def is_C2D(lname):
"""
"""
pattns = ['Conv2D']
return any([bool(re.match(t,lname)) for t in pattns]) | a12bfd9857543e568148659f782615b3f2de4b83 | 3,654,839 |
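# Usage sketch (illustrative): layer names as they might appear in a model summary.
print(is_C2D('Conv2D'), is_C2D('Dense'))  # True False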
def encounter_media(instance, filename):
"""Return an upload file path for an encounter media attachment."""
if not instance.encounter.id:
instance.encounter.save()
return 'encounter/{0}/{1}'.format(instance.encounter.source_id, filename) | 79e4d8fae1d41edf362e99e6da11442a71565aa0 | 3,654,840 |
def findFonts(pattern, lazy=True):
"""Answers a list of Font instances where the pattern fits the font path.
If pattern is a list, all parts should have a match.
# TODO: make case insensitive
"""
"""
>>> findFonts('Roboto-Thi')
[<Font Roboto-Thin>, <Font Roboto-ThinItalic>]
>>> # Select on family and name parts.
>>> findFonts(('Robo', 'Ita', 'Thi'))
[<Font Roboto-ThinItalic>]
>>> # Select on style parts only.
>>> findFonts(('Ita', 'Bol', 'Con'))
[<Font RobotoCondensed-BoldItalic>]
"""
fontPaths = getFontPaths()
fonts = []
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
for fontPath in fontPaths:
found = True
for match in pattern:
if not match in fontPath:
found = False
break
if found:
fonts.append(findFont(fontPath, lazy=lazy))
return fonts | dd5d0a9818292c1dadfc77f8b834b348e55ae777 | 3,654,841 |
import datetime
def time_range_cutter_at_time(local,time_range,time_cut=(0,0,0)):
""" Given a range, return a list of DateTimes that match the time_cut
between start and end.
:param local: if False [default] use UTC datetime. If True use localtz
:param time_range: the TimeRange object
:param time_cut: HH:MM:SS of when to cut. eg: (0,0,0) for midnight
"""
( start, end ) = time_range.get(local)
index = start.replace(
hour=time_cut[0],
minute=time_cut[1],
second=time_cut[2]
)
cuts = []
index += datetime.timedelta(days=1)
while index < end:
cuts.append(index)
index += datetime.timedelta(days=1)
if local:
index = time_range.normalize(index)
return cuts | 57e851fb5b6ae8873dde5719dec668c25561f687 | 3,654,842 |
# imports assumed from tf.keras, matching the layer and regularizer names used below
import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, Conv2D, LeakyReLU, ZeroPadding2D
from tensorflow.keras.regularizers import l2
def _darknet_conv(
x: np.ndarray, filters: int, size: int, strides: int = 1, batch_norm: bool = True
) -> tf.Tensor:
"""create 1 layer with [padding], conv2d, [bn and relu]"""
if strides == 1:
padding = "same"
else:
x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding
padding = "valid"
x = Conv2D(
filters=filters,
kernel_size=size,
strides=strides,
padding=padding,
use_bias=not batch_norm,
kernel_regularizer=l2(0.0005),
)(x)
if batch_norm:
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.1)(x)
return x | f58153aa0c8af8df93289b872309f1c907941848 | 3,654,843 |
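# Usage sketch (illustrative): one strided Darknet conv block on a dummy Keras input.
# Assumes TensorFlow 2.x with the layers imported above; the 416x416x3 shape is arbitrary.
from tensorflow.keras import Input
dummy_input = Input(shape=(416, 416, 3))
downsampled = _darknet_conv(dummy_input, filters=32, size=3, strides=2)
# downsampled has static shape (None, 208, 208, 32)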
def _build_topic_to_consumer_topic_state_map(watermarks):
"""Builds a topic_to_consumer_topic_state_map from a kafka
get_topics_watermarks response"""
return {
topic: ConsumerTopicState({
partition: int((marks.highmark + marks.lowmark) / 2)
for partition, marks in watermarks_map.items()
}, None)
for topic, watermarks_map in watermarks.items()
} | 78ef0710e4823031ad079313484dba0eacc37135 | 3,654,844 |
from typing import Optional
def elgamal_keypair_from_secret(a: ElementModQ) -> Optional[ElGamalKeyPair]:
"""
Given an ElGamal secret key (typically, a random number in [2,Q)), returns
an ElGamal keypair, consisting of the given secret key a and public key g^a.
"""
secret_key_int = a
if secret_key_int < 2:
log_error("ElGamal secret key needs to be in [2,Q).")
return None
return ElGamalKeyPair(a, g_pow_p(a)) | 35de350b6bb434e1bb3d2c52d90f9a96be72dc1f | 3,654,845 |
def current_default_thread_limiter():
"""Get the default `~trio.CapacityLimiter` used by
`trio.to_thread.run_sync`.
The most common reason to call this would be if you want to modify its
:attr:`~trio.CapacityLimiter.total_tokens` attribute.
"""
try:
limiter = _limiter_local.get()
except LookupError:
limiter = CapacityLimiter(DEFAULT_LIMIT)
_limiter_local.set(limiter)
return limiter | 7abec5d74b9cfdaa663fd432587ea19440b7132f | 3,654,846 |
import copy
import numpy as np
def _mask_board(board):
"""
A function that copies the inputted board replaces all ships with empty coordinates to mask them.
:param board: a 2D numpy array containing a string representation of the board. All ships should be visible.
:return: a 2D numpy array containing a string representation of the board, with all ships hidden.
"""
masked = copy.deepcopy(board) # copy operation
for (y, x), val in np.ndenumerate(board):
if val.isdigit():
masked[y][x] = ''
return masked | c6832c90ac96d61563e37482773abf627d92a05a | 3,654,847 |
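# Usage sketch (illustrative): ship cells are digits and get blanked; other cells stay.
# The 2x2 board below is made up.
example_board = np.array([['', '1'], ['~', '']], dtype=object)
print(_mask_board(example_board))  # the '1' cell is replaced by ''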
from sys import stdout
def query_yes_no(question, default="no", color=_constants.COLORS.RESET):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
        stdout.write(color + question + _constants.COLORS.RESET + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") | eee02b61b3ba9ffdded0ba8c50c6bdd8e52d3c00 | 3,654,848 |
def remove_head_id(ref, hyp):
"""Assumes that the ID is the begin token of the string which is common
in Kaldi but not in Sphinx."""
ref_id = ref[0]
hyp_id = hyp[0]
if ref_id != hyp_id:
print('Reference and hypothesis IDs do not match! '
'ref="{}" hyp="{}"\n'
'File lines in hyp file should match those in the ref file.'.format(ref_id, hyp_id))
exit(-1)
ref = ref[1:]
hyp = hyp[1:]
return ref, hyp | 210798e8a02f555f70a1d9f2de9ce098dd0669fb | 3,654,849 |
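# Usage sketch (illustrative): the leading utterance ID is stripped from both token
# lists; the IDs and words below are made up.
ref_tokens, hyp_tokens = remove_head_id(['utt1', 'hello', 'world'],
                                        ['utt1', 'hello', 'word'])
print(ref_tokens, hyp_tokens)  # ['hello', 'world'] ['hello', 'word']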
import numpy as np
def convert_image_np(inp):
"""Convert a Tensor to numpy image."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp | 446feda40cc6698b5cbc80c3b14fa3212ef2800b | 3,654,850 |
def get_miner_day_list():
"""
    Daily miner data for a storage provider.
:return:
"""
miner_no = request.form.get("miner_no")
date = request.form.get("date")
data = MinerService.get_miner_day_list(miner_no, date)
return response_json(data) | 4fd523e8855ba498a1d694e532d27c863e7f9407 | 3,654,851 |
def get_notebook_logs(experiment_id, operator_id):
"""
Get logs from a Experiment notebook.
Parameters
----------
experiment_id : str
operator_id : str
Returns
-------
dict or None
Operator's notebook logs. Or None when the notebook file is not found.
"""
notebook = get_jupyter_notebook(experiment_id, operator_id)
if not notebook:
return None
notebook = notebook["content"]
logs = {}
for cell in notebook["cells"]:
try:
metadata = cell["metadata"]["papermill"]
if metadata["exception"] and metadata["status"] == "failed":
for output in cell["outputs"]:
if output["output_type"] == "error":
error_log = output["traceback"]
traceback = remove_ansi_escapes(error_log)
logs = {"exception": output["ename"], "traceback": traceback}
except KeyError:
pass
return logs | d98865cdbca25839bb6010ab5e726fd35d162ada | 3,654,852 |
from typing import Callable
def modify_env2(
function: Callable[[_UpdatedType], _SecondType],
) -> Kinded[Callable[
[Kind2[_Reader2Kind, _FirstType, _SecondType]],
Kind2[_Reader2Kind, _FirstType, _UpdatedType],
]]:
"""
Modifies the second type argument of a ``ReaderBased2``.
In other words, it modifies the function's
signature from: ``a -> b``
to: ``Container[x, a] -> Container[x, b]``
.. code:: python
>>> from returns.pointfree import modify_env2
>>> from returns.context import RequiresContext
>>> def multiply(arg: int) -> RequiresContext[int, int]:
... return RequiresContext(lambda deps: arg * deps)
>>> assert modify_env2(int)(multiply(3))('4') == 12
Note, that this function works with only ``Kind2`` containers
with ``.modify_env`` method.
See :class:`returns.primitives.interfaces.specific.reader.ReaderBased2`
for more info.
"""
@kinded
def factory(
container: Kind2[_Reader2Kind, _FirstType, _SecondType],
) -> Kind2[_Reader2Kind, _FirstType, _UpdatedType]:
return internal_modify_env2(container, function)
return factory | 5ed2c5deaaa376e4884f31e3ba08d3b2839cc1a5 | 3,654,853 |
import numpy as np
import matplotlib.pyplot as plt
def model_trees(z, quantiles, normed=False,
dbhfile='c:\\projects\\MLM_Hyde\\Data\\hyde_runkolukusarjat.txt',
plot=False,
biomass_function='marklund'):
"""
reads runkolukusarjat from Hyde and creates lad-profiles for pine, spruce and decid.
Args:
z - grid (m)
quantiles - cumulative frequency limits for grouping trees
normed - True returns sum(lad*dz) normalized to unity
Returns:
lad_p, lad_s, lad_d - leaf-area density profiles for model treegroups (m2/m3)
n_p, n_s, n_d - trees / ha in model treegroups
"""
dat = np.loadtxt(dbhfile, skiprows=1)
dz = z[1]-z[0]
M = len(quantiles)
# year 2008 data
pine = dat[:, [0, 1]]
spruce = dat[:, [0, 2]]
decid = dat[:, [0, 3]]
# pines
h, hb, mleaf, L, a = profiles_hyde(pine, 'pine', z, biomass_function=biomass_function)
n = pine[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_p = np.zeros([len(z), M])
n_p = np.zeros(M)
lai_p = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_p[:, k] = np.sum(a[:, f], axis=1)
n_p[k] = np.sum(n[f])
lai_p[k] = sum(dz*lad_p[:,k])
m = quantiles[k]
if normed:
lad_p[:, k] = lad_p[:, k] / np.maximum(np.sum(lad_p[:, k] * dz), eps)
# spruces
h, hb, mleaf, L, a = profiles_hyde(spruce, 'spruce', z, biomass_function=biomass_function)
n = spruce[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_s = np.zeros([len(z), M])
n_s = np.zeros(M)
lai_s = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_s[:, k] = np.sum(a[:, f], axis=1)
n_s[k] = np.sum(n[f])
lai_s[k] = sum(dz*lad_s[:,k])
m = quantiles[k]
if normed:
lad_s[:, k] = lad_s[:, k] / np.maximum(np.sum(lad_s[:, k] * dz), eps)
# decid
h, hb, mleaf, L, a = profiles_hyde(decid, 'birch', z, biomass_function=biomass_function)
n = decid[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_d = np.zeros([len(z), M])
n_d = np.zeros(M)
lai_d = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_d[:, k] = np.sum(a[:, f], axis=1)
n_d[k] = np.sum(n[f])
lai_d[k] = sum(dz*lad_d[:,k])
m = quantiles[k]
if normed:
lad_d[:, k] = lad_d[:, k] / np.maximum(np.sum(lad_d[:, k] * dz), eps)
if plot:
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
plt.figure(figsize=(2.5,3.5))
for k in range(M):
plt.plot(lad_p[:, k],z,color=colors[0], label='pine, %.2f m$^2$m$^{-2}$' % lai_p[k])#,lad_g,z)
plt.plot(lad_s[:, k],z,color=colors[1], label='spruce, %.2f m$^2$m$^{-2}$' % lai_s[k])
plt.plot(lad_d[:, k],z,color=colors[2], label='decid, %.2f m$^2$m$^{-2}$' % lai_d[k])
plt.title(" ")#dbhfile.split("/")[-1])
plt.ylabel('height [m]')
if normed:
plt.xlabel('normalized lad [-]')
else:
plt.xlabel('lad [m$^2$m$^{-3}$]')
plt.tight_layout()
return lad_p, lad_s, lad_d, n_p, n_s, n_d, lai_p, lai_s, lai_d | ba3c1ea345031a8b5434e1dd4f005b1c2c1e74ce | 3,654,854 |
def inject_general_timeline():
"""This function injects the function object 'Tweet.get_general_timeline'
into the application context so that 'get_general_timeline' can be accessed
in Jinja2 templates.
"""
return dict(get_general_timeline=Tweet.get_general_timeline) | 56b395da0facda561061c8f63eb3eb26c07f3605 | 3,654,855 |
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a Cosmo Skymed file. Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
CSKReader|None
`CSKReader` instance if Cosmo Skymed file, `None` otherwise
"""
if h5py is None:
return None
try:
csk_details = CSKDetails(file_name)
print('File {} is determined to be a Cosmo Skymed file.'.format(file_name))
return CSKReader(csk_details)
except (IOError, KeyError, ValueError):
# TODO: what all should we catch?
return None | f1292033411236f1e445e36458571e5025996a2b | 3,654,856 |
def get_vaccinated_model(model, area=None):
"""Get all states that can be vaccinated or recovered (by area).
Parameters
----------
model : amici.model
Amici model which should be evaluated.
    area : str, optional
        Name of a single area as a string; if None, all areas are used.
Returns
-------
states : list
List of states that can be vaccinated.
"""
if area is None:
states = [
x
for x in model.getStateNames()
if not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
]
else:
states = [
x
for x in model.getStateNames()
if (
not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
)
and (area in x)
]
return states | c03a9d048abb08561463b1975ffec663f24267b3 | 3,654,857 |
from datetime import datetime
def MicrosecondsToDatetime(microseconds):
"""Returns a datetime given the number of microseconds, or None."""
if microseconds:
return datetime.utcfromtimestamp(float(microseconds) / 1000000)
return None | 69fd3dc3b8d1a97e7a64037cabe988365b2c6e63 | 3,654,858 |
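# Usage sketch (illustrative): microseconds since the Unix epoch become a UTC datetime;
# zero or empty input yields None. The timestamp below is made up.
print(MicrosecondsToDatetime(1600000000000000))  # 2020-09-13 12:26:40
print(MicrosecondsToDatetime(0))                 # None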
import subprocess
def update() -> bool:
"""
Pull down the latest Docker image build and prune old image versions.
"""
current_image = DEFAULT_IMAGE
latest_image = latest_build_image(current_image)
if latest_image == current_image:
print(colored("bold", "Updating Docker image %s…" % current_image))
else:
print(colored("bold", "Updating Docker image from %s to %s…" % (current_image, latest_image)))
print()
# Pull the latest image down
try:
subprocess.run(
["docker", "image", "pull", latest_image],
check = True)
except (OSError, subprocess.CalledProcessError):
return False
# Update the config file to point to the new image so we use it by default
# going forward.
config.set("docker", "image", latest_image)
# Prune any old images which are now dangling to avoid leaving lots of
# hidden disk use around. We don't use `docker image prune` because we
# want to just remove _our_ dangling images, not all. We very much don't
# want to automatically prune unrelated images.
print()
print(colored("bold", "Pruning old images…"))
print()
try:
images = dangling_images(current_image) \
+ old_build_images(current_image)
if images:
subprocess.run(
["docker", "image", "rm", *images],
check = True)
except (OSError, subprocess.CalledProcessError) as error:
warn()
warn("Update succeeded, but an error occurred pruning old image versions:")
warn(" ", error)
warn()
return True | 1a5b8b4cf6cadccc8d95c4d9d8f5ca016199bd6e | 3,654,859 |
def get_default_accept_image_formats():
"""With default bentoML config, this returns:
['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp']
"""
return [
extension.strip()
for extension in config("apiserver")
.get("default_image_handler_accept_file_extensions")
.split(",")
] | 9f2e8514ed1dcc4d533be0e3f2e501a9a9784abb | 3,654,860 |
import sys
import glob
import serial
def findports():
"""Returns an array of the serial ports that have a command interpreter."""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(255)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
cmd= Cmd()
cmd.open(port)
cmd.close(True)
result.append(port)
except (OSError, serial.SerialException, CmdException) as error:
# print(error)
pass
return result | 38b16c56059c5f045b7c627cf1065c0754f7db28 | 3,654,861 |
from copy import copy
def cells_handler(results, cl):
"""
Changes result cell attributes based on object instance and field name
"""
suit_cell_attributes = getattr(cl.model_admin, 'suit_cell_attributes', None)
if not suit_cell_attributes:
return results
class_pattern = 'class="'
td_pattern = '<td'
th_pattern = '<th'
for row, result in enumerate(results):
instance = cl.result_list[row]
for col, item in enumerate(result):
field_name = cl.list_display[col]
attrs = copy(suit_cell_attributes(instance, field_name))
if not attrs:
continue
# Validate
if not isinstance(attrs, dict):
raise TypeError('"suit_cell_attributes" method must return dict. '
'Got: %s: %s' % (
attrs.__class__.__name__, attrs))
# Merge 'class' attribute
if class_pattern in item.split('>')[0] and 'class' in attrs:
css_class = attrs.pop('class')
replacement = '%s%s ' % (class_pattern, css_class)
result[col] = mark_safe(
item.replace(class_pattern, replacement))
# Add rest of attributes if any left
if attrs:
cell_pattern = td_pattern if item.startswith(
td_pattern) else th_pattern
result[col] = mark_safe(
result[col].replace(cell_pattern,
td_pattern + dict_to_attrs(attrs)))
return results | c49bdb89597e191d0c6b65df1b58a80ac6bd5f9e | 3,654,862 |
def dynamic_import(import_string):
"""
Dynamically import a module or object.
"""
# Use rfind rather than rsplit for Python 2.3 compatibility.
lastdot = import_string.rfind('.')
if lastdot == -1:
return __import__(import_string, {}, {}, [])
module_name, attr = import_string[:lastdot], import_string[lastdot + 1:]
parent_module = __import__(module_name, {}, {}, [attr])
return getattr(parent_module, attr) | f6418ff17f3d480b22abac1146d946a5f990cb3c | 3,654,863 |
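# Usage sketch (illustrative): importing a module attribute and a bare module by
# dotted path, using only the standard library.
join_func = dynamic_import('os.path.join')
os_module = dynamic_import('os')
print(join_func('a', 'b'), os_module.sep)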
from typing import Union
from typing import List
def _split_on_parenthesis(text_in: Union[str, list[str]]) -> List[str]:
"""Splits text up into a list of strings based on parenthesis locations."""
if isinstance(text_in, list):
if None in text_in:
return text_in
text_list = text_in
elif isinstance(text_in, str):
text_list = [text_in]
else:
return text_in
for i, text in enumerate(text_list):
if isinstance(text, str) and "(" in text:
text_inside = text[text.find("(")+1:text.rfind(")")]
out_add = _split_list(text, text_inside)
out_add[0] = out_add[0][:-1] # remove (
out_add[2] = out_add[2][1:] # remove )
out_add[1] = _get_unit(text_inside)
out_add = [text for text in out_add if text != ""]
            out_add = [text for text in out_add if text is not None]
text_list[i] = out_add
return _flatten_list(text_list) | 7c7994590838c0293869786841eb7f97c60b16e8 | 3,654,864 |
from typing import Callable
import functools
import sys
def shell_command_error2exit_decorator(func: Callable):
"""Decorator to convert given ShellCommandException to an exit message
This avoids displaying nasty stack traces to end-users
"""
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except ShellCommandException as e:
e = e.__cause__
print(f"{e}:\n{e.output}")
sys.exit(1)
return func_wrapper | db2a51401c6616a8cf54928fc709923de3057a73 | 3,654,865 |
import requests
def getExternalIP():
""" Returns external ip of system """
ip = requests.get("http://ipv4.myexternalip.com/raw").text.strip()
    if not ip:
ip = requests.get("http://ipv4.icanhazip.com").text.strip()
return ip | 77847063a2da7c6484dd6e569786a012b3a0a62f | 3,654,866 |
def intersection_indices(a, b):
"""
:param list a, b: two lists of variables from different factors.
returns a tuple of
(indices in a of the variables that are in both a and b,
indices of those same variables within the list b)
For example, intersection_indices([1,2,5,4,6],[3,5,1,2]) returns
([0, 1, 2], [2, 3, 1]).
"""
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
mapA = []
mapB = []
for i, itm in enumerate(a):
if itm in bind:
mapA.append(i)
mapB.append(bind.get(itm))
return mapA, mapB | 55264faaa4fd5e6dc5365b675ebd3b7f6a1e1280 | 3,654,867 |
def test_extract_requested_slot_from_entity_with_intent():
"""Test extraction of a slot value from entity with the different name
and certain intent
"""
# noinspection PyAbstractClass
class CustomFormAction(FormAction):
def slot_mappings(self):
return {"some_slot": self.from_entity(entity="some_entity",
intent="some_intent")}
form = CustomFormAction()
tracker = Tracker('default', {'requested_slot': 'some_slot'},
{'intent': {'name': 'some_intent', 'confidence': 1.0},
'entities': [{'entity': 'some_entity',
'value': 'some_value'}]},
[], False, None, {}, 'action_listen')
slot_values = form.extract_requested_slot(CollectingDispatcher(),
tracker, {})
# check that the value was extracted for correct intent
assert slot_values == {'some_slot': 'some_value'}
tracker = Tracker('default', {'requested_slot': 'some_slot'},
{'intent': {'name': 'some_other_intent',
'confidence': 1.0},
'entities': [{'entity': 'some_entity',
'value': 'some_value'}]},
[], False, None, {}, 'action_listen')
slot_values = form.extract_requested_slot(CollectingDispatcher(),
tracker, {})
# check that the value was not extracted for incorrect intent
assert slot_values == {} | 0b457700781183f275a8512e16bac53aa058d762 | 3,654,868 |
def graph_cases(selenium, host):
"""
Factory method that allows to draw preconfigured graphs and manipulate them
with a series of helpful methods.
:type selenium: selenium.webdriver.remote.webdriver.WebDriver
:type host: qmxgraph.server.Host
:rtype: GraphCaseFactory
:return: Factory able to create cases.
"""
return GraphCaseFactory(selenium=selenium, host=host) | 2df048d35a337e8d335844b7a1bb98db77816e5d | 3,654,869 |
# imports assumed from the usage below (numpy, matplotlib, itertools.product, skimage.util)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from skimage import util
def figure_8():
"""
Notes
-----
Colors from Bang Wong's color-blind friendly colormap. Available at:
https://www.nature.com/articles/nmeth.1618
Wong's map acquired from David Nichols page. Available at:
https://davidmathlogic.com/colorblind/.
"""
# choosing test sample and network.
sample = const.SAMPLE_232p3_wet
network_folder = const.FOLDER_PRED_UNET
# we will return a 10 x 10 matthews matrix; each for a crop
matthews_coefs = np.ones((10, 10))
worst_indexes = np.zeros((10, 10))
# a variable to obtain inlay data.
inlay_data = []
# reading input data.
is_registered = sample['registered_path'] is not None
data_pred, data_gs = _pred_and_goldstd(sample,
folder_prediction=network_folder,
is_registered=is_registered,
is_binary=True)
data_pred = data_pred[slice(*sample['segmentation_interval'])]
# comp_color starts as gray (background).
comp_color = np.ones(
(*data_pred[0].shape, 3)
) * (np.asarray((238, 238, 238)) / 255)
for idx, (img_pred, img_gs) in enumerate(zip(data_pred, data_gs)):
# crop images in 100 (256, 256) pieces.
crop_pred = util.view_as_blocks(img_pred,
block_shape=(256, 256))
crop_gs = util.view_as_blocks(img_gs,
block_shape=(256, 256))
for i, _ in enumerate(crop_pred):
for j, _ in enumerate(crop_pred[i]):
# calculate the Matthews coefficient for each crop.
aux_conf = _confusion_matrix(crop_gs[i, j],
crop_pred[i, j])
aux_matthews = _measure_matthews(aux_conf)
# if smaller than previously, save results.
# restricting aux_matthews > 0.1 due to errors in all-TN regions
if (0.1 < aux_matthews < matthews_coefs[i, j]):
matthews_coefs[i, j] = aux_matthews
worst_indexes[i, j] = idx
aux_comp = _comparison_color(crop_gs[i, j], crop_pred[i, j])
comp_color[i*256:(i+1)*256, j*256:(j+1)*256] = aux_comp
# grab inlay data from crops we want to highlight.
for i, j in [(2, 2), (8, 7)]:
inlay_data.append(comp_color[i*256:(i+1)*256, j*256:(j+1)*256])
# Figure 8(a).
plt.figure(figsize=FIGURE_SIZE)
plt.imshow(comp_color)
for idx in np.arange(start=0, stop=2560, step=256): # according to image
plt.axvline(idx, color='white')
plt.axhline(idx, color='white')
matthews_coefs = np.round(matthews_coefs * 100, decimals=2)
for i, j in product(range(10), repeat=2):
facecolor, textcolor = _label_color(matthews_coefs[j, i])
plt.text(x=i*256 + 30, y=j*256 + 50,
s=str(matthews_coefs[j, i]),
fontsize=8,
color=textcolor,
bbox=dict(facecolor=facecolor, alpha=0.9))
_check_if_folder_exists(folder='./figures')
plt.savefig('figures/Fig_08a' + SAVE_FIG_FORMAT, bbox_inches='tight')
plt.close()
# Figures 8(b, c).
indexes = {0: 'b', 1: 'c'}
for idx in indexes.keys():
plt.figure(figsize=FIGURE_SIZE)
plt.imshow(inlay_data[idx])
_check_if_folder_exists(folder='./figures')
plt.savefig(f'figures/Fig_08{indexes[idx]}' + SAVE_FIG_FORMAT,
bbox_inches='tight')
plt.close()
return None | 2a72f24673b96b577fc4f4a23a1869740e90c3ec | 3,654,870 |
import re
def check_threats(message):
"""Return list of threats found in message"""
threats = []
for threat_check in get_threat_checks():
for expression in threat_check["expressions"]:
if re.search(expression, message, re.I | re.U):
del threat_check["expressions"]
threats += [threat_check]
break
return threats | 091d370e4a2e6cbdf674d6dde73bf616b994498b | 3,654,871 |
def data_processing_max(data, column):
"""Compute the max of a column."""
return costly_compute_cached(data, column).max() | 299075ea3e1953abe0ffbd71afb42525c6270c49 | 3,654,872 |
from typing import Sequence
import numpy as np
from scipy.sparse import spmatrix
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y)
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' | 2c721ab04cdba3209794a21b2b25fe10485be106 | 3,654,873 |
from typing import List
def extract_data_from_csv_stream(client: Client, alert_id: str,
attachment_id: str, delimiter: bytes = b'\r\n') -> List[dict]:
"""
Call the attachment download API and parse required fields.
Args:
client (Client): Cyberint API client.
alert_id (str): ID of the alert the attachment belongs to.
attachment_id (str): ID of the attachment itself.
        delimiter (bytes): Delimiter for the CSV file.
Returns:
list(dict): List of all the data found using the wanted fields.
"""
first_line = True
field_indexes = {} # {wanted_field_name: wanted_field_index...}
information_found = []
for csv_line in client.get_csv_file(alert_id, attachment_id, delimiter):
csv_line_separated = csv_line.split(',')
if first_line:
for field in CSV_FIELDS_TO_EXTRACT:
try:
field_indexes[field] = csv_line_separated.index(field)
except ValueError:
pass
first_line = False
else:
try:
extracted_field_data = {field_name.lower(): csv_line_separated[field_index]
for field_name, field_index in field_indexes.items()}
if extracted_field_data:
information_found.append(extracted_field_data)
except IndexError:
pass
return information_found | 992679004ae94da2731b04eaf41918a755d8306a | 3,654,874 |
import re
def validate_password(password, password_repeat=None):
"""
Validate user password.
:param password: password as string
:param password_repeat: repeat password
    :return: False if the password is valid, otherwise an error message string
"""
if password_repeat:
if password != password_repeat:
return "Passwords did not match."
flag = False
if len(password) < 8:
flag = True
elif not re.search("[a-z]", password):
flag = True
elif not re.search("[A-Z]", password):
flag = True
elif not re.search("[0-9]", password):
flag = True
    elif re.search(r"\s", password):
flag = True
if flag:
return (
"Password must contain at least a lower case, an upper case, a number, no spaces "
"and be at least 9 characters."
)
return False | 2987a1bec151e173156ab6a72345864c84dcb61c | 3,654,875 |
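# Usage sketch (illustrative): a falsy return means the password passed all checks,
# otherwise an error message string is returned. The passwords below are made up.
print(validate_password("Str0ngPass"))             # False (valid)
print(validate_password("weak"))                   # error message string
print(validate_password("Str0ngPass", "Other1Aa")) # "Passwords did not match."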
def get_large_circuit(backend: IBMBackend) -> QuantumCircuit:
"""Return a slightly larger circuit that would run a bit longer.
Args:
backend: Backend on which the circuit will run.
Returns:
A larger circuit.
"""
n_qubits = min(backend.configuration().n_qubits, 20)
circuit = QuantumCircuit(n_qubits, n_qubits)
for qubit in range(n_qubits - 1):
circuit.h(qubit)
circuit.cx(qubit, qubit + 1)
circuit.measure(list(range(n_qubits)), list(range(n_qubits)))
return circuit | a35a9ee67d6268911f49936095a703b4fd227a56 | 3,654,876 |
import os
import time
def loadUserProject():
""" Loads a project that contains only the contents of user.dev.
This project will not be cached, so every call will reload it."""
userFilePath = os.path.join(os.path.expanduser(devon.userPath), userFileName)
project = DevonProject("", time.time())
__mergeProject(project, "", userFilePath)
return project | 56edecff5feba9062770f164a33e281cf8232144 | 3,654,877 |
import numpy as np
import torch
def top_k(loc_pred, loc_true, topk):
"""
count the hit numbers of loc_true in topK of loc_pred, used to calculate Precision, Recall and F1-score,
calculate the reciprocal rank, used to calcualte MRR,
calculate the sum of DCG@K of the batch, used to calculate NDCG
Args:
loc_pred: (batch_size * output_dim)
loc_true: (batch_size * 1)
topk:
Returns:
tuple: tuple contains:
hit (int): the hit numbers \n
rank (float): the sum of the reciprocal rank of input batch \n
dcg (float): dcg
"""
    assert topk > 0, "top-k accuracy: k must be at least 1"
loc_pred = torch.FloatTensor(loc_pred)
val, index = torch.topk(loc_pred, topk, 1)
index = index.numpy()
hit = 0
rank = 0.0
dcg = 0.0
for i, p in enumerate(index):
target = loc_true[i]
if target in p:
hit += 1
rank_list = list(p)
rank_index = rank_list.index(target)
# rank_index is start from 0, so need plus 1
rank += 1.0 / (rank_index + 1)
dcg += 1.0 / np.log2(rank_index + 2)
return hit, rank, dcg | 8796312e1fa4d43fb992c0dd7903070a9e061e1b | 3,654,878 |
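# Usage sketch (illustrative): two samples over a vocabulary of four locations,
# evaluated at top-2. Assumes torch and numpy are available (imported above).
example_scores = [[0.1, 0.5, 0.3, 0.1],
                  [0.7, 0.2, 0.05, 0.05]]
example_truth = [1, 3]
print(top_k(example_scores, example_truth, 2))  # (1, 1.0, 1.0): one hit at rank 1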
def enviar_contacto(request):
"""
    Send an email with the contact form
    to technical support.
"""
formulario = ContactoForm()
if request.method == 'POST':
formulario = ContactoForm(request.POST)
if formulario.is_valid():
mail = EmailMessage(subject='HPC Contacto',
from_email=formulario.cleaned_data['email'],
to=EMAIL_TO)
            mail.body = 'User %s commented: %s' \
% (formulario.cleaned_data['nombre'], formulario.cleaned_data['mensaje'])
mail.send()
messages.success(request, "El personal de soporte técnico ha recibido su consulta, "
"pronto nos pondremos en contacto.")
return HttpResponseRedirect('/')
ctx = {'form': formulario}
return render_to_response('contacto/enviar_contacto.html', ctx, context_instance=RequestContext(request)) | 2f17e0cd0fbd5c5df345484c5fe08a420272785a | 3,654,879 |
def dict_depth(d):
"""
    Recursively compute the depth of a dict.
d = {'a':1, 'b': {'c':{}}} --> depth(d) == 3
"""
if isinstance(d, dict):
return 1 + (max(map(dict_depth, d.values())) if d else 0)
return 0 | 16f4164fdea08af9d5846a5866428c81848726b9 | 3,654,880 |
def apercorr(psf,image,objects,psfobj,verbose=False):
"""
Calculate aperture correction.
Parameters
----------
psf : PSF object
The best-fitting PSF model.
image : string or CCDData object
The input image to fit. This can be the filename or CCDData object.
objects : table
The output table of best-fit PSF values for all of the sources.
psfobj : table
The table of PSF objects.
verbose : boolean, optional
Verbose output to the screen. Default is False.
Returns
-------
objects : table
The output table with an "apcorr" column inserted and the aperture correction
applied to "psfmag".
apcor : float
The aperture correction in mag.
cgrow : numpy array
The cumulative aperture correction array.
Example
-------
apcor = apercorr(psf,image,objects,psfobj)
"""
# Get model of all stars except the PSF stars
ind1,ind2 = dln.match(objects['id'],psfobj['id'])
left = np.delete(np.arange(len(objects)),ind1)
neiobj = objects[left]
neimodel = image.copy()
neimodel.data *= 0
neimodel.error[:] = 1
neimodelim = psf.add(neimodel,neiobj)
neimodel.data = neimodelim
# Subtract everything except the PSF stars from the image
resid = image.copy()
if image.mask is not None:
resid.data[~resid.mask] -= neimodel.data[~resid.mask]
else:
        resid.data -= neimodel.data
residim = np.maximum(resid.data-resid.sky,0)
resid.data = residim
resid.sky[:] = 0.0
# Do aperture photometry with lots of apertures on the PSF
# stars
# rk = (20/3.)**(1/11.) * rk-1 for k=2,..,12
rseeing = psf.fwhm()*0.5
apers = np.cumprod(np.hstack((3.0,np.ones(11,float)*(20/3.)**(1/11.))))
#apers = np.array([3.0,3.7965,4.8046,6.0803,7.6947,9.7377,12.3232,
# 15.5952,19.7360,24.9762,31.6077,40.0000])
apercat = aperphot(resid,psfobj,apers)
# Fit curve of growth
# use magnitude differences between successive apertures.
apars, agrow, derr = fitgrowth(apercat,apers,rseeing=psf.fwhm()*0.5)
# Get magnitude difference errors
nstars = len(apercat)
napers = len(apers)
derr = np.zeros((nstars,napers-1),float)
for i in range(len(apers)-1):
err1 = apercat['magerr_aper'+str(i+1)]
err2 = apercat['magerr_aper'+str(i+2)]
derr[:,i] = np.sqrt(err1**2+err2**2)
wt = 1/derr**2
# THE CURVE TURNS OVER AT LARGE RADIUS!!!!???
# It shouldn't EVER do that.
# Calculate empirical growth curve
egrow,egrowerr = empgrowth(apercat,apers)
# Get "adopted" growth curve by taking the weighted average
# of the analytical and empirical growth curves
# with the empirical weighted higher at small r and
# the analytical weighted higher at large r
gwt = np.mean(wt,axis=0) # mean weights over the stars
adopgrow = (egrow*gwt + agrow*(1/(0.1*agrow))**2) / (gwt+(1/(0.1*agrow))**2)
adopgrowerr = 1 / (gwt+(1/(0.1*agrow))**2)
# Adopted cumulative growth curve
# sum from the outside in, with an outer tail given by
# extrapolation of the analytic model to 2*outer aperture
cadopgrow = np.cumsum(adopgrow[::-1])[::-1]
# add extrapolation from rlast t=o2*rlast
tail = diffprofile([2*apers[-1],apers[-1]],*apars)
cadopgrow += tail
cadopgrow = np.hstack((cadopgrow,tail)) # add value for outer aperture
cadopgrowerr = np.hstack((adopgrowerr,0.0))
# Calculate "total" magnitude for the PSF stars
totmag,toterr = totphot(apercat,apers,cadopgrow,cadopgrowerr)
# Calculate median offset between total and PSF magnitude
# psf - total
ind1,ind2 = dln.match(objects['id'],psfobj['id'])
diffmag = objects['psfmag'][ind1] - totmag[ind2]
apcor = np.median(diffmag) # positive value
# Apply aperture correction to the data
# add apcorr column and keep initial mags in instmag
objects['apcorr'] = apcor
objects['inst_psfmag'] = objects['psfmag']
objects['psfmag'] -= apcor # make brighter
if verbose:
print('Aperture correction = %.3f mag' % apcor)
return objects, apcor, cadopgrow | bc4bb936801fe06a55648ed9a11545eacb24fd7d | 3,654,881 |
from typing import Dict
from typing import Tuple
def product_loading_factor_single_discount(skus: str, product_list: Dict[str, object], product: Dict[str, int], product_name: str, rules: list) -> Tuple[int, str]:
"""
Single product loading factor for calculating discounts with one rule
Parameters
----------
skus: str
        String containing individual product skus
product_list: Dict[str, object]
Product discount list used for applying discounts
product: Dict[str, int]
Product list used for returning the current products price
product_name: str
The name of the product
rules: List
List of discount rules names to apply
Returns
-------
Tuple:
price: int
Calculated price
skus: str
Updated skus list
"""
number_of_products = skus.count(product_name)
product_price = product[product_name]
product_discount_data_object = product_list[product_name][rules[0]]
discount_threshold = product_discount_data_object['discount_threshold']
while number_of_products > 0:
if number_of_products > 0 and number_of_products % discount_threshold == 0:
product_discount_data_object['count'] += 1
number_of_products -= discount_threshold
else:
number_of_products -= 1
applied_discount = product_discount_data_object['count']
remainder_product_count = skus.count(product_name) - (applied_discount * discount_threshold)
discount_to_apply = product_discount_data_object['discount']
apply_discount = (applied_discount * product_price * discount_threshold) - (applied_discount * discount_to_apply)
price = apply_discount + (remainder_product_count * product_price)
return price, skus | 44e12d02be7c8b54d1ea64ef2dc3cbec29a870bc | 3,654,882 |
import re
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page)) | 8042cc5049b2d8b6646c10655b84c5552e315274 | 3,654,883 |
def calculate_recall(tp, n):
"""
:param tp: int
Number of True Positives
:param n: int
Number of total instances
:return: float
Recall
"""
if n == 0:
return 0
return tp / n | b8a36488af59e036acdb50821716ae34287e6b8f | 3,654,884 |
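# Usage sketch (illustrative): 8 true positives out of 10 relevant instances.
print(calculate_recall(8, 10))  # 0.8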
# md5 and unhexlify assumed from the standard library, matching their use below
from binascii import unhexlify
from hashlib import md5
def authenticate_user_password(password : 'bytes', encryption_dict : 'dict', id_array : 'list'):
"""
Authenticate the user password.
Parameters
----------
password : bytes
The password to be authenticated as user password.
encryption_dict : dict
The dictionary containing all the information about the encryption procedure.
id_array : list
The two elements array ID, contained in the trailer dictionary.
Returns
-------
The encryption key if the user password is valid, None otherwise.
"""
R = encryption_dict["R"]
U = encryption_dict["U"]
U = U.value if isinstance(U, PDFLiteralString) else unhexlify(U.value)
encryption_key = compute_encryption_key(password, encryption_dict, id_array)
if R == 2:
cipher = rc4(PASSWORD_PADDING, encryption_key)
else:
input_to_md5 = bytearray()
input_to_md5.extend(PASSWORD_PADDING)
input_to_md5.extend(id_array[0])
computed_hash = md5(input_to_md5).digest()
cipher = rc4(computed_hash, encryption_key)
for counter in range(1, 20):
cipher = rc4(cipher, bytes(x ^ counter for x in encryption_key))
correct_password = (U[:16] == cipher[:16]) if R >= 3 else (U == cipher)
return encryption_key if correct_password else None | b608a921fb02cedf9da9d8ea8e0d8f8139a6a9bd | 3,654,885 |
def date_to_num(date):
"""Convert datetime to days since 1901"""
num = (date.year - 1901) * 365.25
num += [
0, 31, 59.25, 90.25, 120.25,
151.25, 181.25, 212.25, 243.25,
273.25, 304.25, 334.25
][date.month - 1]
num += date.day
return int(num) | 88e342e0fc80a5998df8e5f1ab0002e0f7fe808e | 3,654,886 |
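# Usage sketch (illustrative): days since 1901 for an arbitrary date.
from datetime import date
print(date_to_num(date(2020, 1, 1)))  # 43465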
from typing import Tuple
import numpy as np
def load_world(filename: str, size: Tuple[int, int], resolution: int) -> np.array:
"""Load a preconstructred track to initialize world.
Args:
filename: Full path to the track file (png).
size: Width and height of the map
resolution: Resolution of the grid map (i.e. into how many cells)
one meter is divided into.
Returns:
An initialized gridmap based on the preconstructed track as
an n x m dimensional numpy array, where n is the width (num cells)
and m the height (num cells) - (after applying resolution).
"""
width_in_cells, height_in_cells = np.multiply(size, resolution)
world = np.array(png_to_ogm(
filename, normalized=True, origin='lower'))
# If the image is already in our desired shape, no need to rescale it
if world.shape == (height_in_cells, width_in_cells):
return world
# Otherwise, scale the image to our desired size.
resized_world = resize(world, (width_in_cells, height_in_cells))
return resized_world | 8ccf97efb83b3c365fb95a2732d0737100d5f254 | 3,654,887 |
import torch
def generate_image(model, img_size, n_flow, n_block, n_sample, temp=0.7, ctx=None, label=None):
"""Generate a single image from a Glow model."""
# Determine sizes of each layer
z_sample = []
z_shapes = calc_z_shapes(3, img_size, n_flow, n_block)
for z in z_shapes:
z_new = torch.randn(n_sample, *z) * temp
z_sample.append(z_new.to(device))
assert ctx is None or label is None # can either insert label or context
if label is not None:
return model.reverse(z_sample, label=label)
else:
# handles both cases where only context is provided or no label or context is provided
return model.reverse(z_sample, ctx=ctx) | bee9c45cbbd028351e580729da51092604f87288 | 3,654,888 |
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg) | e0171c3b0eee18c7fcc44cbdfe007949feabba9a | 3,654,889 |
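# Usage sketch (illustrative): only arguments containing whitespace get quoted.
print(quote_spaces('build.log'))     # build.log
print(quote_spaces('My Documents'))  # "My Documents"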
from copy import deepcopy
def slice_map(center, radius, m):
""" :func:`slice_map` for slicing Map object based on center and radius.
:param center: x, y tuple of center of sliced map
:param radius: - :class:`int` center of sliced map
:param m: - :class:`Map` Map object that want to be sliced
return :class:`Map`
"""
# TODO
# it should slice player list and kingdom list too
results = dict()
for cell in m.gen_tiles():
if distance(center, (cell.coordinate.x, cell.coordinate.y)) <= radius:
if cell.region_id not in results:
results[cell.region_id] = []
results[cell.region_id].append(cell)
new_map = Map(m.client)
new_map._raw_data = deepcopy(m._raw_data)
new_map._raw_data["response"]["1"]["region"] = results
return new_map | fe30795d2330a1a2572361438a79e1e8fa9bc3cf | 3,654,890 |
from pathlib import Path
import requests
import shutil
import backoff
from urllib.parse import unquote, urlparse
def download_file(url) -> Path:
"""Better download"""
name = Path(urlparse(unquote(url)).path).name
with mktempdir() as tmpdir:
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=30)
def get():
with requests.get(url, stream=True) as r:
save_path = tmpdir.joinpath(name)
with open(save_path, "wb") as f:
shutil.copyfileobj(r.raw, f, length=16 * 1024 * 1024)
return save_path
yield get() | 5ff6c05e5e1eb3379918c65d945d57af7e8d56be | 3,654,891 |
def creategui(handlerfunctions):
"""Initializes and returns the gui."""
gui = GUI(handlerfunctions)
# root.title('DBF Utility')
return gui | 17be3bae6eb105aca770327898a01027271e6f9c | 3,654,892 |
import numpy as np
import torch
import torch.nn.functional as F
def rank_src_trgs(enc_dec_gen, src_list, trg_list):
"""
"""
batch_size = len(trg_list)
x, y = enc_dec_gen.encode_inputs(src_list,
trg_list,
add_bos=True,
add_eos=True)
y_len = torch.sum(y.ne(enc_dec_gen.model.PAD), -1)
with torch.no_grad():
y_target = y[:, 1:]
y = y[:, :-1]
enc_self_attn_mask = enc_dec_gen.model.get_attn_mask(x, x)
enc_outputs = enc_dec_gen.model.encoder(x,
enc_self_attn_mask)
enc_output = enc_outputs[0]
n = y.size(0)//x.size(0)
x = x.repeat([1,n]).view(y.size(0), -1)
enc_output = enc_output.repeat([1, n, 1]).view(x.size(0), x.size(1), -1)
dec_self_attn_mask = enc_dec_gen.model.get_subsequent_mask(y)
dec_self_attn_mask = dec_self_attn_mask | enc_dec_gen.model.get_attn_mask(y, y)
dec_enc_attn_mask = enc_dec_gen.model.get_attn_mask(y, x)
trg_embedding = None
if enc_dec_gen.model.share_src_trg_emb == True:
trg_embedding = enc_dec_gen.model.encoder.src_embedding
dec_outputs = enc_dec_gen.model.decoder(y,
enc_output,
dec_self_attn_mask,
dec_enc_attn_mask,
trg_embedding=trg_embedding)
logits = dec_outputs[0]
logits = logits.view(-1, enc_dec_gen.trg_vocab_size)
log_probs = -F.nll_loss(F.log_softmax(logits, -1),
y_target.contiguous().view(-1),
ignore_index=enc_dec_gen.model.PAD,
reduction='none')
log_probs = torch.sum(log_probs.view(batch_size, -1), -1)
norm = 1
if enc_dec_gen.normalize == "gnmt":
norm = torch.pow(5. + y_len, enc_dec_gen.gamma) / np.power(6., enc_dec_gen.gamma)
elif enc_dec_gen.normalize == "linear":
norm = y_len
log_probs = log_probs / norm
log_probs = log_probs.cpu().numpy()
return log_probs | f5472889489676e21a7bec032e13ef99c850f2da | 3,654,893 |
def plugin_poll(handle):
""" Extracts data from the sensor and returns it in a JSON document as a Python dict.
Available for poll mode only.
Args:
handle: handle returned by the plugin initialisation call
Returns:
returns a sensor reading in a JSON document, as a Python dict, if it is available
None - If no reading is available
Raises:
Exception
"""
try:
time_stamp = utils.local_timestamp()
data = {'asset': handle['assetName']['value'],
'timestamp': time_stamp,
'readings': {"random": next(generate_data())}}
except (Exception, RuntimeError) as ex:
_LOGGER.exception("Exception is {}".format(str(ex)))
raise ex
else:
return data | c3d7b32b6816c81d244f689ce4185d1dcd9a16fe | 3,654,894 |
def top_average_pathways(document, file, max_sets, get_all=False):
""" Read the pathways file and get the top average pathways """
# read in the samples and get the data with out the stratification by bug
samples, pathways, data = document.read_table(file)
pathway_names = utilities.pathway_names(pathways)
pathways, data = utilities.remove_stratified_pathways(pathways,
data, remove_description=True)
# remove extra identifier from sample name if included in workflow
samples = [sample.replace("_Abundance","").replace("-RPKs","") for sample in samples]
# get the average abundance for the pathways
if get_all:
top_pathways, top_data = pathways, data
else:
top_pathways, top_data = utilities.top_rows(pathways,
data, max_sets, function="average")
# get the top names with descriptions
top_names_and_descriptions = [name+":"+pathway_names[name] for name in top_pathways]
return samples, top_pathways, top_data, top_names_and_descriptions | 4d4ed4fa9156ac98466197090afc52ded517af95 | 3,654,895 |
import torch
def ltria2skew(L):
"""
assume L has already passed the assertion check
:param L: lower triangle matrix, shape [N, 3]
:return: skew sym A [N, 3, 3]
"""
if len(L.shape) == 2:
N = L.shape[0]
# construct the skew-sym matrix
A = torch.zeros(N, 3, 3).cuda() # [N, 3, 3]
A[:, 1, 0] = L[:, 0]
A[:, 2, 0] = L[:, 1]
A[:, 2, 1] = L[:, 2]
A[:, 0, 1] = -L[:, 0]
A[:, 0, 2] = -L[:, 1]
A[:, 1, 2] = -L[:, 2]
return A
elif len(L.shape) == 1:
A = torch.zeros(3, 3).cuda()
A[1, 0] = L[0]
A[2, 0] = L[1]
A[2, 1] = L[2]
A[0, 1] = -L[0]
A[0, 2] = -L[1]
A[1, 2] = -L[2]
return A
else:
raise NotImplementedError | 6e74c181fc8efcdc28ba35578f31fb6f2a7fa1bb | 3,654,896 |
import tensorflow as tf
def gamma_contrast(data_sample, num_patches=324, num_channel=2, shape_data=None,
gamma_range=(0.5, 1.7), invert_image=False, per_channel=False,
retain_stats=False):
"""Performs gamma contrast transformation"""
epsilon = 1e-7
data_sample_patch = []
gamma_range_tensor = tf.convert_to_tensor(gamma_range)
for patch in range(num_patches):
if invert_image:
data_sample = - data_sample
if not per_channel:
# if np.random.random() < 0.5 and gamma_range[0] < 1:
# gamma = np.random.uniform(gamma_range[0], 1)
# else:
# gamma = np.random.uniform(max(gamma_range[0], 1), gamma_range[1])
def true_fn():
gamma_fn = tf.random.uniform(shape=(), minval=gamma_range[0], maxval=1, seed=1)
return gamma_fn
def false_fn():
gamma_fn = tf.random.uniform(shape=(), minval=tf.math.maximum(gamma_range[0], 1),
maxval=gamma_range[1], seed=1)
return gamma_fn
cond = tf.math.logical_and(tf.math.less(tf.random.uniform(shape=(), minval=0, maxval=0.99, seed=1), 0.5),
tf.math.less(gamma_range_tensor[0], 1))
gamma = tf.cond(cond, true_fn, false_fn)
min_val_ten = tf.math.reduce_min(data_sample[patch, ...])
range_tensor = tf.math.reduce_max(data_sample[patch, ...]) - min_val_ten
data_sample_norm = tf.math.divide(tf.math.subtract(data_sample[patch, ...], min_val_ten),
tf.math.add(range_tensor, epsilon))
data_img = tf.image.adjust_gamma(image=data_sample_norm, gamma=gamma,
gain=tf.math.add(range_tensor, epsilon))
data_img = tf.math.add(data_img, min_val_ten)
data_sample_patch.append(data_img)
else:
data_sample_per_channel = []
for c in range(num_channel):
def true_fn():
                    gamma_fn = tf.random.uniform(shape=(), minval=gamma_range[0], maxval=1, seed=1)
                    return gamma_fn
                def false_fn():
                    gamma_fn = tf.random.uniform(shape=(), minval=tf.math.maximum(gamma_range[0], 1),
                                                 maxval=gamma_range[1], seed=1)
return gamma_fn
cond = tf.math.logical_and(tf.math.less(tf.random.uniform(shape=(), minval=0, maxval=0.99, seed=1), 0.5),
tf.math.less(gamma_range_tensor[0], 1))
gamma = tf.cond(cond, true_fn, false_fn)
min_val_ten = tf.math.reduce_min(data_sample[patch, :, :, :, c])
                range_tensor = tf.math.reduce_max(data_sample[patch, :, :, :, c]) - min_val_ten
data_sample_norm = tf.math.divide(tf.math.subtract(data_sample[patch, ..., c], min_val_ten),
tf.math.add(range_tensor, epsilon))
data_img = tf.image.adjust_gamma(image=data_sample_norm, gamma=gamma,
gain=tf.math.add(range_tensor, epsilon))
data_img = tf.math.add(data_img, min_val_ten)
data_sample_per_channel.append(data_img)
data_sample_channel = tf.stack(data_sample_per_channel)
data_sample_channel = tf.transpose(data_sample_channel, perm=[1, 2, 3, 0])
data_sample_patch.append(data_sample_channel)
data_sample_return = tf.stack(data_sample_patch)
# data_sample_return = tf.transpose(data_sample_return, perm=[1, 2, 3, 4, 0])
return data_sample_return | 373f3f7e602de69c1cbce328ec3ff1322a44d013 | 3,654,897 |
def _converge(helper, rcs, group):
"""
Function to be passed to :func:`_oob_disable_then` as the ``then``
parameter that triggers convergence.
"""
return group.trigger_convergence(rcs) | 8aab701dc7e29d83d6c8ab8b71c37837feb72847 | 3,654,898 |
def HybridClientFactory(jid, password):
"""
Client factory for XMPP 1.0.
This is similar to L{client.XMPPClientFactory} but also tries non-SASL
autentication.
"""
a = HybridAuthenticator(jid, password)
return xmlstream.XmlStreamFactory(a) | 283d9182c0e7bce254bc9f04cd42c15b9e3aed46 | 3,654,899 |