content stringlengths 35-762k | sha1 stringlengths 40-40 | id int64 0-3.66M |
---|---|---|
from unittest.mock import Mock
async def test_10_request(requests_mock: Mock) -> None:
"""Test `async request()`."""
result = {"result": "the result"}
rpc = RestClient("http://test", "passkey", timeout=0.1)
def response(req: PreparedRequest, ctx: object) -> bytes: # pylint: disable=W0613
assert req.body is not None
_ = json_decode(req.body)
return json_encode(result).encode("utf-8")
requests_mock.post("/test", content=response)
ret = await rpc.request("POST", "test", {})
assert requests_mock.called
auth_parts = requests_mock.last_request.headers['Authorization'].split(' ', 1)
assert auth_parts[0].lower() == 'bearer'
assert auth_parts[1] == 'passkey'
assert ret == result
result2 = {"result2": "the result 2"}
def response2(req: PreparedRequest, ctx: object) -> bytes: # pylint: disable=W0613
assert req.body is not None
_ = json_decode(req.body)
return json_encode(result2).encode("utf-8")
requests_mock.post("/test2", content=response2)
ret = await rpc.request("POST", "/test2")
assert requests_mock.called
assert ret == result2 | fa9e03d5b3f5a4f594db29eae057607f790e158c | 3,654,738 |
def entmax15(X, axis=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : paddle.Tensor
The input tensor.
axis : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : paddle tensor, same shape as X
The projection result, such that P.sum(axis=axis) == 1 elementwise.
"""
assert axis in [-1, X.ndim - 1]
return Entmax15Function.apply(X, axis, k) | 08887ec5aff323077ea6ea99bf6bd2b83bb4cc19 | 3,654,739 |
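# Hedged usage sketch for entmax15 above: `paddle` and the project's `Entmax15Function`
# are assumed to be importable. It shows that the output is a proper probability
# distribution along the chosen axis, with low scores driven exactly to zero (unlike softmax).
import numpy as np
import paddle
scores = paddle.to_tensor([[1.5, 0.1, -2.0, 3.0]])
probs = entmax15(scores, axis=-1)
np.testing.assert_allclose(probs.sum(axis=-1).numpy(), 1.0, rtol=1e-5)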
def get_dependency_graph(node, targets=None):
"""Returns the dependent nodes and the edges for the passed in node.
:param str node: The node to get dependencies for.
:param list targets: A list with the modules that are used as targets.
:return: The dependency graph info.
:rtype: GraphInfo
"""
g = _make_graph()
edges, direct_dependencies = _all_dependencies(node, g)
if targets:
targets = set(targets)
affected_targets = []
if not edges:
return graph_info.GraphInfo(
graph=g,
nodes=[],
edges=[],
direct_dependencies=[],
affected_targets=[]
)
all_nodes = set()
for n1, n2 in edges:
all_nodes.add(n1)
all_nodes.add(n2)
node_to_info = {}
for index, node_name in enumerate(all_nodes):
if node_name not in node_to_info:
node_id = index + 1
node_to_info[node_name] = {
"id": node_id,
"label": "",
"title": node_name,
"value": 1,
"color": "blue"
}
if targets and node_name in targets:
node_to_info[node_name]["color"] = 'orange'
node_to_info[node_name]["value"] = 3
affected_targets.append(node_name)
node_to_info[node]['color'] = 'red'
node_to_info[node]['value'] = 3
edges_representation = []
for n1, n2 in edges:
index1 = node_to_info[n1]["id"]
index2 = node_to_info[n2]["id"]
edge_color = 'gray'
value = 1
if n1 == node:
node_to_info[n2]['color'] = 'green'
node_to_info[n2]['value'] = 2
edge_color = 'green'
value = 2
if n2 == node:
node_to_info[n1]['color'] = 'green'
node_to_info[n1]['value'] = 2
edge_color = 'green'
value = 2
edges_representation.append(
{
"from": index1,
"to": index2,
"color": edge_color,
"value": value
},
)
info = graph_info.GraphInfo(
graph=g,
nodes=list(node_to_info.values()),
edges=edges_representation,
direct_dependencies=sorted(direct_dependencies),
affected_targets=affected_targets
)
return info | 39667e034379477086062a9032f5007c12aba30e | 3,654,740 |
from pathlib import Path
def is_submodule_repo(p: Path) -> bool:
"""
"""
if p.is_file() and '.git/modules' in p.read_text():
return True
return False | 26675ee25e431778325081ec80d45ff3d72c2046 | 3,654,741 |
def shift_contig(df2, remove):
"""
The function appends shifted fragments produced by the
sort_cluster_seq function.
Parameters
----------
df2 : pandas DataFrame
DataFrame NRPS cluster fragment.
remove : list
List of cluster fragments which should be removed.
Returns
-------
df2 : pandas DataFrame
Corrected DataFrame with NRPS meta information.
"""
for gen in remove:
df2 = df2.append(gen)
return df2 | 7df891785fc58d818af5b423c7fdbc3c4382951f | 3,654,742 |
def _bocs_consistency_mapping(x):
"""
This is for the comparison with BOCS implementation
:param x:
:return:
"""
horizontal_ind = [0, 2, 4, 7, 9, 11, 14, 16, 18, 21, 22, 23]
vertical_ind = sorted([elm for elm in range(24) if elm not in horizontal_ind])
return x[horizontal_ind].reshape((ISING_GRID_H, ISING_GRID_W - 1)), x[vertical_ind].reshape((ISING_GRID_H - 1, ISING_GRID_W)) | bd8fe5261e024f5d5cdf1a2d77229dd564d947bf | 3,654,744 |
def get_document(name, key):
"""Get document from Database"""
constructor = Constructor()
inst_coll = constructor.factory(kind='Collection', name=name)
inst_doc = Document(inst_coll)
doc = inst_doc.get_document(key)
return doc | acd4e8117c0002d323a4fad79704a33437481657 | 3,654,745 |
from datetime import datetime
import json
def predict() -> str:
"""predict the movie genres based on the request data"""
cur = db_connection.cursor()
try:
input_params = __process_input(request.data)
input_vec = vectorizer.transform(input_params)
prediction = classifier.predict(input_vec)
predictions = binarizer.inverse_transform(prediction)
for count, i in enumerate(input_params):
pred = ", ".join(predictions[count])
cur.execute(
f"INSERT INTO prediction(input, output, time) VALUES('{i}', '{pred}', '{datetime.datetime.now()}' )"
)
db_connection.commit()
except Exception as e:
response = app.response_class(
response=json.dumps({"error": f"{e.__class__} occured"}), status=400
)
return response
response = app.response_class(
response=json.dumps({"predictions:": binarizer.inverse_transform(prediction)}),
status=200,
)
return response | 0ae49a8ab05d1df1c0beb07f322262a7a7ac8ee2 | 3,654,746 |
def SignificanceWeights(serializer, decay):
"""Multiplies a binary mask with a symbol significance mask."""
def significance_weights(mask):
# (repr,) -> (batch, length, repr)
# significance = [0, 1, 2]
significance = serializer.significance_map
assert significance.shape[0] == mask.shape[2]
# significance = batch_size * [0, 1, 2]
significance = jnp.repeat(
significance[np.newaxis, ...], repeats=mask.shape[0], axis=0)
# significance = batch_size * [0, 1, 2] * mask.shape[1]
significance = jnp.repeat(
significance[..., jnp.newaxis], repeats=mask.shape[1], axis=2)
# significance = batch_size * mask.shape[1] * [0, 1, 2]
significance = jnp.swapaxes(significance, 1, 2)
assert significance.shape == mask.shape
sig_weights = mask * decay ** significance
return sig_weights
return tl.Fn('SignificanceWeights', significance_weights) | 545ac45149b8653f502d2dd864f92a40ee5919cb | 3,654,747 |
def check_fun_inter_allocation(fun_inter, data, **kwargs):
"""Check allocation rules for fun_inter then returns objects if check"""
out = None
check_allocation_fun_inter = get_allocation_object(data, kwargs['xml_fun_inter_list'])
if check_allocation_fun_inter is None:
check_fe = check_fun_elem_data_consumption(
data, fun_inter,
kwargs['xml_fun_elem_list'],
kwargs['xml_function_list'],
kwargs['xml_consumer_function_list'],
kwargs['xml_producer_function_list'])
if all(i for i in check_fe):
out = [fun_inter, data]
fun_inter.add_allocated_data(data.id)
elif True in check_fe:
if check_fe[0] is True:
print(f"Data {data.name} has only consumer(s) "
f"allocated to a functional element exposing "
f"{fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
elif check_fe[1] is True:
print(f"Data {data.name} has only producer(s) "
f"allocated to a functional element exposing "
f"{fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
else:
print(f"Data {data.name} has no producer(s) nor "
f"consumer(s) allocated to functional elements "
f"exposing {fun_inter.name}, {data.name} not "
f"allocated to {fun_inter.name}")
return out | 61f17844953f3260a23aff35a2f090a028dd9212 | 3,654,748 |
from typing import Optional
def kernel_bw_lookup(
compute_device: str,
compute_kernel: str,
caching_ratio: Optional[float] = None,
) -> Optional[float]:
"""
Calculates the device bandwidth based on given compute device, compute kernel, and
caching ratio.
Args:
compute_device (str): compute device.
compute_kernel (str): compute kernel.
caching_ratio (Optional[float]): caching ratio used to determine device bandwidth
if UVM caching is enabled.
Returns:
float: the device bandwidth.
"""
caching_ratio = caching_ratio if caching_ratio else UVM_CACHING_RATIO
lookup = {
# CPU
("cpu", EmbeddingComputeKernel.DENSE.value): 0.35 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.SPARSE.value): 0.35 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_DENSE.value): 0.5 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_FUSED.value): 1 * DDR_MEM_BW,
("cpu", EmbeddingComputeKernel.BATCHED_QUANT.value): 1 * DDR_MEM_BW,
# CUDA
("cuda", EmbeddingComputeKernel.DENSE.value): 0.35 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.SPARSE.value): 0.35 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_DENSE.value): 0.5 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED.value): 1 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM.value): DDR_MEM_BW / 10,
("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value): (
caching_ratio * HBM_MEM_BW + (1 - caching_ratio) * DDR_MEM_BW
)
/ 10,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT.value): 1 * HBM_MEM_BW,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT_UVM.value): DDR_MEM_BW / 10,
("cuda", EmbeddingComputeKernel.BATCHED_QUANT_UVM_CACHING.value): (
caching_ratio * HBM_MEM_BW + (1 - caching_ratio) * DDR_MEM_BW
)
/ 10,
}
return lookup.get((compute_device, compute_kernel)) | efd70d5c2e5fc9295bccbfb05113474ac40ff1c9 | 3,654,749 |
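# Hedged usage sketch for kernel_bw_lookup above: EmbeddingComputeKernel and the *_MEM_BW /
# UVM_CACHING_RATIO constants are assumed to come from the surrounding planner module.
hbm_bw = kernel_bw_lookup("cuda", EmbeddingComputeKernel.BATCHED_FUSED.value)
uvm_bw = kernel_bw_lookup("cuda", EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value, caching_ratio=0.2)
missing = kernel_bw_lookup("tpu", "dense")  # unknown (device, kernel) pair -> None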
def create_container(
container_image: str,
name: str = None,
volumes: t.List[str] = None,
) -> str:
"""Create a new working container from provided container image.
Args:
container_image (str): The container image to start from.
name (str, optional): The container name.
volumes (t.List[str], optional): Any volumes to bind into the container.
Returns:
str: The container name/id used for further manipulation.
"""
args = []
if name:
args.extend(["--name", name])
if volumes:
args.extend(_unwind_list("--volume", volumes))
command = ["buildah", "from"] + args + [container_image]
result = platform_utils.run_command(command, capture_stdout=True)
container = result.stdout.strip()
logger.success(f"Created '{container}' from image '{container_image}'")
return container | be50e84169e5d3df5dfd9730493d7daa9788049b | 3,654,751 |
def single_data_path(client, node_id):
"""
In order for a shrink to work, it should be on a single filesystem, as
shards cannot span filesystems. Return `True` if the node has a single
filesystem, and `False` otherwise.
:arg client: An :class:`elasticsearch.Elasticsearch` client object
:rtype: bool
"""
return len(client.nodes.stats()['nodes'][node_id]['fs']['data']) == 1 | ae0b34f82acb6d12faf525f0270250cdf471a6f8 | 3,654,752 |
def sortorder(obj):
"""
Tries to smartly determine the sort order for this object ``obj``
"""
if hasattr(obj, 'last'):
return obj.last.timestamp()
if isinstance(obj, str):
# First assume pure numeric
try:
return float(obj)
except ValueError:
pass
# Assume it is of the form
# AB [N.M] PPP words'
try:
return float(obj.split('[')[1].split(']')[0])
except (IndexError, ValueError):
return strip_tags(obj).strip()
return None | 674ee77a87ccd7a0bd89a88b88a2682926a1135e | 3,654,753 |
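# Illustrative calls (hedged), following the "AB [N.M] PPP words" format named in the code:
sortorder("3.5")                 # -> 3.5  (pure numeric string)
sortorder("AB [2.1] some words") # -> 2.1  (number pulled from the brackets)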
import json
import re
def get_more_details_of_post(post_url: str) -> json:
"""
:param post_url: the url of an imgur post
:return: Details like Virality-score, username etc in JSON format
"""
details = {}
try:
request = HTMLSession().get(post_url)
# sometimes the request isn't made properly, hence call again.
if len(request.html.find('script')) < 18:
request = HTMLSession().get(post_url)
return details
# handle when it's not there at all
regex = 'item: ({.+} )' # regex to isolate the `item` dict.
# 18th script tag has the `item` dict. this is tested on more than 1500 links.
matched = re.search(regex, request.html.find(
'script')[18].text).group(0)
item = json.loads(matched[5:])
details['username'] = item['account_url']
details['comment_count'] = item['comment_count']
details['downs'] = item['downs']
details['ups'] = item['ups']
details['points'] = item['points']
details['score'] = item['score']
details['timestamp'] = item['timestamp']
details['views'] = item['views']
details['favorite_count'] = item['favorite_count']
details['hot_datetime'] = item['hot_datetime']
details['nsfw'] = item['nsfw']
details['platform'] = 'Not Detected' if item['platform'] is None else item['platform']
details['virality'] = item['virality']
except Exception as e:
print(e)
return details | dd3d622c8a7e8f61daf24c2d0cc6752323d4693e | 3,654,754 |
import struct
import hmac
import hashlib
def subkey_public_pair_chain_code_pair(public_pair, chain_code_bytes, i):
"""
Yield info for a child node for this node.
public_pair:
base public pair
chain_code:
base chain code
i:
the index for this node.
Returns a pair (new_public_pair, new_chain_code)
"""
i_as_bytes = struct.pack(">l", i)
sec = public_pair_to_sec(public_pair, compressed=True)
data = sec + i_as_bytes
I64 = hmac.HMAC(key=chain_code_bytes, msg=data, digestmod=hashlib.sha512).digest()
I_left_as_exponent = from_bytes_32(I64[:32])
x, y = public_pair
the_point = I_left_as_exponent * ecdsa.generator_secp256k1 + \
ecdsa.Point(ecdsa.generator_secp256k1.curve(), x, y, ORDER)
if the_point == INFINITY:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('K_{} == {}'.format(i, the_point))
I_left_as_exponent = from_bytes_32(I64[:32])
if I_left_as_exponent >= ORDER:
logger.critical(_SUBKEY_VALIDATION_LOG_ERR_FMT)
raise DerivationError('I_L >= {}'.format(ORDER))
new_public_pair = the_point.pair()
new_chain_code = I64[32:]
return new_public_pair, new_chain_code | 8f31eb0ae3b063964ff46bcf6c78431d39d0e2ba | 3,654,755 |
from typing import Optional
def get_registry_description(metaprefix: str) -> Optional[str]:
"""Get the description for the registry, if available.
:param metaprefix: The metaprefix of the registry
:return: The description for the registry, if available, otherwise ``None``.
>>> get_registry_description('prefixcommons')
'A registry of commonly used prefixes in the life sciences and linked data'
>>> get_registry_description('missing')
None
"""
registry = get_registry(metaprefix)
if registry is None:
return None
return registry.description | 12b7aac7f880d6699ca85add1065eca49a06d278 | 3,654,756 |
import tqdm
def evaluate(model, valid_exe, valid_ds, valid_prog, dev_count, metric):
"""evaluate """
acc_loss = 0
acc_top1 = 0
cc = 0
for feed_dict in tqdm.tqdm(
multi_device(valid_ds.generator(), dev_count), desc='evaluating'):
if dev_count > 1:
loss, top1 = valid_exe.run(
feed=feed_dict,
fetch_list=[model.metrics[0].name, model.metrics[1].name])
loss = np.mean(loss)
top1 = np.mean(top1)
else:
loss, top1 = valid_exe.run(
valid_prog,
feed=feed_dict,
fetch_list=[model.metrics[0].name, model.metrics[1].name])
acc_loss += loss
acc_top1 += top1
cc += 1
ret = {"loss": float(acc_loss / cc), "top1": float(acc_top1 / cc)}
return ret | 7b228e7cadd71ec1ac31436767b92c4dadb5ec53 | 3,654,757 |
def _get_rank(player):
"""Get the rank of a player"""
cursor = _DB.cursor()
try:
cursor.execute("SELECT score FROM scores WHERE player = ?", (player.lower(),))
rows = cursor.fetchall()
if not rows:
return 0
ps = rows[0][0]
cursor.execute("SELECT count(*) FROM scores WHERE score > ?", (ps,))
rows = cursor.fetchall()
return 1+rows[0][0]
finally:
cursor.close() | e556b9fb75f6b40c8c1be8759255dfc5953a1e9a | 3,654,758 |
import pydoc
def spec(func):
"""return a string with Python function specification"""
doc = pydoc.plain(pydoc.render_doc(func))
return doc.splitlines()[2] | 00b96364f77141fedd7d50396946fd4e29cc5d02 | 3,654,760 |
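# Quick check (hedged): pydoc renders "Python Library Documentation: ..." on the first line,
# a blank line, then the signature, so splitlines()[2] is the specification line.
def _add(a, b=1):
    return a + b
print(spec(_add))  # e.g. "_add(a, b=1)"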
import posixpath
def IsVirus(mi, log):
"""Test: a virus is any message with an attached executable
I've also noticed the viruses come in as wav and midi attachments
so I trigger on those as well.
This is a very paranoid detector, since someone might send me a
binary for valid reasons. I white-list everyone who's sent me
email before so it doesn't affect me.
"""
for part in mi.msg.walk():
if part.get_main_type() == 'multipart':
continue
filename = part.get_filename()
if filename is None:
if part.get_type() in ["application/x-msdownload",
"audio/x-wav", "audio/x-midi"]:
# Only viruses send messages to me with these types
log.pass_test(VIRUS)
return ("it has a virus-like content-type (%s)" %
part.get_type())
else:
extensions = "bat com exe pif ref scr vbs wsh".split()
base, ext = posixpath.splitext(filename)
if ext[1:].lower() in extensions:
log.pass_test(VIRUS)
return "it has a virus-like attachment (%s)" % ext[1:]
return False | e30e91951ad49395d87bef07926cfdff4d15b3e2 | 3,654,763 |
def to_curl(request, compressed=False, verify=True):
"""
Returns a string with the curl command for the provided request object
Parameters
----------
compressed : bool
If `True` then `--compressed` argument will be added to result
"""
parts = [
('curl', None),
('-X', request.method),
]
for k, v in sorted(request.headers.items()):
parts += [('-H', '{0}: {1}'.format(k, v))]
if request.body:
body = request.body
if isinstance(body, bytes):
body = body.decode('utf-8')
parts += [('-d', body)]
if compressed:
parts += [('--compressed', None)]
if not verify:
parts += [('--insecure', None)]
parts += [(None, request.url)]
flat_parts = []
for k, v in parts:
if k:
flat_parts.append(quote(k))
if v:
flat_parts.append(quote(v).replace("\n", "\\n"))
return ' '.join(flat_parts) | b462f62031f4fe757bb7a45b50ced9bc2ea6a9b5 | 3,654,764 |
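# Hedged usage sketch with the `requests` library (and assuming `quote` above is shlex.quote):
import requests
prepared = requests.Request(
    "POST", "https://httpbin.org/post",
    headers={"Content-Type": "application/json"},
    data='{"hello": "world"}',
).prepare()
print(to_curl(prepared, compressed=True))
# e.g. curl -X POST -H 'Content-Length: 18' -H 'Content-Type: application/json' -d '{"hello": "world"}' --compressed https://httpbin.org/post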
def prox_trace_indicator(a, lamda):
"""Time-varying latent variable graphical lasso prox."""
es, Q = np.linalg.eigh(a)
xi = np.maximum(es - lamda, 0)
return np.linalg.multi_dot((Q, np.diag(xi), Q.T)) | 85d6cb26c7a35dbab771e0a9f9c8979fba90e680 | 3,654,765 |
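# Small numeric check (hedged): the prox soft-thresholds the eigenvalues of a symmetric
# matrix at `lamda` and clips them at zero, so diag(3, 1, 0.5) with lamda=1 -> diag(2, 0, 0).
import numpy as np
a = np.diag([3.0, 1.0, 0.5])
print(np.round(prox_trace_indicator(a, lamda=1.0), 6))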
def get_gamma_non_jitted(esys):
"""Get log gamma
Returns
-------
float[:]
"""
if isinstance(esys.species[0].logc, float):
v = np.empty(len(esys.species))
else:
v = np.empty(len(esys.species), dtype=object)
for i, sp in enumerate(esys.species):
v[i] = 10.0 ** (sp.logg)
return v
# return np.array([10.0**(sp.logg) for sp in self.species])
# v = np.empty(len(self.species))
# for i, sp in enumerate(self.species):
# v[i] = 10.0**(sp.logg)
# return v | f3d7f4b96676a10b7065196aac247006019da31e | 3,654,766 |
def active_matrices_from_extrinsic_euler_angles(
basis1, basis2, basis3, e, out=None):
"""Compute active rotation matrices from extrinsic Euler angles.
Parameters
----------
basis1 : int
Basis vector of first rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
basis2 : int
Basis vector of second rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
basis3 : int
Basis vector of third rotation. 0 corresponds to x axis, 1 to y axis,
and 2 to z axis.
e : array-like, shape (..., 3)
Euler angles
out : array, shape (..., 3, 3), optional (default: new array)
Output array to which we write the result
Returns
-------
Rs : array, shape (..., 3, 3)
Rotation matrices
"""
e = np.asarray(e)
R_shape = e.shape + (3,)
R_alpha = active_matrices_from_angles(basis1, e[..., 0].flat)
R_beta = active_matrices_from_angles(basis2, e[..., 1].flat)
R_gamma = active_matrices_from_angles(basis3, e[..., 2].flat)
if out is None:
out = np.empty(R_shape)
out[:] = np.einsum(
"nij,njk->nik", np.einsum("nij,njk->nik", R_gamma, R_beta),
R_alpha).reshape(R_shape)
return out | 50218d9ce2296e3c4952cc77fe64e30c19e03f77 | 3,654,767 |
def runQuery(scenarioID):
"""
Run a query that acquires the data from the lrs for one specific dialoguetrainer scenario
\n
:param scenarioID: The id of the scenario to request the data from \t
:type scenarioID: int \n
:returns: The data for that scenario or error \t
:rtype: [Dict<string, mixed>] | {error} \n
"""
return (
lrs.Query()
.where(lrs.Attr.ACTIVITY, lrs.IS, f"https://en.dialoguetrainer.app/scenario/play/{scenarioID}")
.where(lrs.Attr.VERB, lrs.IS, "https://adlnet.gov/expapi/verbs/completed")
.select(lrs.Attr.ACTOR, lrs.Attr.RESULT)
.execute()
) | 0f57a4468354680b315a65263593979149bdb186 | 3,654,769 |
def is_spaceafter_yes(line):
"""
SpaceAfter="Yes" extracted from line
"""
if line[-1] == "_":
return False
for ddd in line[MISC].split("|"):
kkk, vvv = ddd.split("=")
if kkk == "SpaceAfter":
return vvv == "Yes"
raise ValueError | 5693c8874ec9676bf19d9b1cb7ead5c1772a3f0b | 3,654,770 |
def linear_scheduler(optimizer, warmup_steps, training_steps, last_epoch=-1):
"""linear_scheduler with warmup from huggingface"""
def lr_lambda(current_step):
if current_step < warmup_steps:
return float(current_step) / float(max(1, warmup_steps))
return max(
0.0,
float(training_steps - current_step)
/ float(max(1, training_steps - warmup_steps)),
)
return LambdaLR(optimizer, lr_lambda, last_epoch) | d9446ede5be0ed981ae00b0bccd494017057d834 | 3,654,771 |
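# Hedged usage sketch with PyTorch, assuming LambdaLR above is torch.optim.lr_scheduler.LambdaLR:
# linear warmup for the first 100 steps, then linear decay to zero at step 1000.
import torch
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
scheduler = linear_scheduler(optimizer, warmup_steps=100, training_steps=1000)
for _ in range(1000):
    optimizer.step()
    scheduler.step()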
from functools import reduce
import operator
# Note: X below refers to the QGL X (pi) pulse primitive, not Python's `re.X` flag.
def MajorityVoteN(qubits,
nrounds,
prep=[],
meas_delay=1e-6,
add_cals=False,
calRepeats=2):
"""
Majority vote across multiple measurement results (same or different qubits)
Parameters
----------
qubits : Channels.LogicalChannel tuple
A hashable (immutable) tuple of qubits for majority vote
nrounds: int
Number of consecutive measurements
prep : boolean iterable, optional
Array of binary values mapping X(q) pulses to the list of qubits
provided. Ex: (q1,q2), prep=(1,0) -> would apply a pi pulse to q1
before the majority vote measurement. Default = []
measDelay : int/float, optional
Delay between syndrome check rounds (seconds)
add_cals : boolean, optional
Whether to append calibration pulses to the end of the sequence
calRepeats : int, optional
How many times to repeat calibration scalings (default 2)
Returns
-------
metafile : string
Path to a json metafile with details about the sequences and paths to
compiled machine files
Examples
--------
>>> mf = MajorityVoteN((q1, q2, q3), 10);
Compiled 1 sequences.
o INVALIDATE(channel=None, addr=0x1, mask=0x0)
o WRITEADDR(channel=None, addr=0x1, value=0xfffff)
MAJORITYMASK(in_addr=1, out_addr=0)
o INVALIDATE(channel=None, addr=0xa, mask=0xfffff)
o INVALIDATE(channel=None, addr=0xb, mask=0x1)
MAJORITY(in_addr=a, out_addr=b)
>>> mf
'/path/to/exp/exp-meta.json'
"""
nqubits = len(qubits)
seqs = [MajorityMask(1, 0, nrounds*nqubits),
Invalidate(10, nrounds*nqubits),
Invalidate(11, 1)]
if prep:
seqs += [reduce(operator.mul,
[X(q) for n,q in enumerate(qubits) if prep[n]])]
for n in range(nrounds):
seqs += [reduce(operator.mul,
[MEASA(q, (10, nqubits*n+m)) for m,q in enumerate(qubits)]),
Id(qubits[0],meas_delay)]
seqs+=MajorityVote(10,11, nrounds*nqubits)
seqs+=qwait("RAM", 11)
seqs+=[Id(qubits[0],100e-9)]
seqs+=qif(1,[X(qubits[0])]) # placeholder for any conditional operation
seqs=[seqs]
if add_cals:
seqs += create_cal_seqs(qubits,
calRepeats)
metafile = compile_to_hardware(seqs,
'MajorityVote/MajorityVote',
tdm_seq=True)
return metafile | 7bc3b6161d5224ed7adf9248b32b0bd283f50c70 | 3,654,772 |
def getRatios(vect1, vect2):
"""Assumes: vect1 and vect2 are equal length lists of numbers
Returns: a list containing the meaningful values of
vect1[i]/vect2[i]"""
ratios = []
for index in range(len(vect1)):
try:
ratios.append(vect1[index]/vect2[index])
except ZeroDivisionError:
ratios.append(float('nan')) #nan = Not a Number
except:
raise ValueError('getRatios called with bad arguments')
return ratios | e28f871986ab2b1b87cc3671b1c27ad14a0aadf8 | 3,654,773 |
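# Example grounded in the docstring: element-wise ratios, with nan where the denominator is zero.
print(getRatios([1.0, 9.0, 4.0], [2.0, 3.0, 0.0]))  # [0.5, 3.0, nan]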
def sampleset():
"""Return list with 50 positive and 10 negative samples"""
pos = [(0, i) for i in range(50)]
neg = [(1, i) for i in range(10)]
return pos + neg | 77e5a0ca3ad8757f0ded2aec9d73312a66ac9044 | 3,654,774 |
def recognize_emotion(name, mode, dataset):
"""
The main program for building the system. The following kinds of models are supported:
1. Convolutional Neural Network (CNN)
2. Support Vector Machine (SVM)
3. Adaboost
4. Multilayer Perceptron (MLP)
Args:
name: path of the photo for recognizing
mode: mode used for face detection, 'auto' or 'manual'
dataset: dataset used for face recognition, 'CK+48' or 'fer2013'
Returns:
prediction: emotion prediction (numerical) of detected faces using cnn and fisherfaces
recognized: emotion recognition (categorical) of detected faces using cnn and fisherfaces
Note: result will be printed to standard output, accuracy needs to be improved.
"""
# Load the dataset into a shuffled list of tuples
dataset_tuple_list = dp.load_dataset(dataset)
# Split the dataset into train, test, validation and their corresponding labels
img_train, img_train_label, img_validation, img_validation_label, img_test, img_test_label, le = \
dp.split_data(dataset_tuple_list)
# Fisherfaces: Get the fisherfaces_train and fisherfaces_test feature vectors for further training and predicting
fisher_train, fisher_test, fisher_validation, pca, lda = fe.fisherfaces(img_train, img_test, img_validation,
img_train_label, le)
# Construct and train the selected model with the input train and validation datasets
model_trained = mc.train_model('cnn', fisher_train, img_train_label, fisher_validation,
img_validation_label, 'fisherfaces')
# detect faces in photo and get coordinates of them
face_coordinates, resized_list = fd.detect_face(name, mode)
# project faces to fisherfaces
face_column_matrix = fe.constructRowMatrix(np.array(resized_list))
pca_face = pca.transform(face_column_matrix)
fisherfaces_face = lda.transform(pca_face)
# use trained cnn to recognize emotions
fisherfaces_face = fisherfaces_face.reshape(-1, 1, 6)
prediction = model_trained.predict(fisherfaces_face)
recognized = np.argmax(prediction, axis=1)
print(f'\nprediction:\n{prediction}\nrecognized:\n{recognized}')
return prediction, recognized | eae092866f5190a637bbdb08c4ad7188b9cb88f3 | 3,654,775 |
def feedback(request):
"""
Feedback page. Here one can send feedback to improve the
website further.
"""
return render(request, "groundfloor/common/feedback.html", context = None) | 8dd9f8ae57ca49629820c58b54c7d98d705597bb | 3,654,776 |
from typing import Tuple
def fill_nodata_image(dataset: xr.Dataset) -> Tuple[np.ndarray, np.ndarray]:
"""
Interpolate no data values in image. If no mask was given, create all valid masks
:param dataset: Dataset image
:type dataset: xarray.Dataset containing :
- im : 2D (row, col) xarray.DataArray
:return: a Tuple that contains the filled image and mask
:rtype: Tuple of np.ndarray
"""
if 'msk' in dataset:
img, msk = interpolate_nodata_sgm(dataset['im'].data, dataset['msk'].data)
else:
msk = np.full((dataset['im'].data.shape[0], dataset['im'].data.shape[1]), int(dataset.attrs['valid_pixels']))
img = dataset['im'].data
return img, msk | c245f0cbfbb79737fb85b9b8fb8381aad6373926 | 3,654,777 |
def find(value, a_list):
"""
TestCase for find
>>> find(26, [12,14])
True
>>> find(40, [14, 15, 16, 4, 6, 5])
False
>>> find(1, [1])
False
>>> find(1, [])
False
>>> find(4, [2, 3, 2])
True
"""
# First convert the list into a <value, count> dictionary
if a_list is None or len(a_list) < 2:
return False
d = {}
for i in range(len(a_list)):
if a_list[i] in d:
d[a_list[i]] = d[a_list[i]] + 1
else:
d[a_list[i]] = 1
# Second pass
for i in a_list:
if value - i in d:
# exclude the case of pairing the element with itself
x = value == i*2
if(not (x and d[i] == 1)):
return True
return False | dd466a8ffa0c760ed0af9ad109b5f4e3b85a62db | 3,654,778 |
def transform_bbox(
bbox, source_epsg_code, target_epsg_code, all_coords=False
):
"""
Transform bbox from source_epsg_code to target_epsg_code,
if necessary
:returns np.array of shape 4 which represent the two coordinates:
left, bottom and right, top.
When `all_coords` is set to `True`, a np.array of shape 8 is given
which represents coords of the bbox in the following order:
left top, right top, right bottom, left bottom
"""
if source_epsg_code != target_epsg_code:
# XXX: Not entirely sure whether transformations between two projected
# coordinate systems always do retain the rectangular shape of a bbox.
# Transformations between an unprojected system (e.g. WGS84) and a
# projected system (e.g. RDNEW) will experience distortion: the
# resulting shape cannot be accurately represented by top left
# and bottom right.
source_srs = get_spatial_reference(source_epsg_code)
target_srs = get_spatial_reference(target_epsg_code)
if source_srs.IsProjected() != target_srs.IsProjected():
msg = "Transforming a bbox from %s to %s is inaccurate."
logger.warning(msg, source_epsg_code, target_epsg_code)
# Transform to [[left, right],[top, bottom]]
input_x = [bbox[BBOX_LEFT], bbox[BBOX_RIGHT]]
input_y = [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]]
if all_coords:
input_x += [bbox[BBOX_RIGHT], bbox[BBOX_LEFT]]
input_y += [bbox[BBOX_TOP], bbox[BBOX_BOTTOM]]
bbox_trans = np.array(
transform_xys(
np.array(input_x), np.array(input_y),
source_epsg_code, target_epsg_code
)
)
if all_coords:
bbox = np.array([
bbox_trans[0][0], bbox_trans[1][0], # left_top
bbox_trans[0][2], bbox_trans[1][2], # right_top
bbox_trans[0][1], bbox_trans[1][1], # right_bottom
bbox_trans[0][3], bbox_trans[1][3] # left_bottom
])
else:
# Transform back to [left,bottom,right,top]
bbox = np.array(
[min(bbox_trans[0]), min(bbox_trans[1]), # left_bottom
max(bbox_trans[0]), max(bbox_trans[1]) # right_top
]
)
return bbox | cd6938b2dfcc02fe9c2a323e2b60339de216dd26 | 3,654,779 |
def distance_metric(vector1, vector2):
""" Returns a score value using Jaccard distance
Args:
vector1 (np.array): first vector with minHash values
vector2 (np.array): second vector with minHash values
Returns:
float: Jaccard distance (dissimilarity)
"""
return distance.pdist(np.array([vector1,vector2]), 'jaccard').sum() | e1acbc9eff7ee8bc78be0307acacbca9e9d69265 | 3,654,780 |
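# Worked example (hedged): scipy's 'jaccard' metric counts disagreeing positions over
# positions where at least one vector is non-zero, so these two vectors are 2/4 = 0.5 apart.
print(distance_metric([1, 0, 1, 1, 0], [1, 1, 1, 0, 0]))  # 0.5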
from datetime import datetime
def update_calendar(request):
"""
to update an entry in the academic calendar.
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
prev_desc - Description for the previous event which is to be updated.
get_calendar_details = Get the object of the calendar instance from the database for the previous Description.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
prev_desc = request.POST.getlist('prev_desc')[0]
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime(*to_date).date()
get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first()
get_calendar_details.description = desc
get_calendar_details.from_date = from_date
get_calendar_details.to_date = to_date
get_calendar_details.save()
except Exception as e:
from_date=""
to_date=""
desc=""
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context) | 804f3f0443d192c0c18a501c40808f2406596491 | 3,654,781 |
def get_section(entry: LogEntry) -> str:
"""returns the section of the request (/twiki/bin/edit/Main -> /twiki)"""
section = entry.request.split('/')[:2]
return '/'.join(section) | dee463b5a662846da01fc2ef8d1c72c5b582e7e5 | 3,654,782 |
def reverse_lookup(d, v):
"""
Reverse lookup all corresponding keys of a given value.
Return a list containing all the keys.
Raise an exception if the list is empty.
"""
l = []
for k in d:
if d[k] == v:
l.append(k)
if l == []:
raise ValueError
else:
return l | d68f437aec47df964905779f99d58be84515fb72 | 3,654,783 |
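# Example: collect every key that maps to the given value; a ValueError is raised when none do.
d = {'apple': 'fruit', 'carrot': 'vegetable', 'pear': 'fruit'}
print(reverse_lookup(d, 'fruit'))  # ['apple', 'pear']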
def compile_channels_table(*, channels_meta, sources, detectors, wavelengths):
"""Compiles a NIRSChannelsTable given the details about the channels, sources,
detectors, and wavelengths.
"""
table = NIRSChannelsTable()
for channel_id, channel in channels_meta.items():
source_label = sources.label[channel["source_idx"]]
detector_label = detectors.label[channel["detector_idx"]]
source_wavelength = wavelengths[channel["wavelength_idx"]]
table.add_row(
label=f"{source_label}_{detector_label} {source_wavelength:.0f}",
source=channel["source_idx"],
detector=channel["detector_idx"],
source_wavelength=source_wavelength,
)
table.source.table = sources
table.detector.table = detectors
return table | ac3099ef0440962b3fbfeec36f01ae92061b5693 | 3,654,784 |
from pathlib import Path
def cpe2pkg_tool():
"""Unsupported ecosystem CVE fixture."""
bin = Path(__file__).parent.parent / Path('tools/bin/cpe2pkg.jar')
if bin.exists():
return str(bin)
else:
raise RuntimeError('`cpe2pkg.jar` is not available, please run `make build-cpe2pkg once.`') | 7ad5489cd560f2820a5e77c46964514a5a34edc9 | 3,654,785 |
import threading
def spawn_thread(func, *args, **kwds):
"""
Utility function for creating and starting a daemonic thread.
"""
thr = threading.Thread(target=func, args=args, kwargs=kwds)
thr.daemon = True
thr.start()
return thr | afaace7e02870390acb297106ac9d35c9a931a59 | 3,654,786 |
import uuid
def get_thread_replies(parent_id):
"""
Get all replies to a thread
If the thread does not exist, return an empty list
:param parent_id: Thread ID
:return: replies to thread
"""
assert type(parent_id) is uuid.UUID, """parent_id is not correct type"""
reply_query = Query()
results = db.search(reply_query.parent == str(parent_id))
return results | 1b167dcc4ab09d50cda9feb478c8f1a4d0399e96 | 3,654,788 |
import torch
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred) | 1b1ad83b9b4ae06f2bc80209e4e7339a421a39f3 | 3,654,789 |
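# Quick check (hedged): two of the three argmax predictions match the labels -> accuracy 2/3.
import torch
pred = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
labels = torch.tensor([0, 1, 1])
print(compute_acc(pred, labels))  # tensor(0.6667)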
async def read_update_status() -> str:
"""Read update status."""
return (
await cache.get(Config.update_status_id())
if await cache.exists(Config.update_status_id())
else "ready_to_update"
) | 0e80f7065665dbe1a41e59fe7c65904e58bb6d8f | 3,654,790 |
def PCopy (inFA, err):
"""
Make a copy of a GPUFArray
returns copy
* inFA = input Python GPUFArray
* err = Python Obit Error/message stack
"""
################################################################
# Checks
if not PIsA(inFA):
print("Actually ",inFA.__class__)
raise TypeError("inFA MUST be a Python Obit GPUFArray")
outFA = GPUFArray("None")
outFA.me = Obit.GPUFArrayCopy (inFA.me, outFA.me, err.me);
if err.isErr:
OErr.printErrMsg(err, "Error copying GPUFArray")
return outFA
# end PCopy | df1047dc143fb5f8d8f4fd88a2b1ebc0904620a2 | 3,654,792 |
def _get_statuses(policy_type_id, policy_instance_id):
"""
shared helper to get statuses for an instance
"""
_instance_is_valid(policy_type_id, policy_instance_id)
prefixes_for_handler = "{0}{1}.{2}.".format(HANDLER_PREFIX, policy_type_id, policy_instance_id)
return list(SDL.find_and_get(A1NS, prefixes_for_handler).values()) | fdddc26d3c2b65834d4b047a5565894b0d965f9d | 3,654,793 |
def phase_lines(graph):
""" Determines the phase lines of a graph.
:param graph: Graph
:return: dictionary with node id : phase in cut.
"""
if has_cycles(graph):
raise ValueError("a cyclic graph will not have phaselines.")
phases = {n: 0 for n in graph.nodes()}
q = graph.nodes(in_degree=0)
while q:
n = q.pop(0)
level = phases[n]
children = graph.nodes(from_node=n)
for c in children:
if phases[c] <= level:
phases[c] = level + 1
q.append(c)
return phases | 9f1aab9e487bd258c88b0f149bcf613341945879 | 3,654,794 |
def BCELossConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Creates a criterion that measures the Binary Cross Entropy
between the target and the output:
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
l_n = - w_n \\left[ y_n \\cdot \\log x_n + (1 - y_n) \\cdot \\log (1 - x_n) \\right],
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\\ell(x, y) = \\begin{cases}
\\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
\\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
\\end{cases}
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets :math:`y` should be numbers
between 0 and 1.
Notice that if :math:`x_n` is either 0 or 1, one of the log terms would be
mathematically undefined in the above loss equation. PyTorch chooses to set
:math:`\\log (0) = -\\infty`, since :math:`\\lim_{x\\to 0} \\log (x) = -\\infty`.
However, an infinite term in the loss equation is not desirable for several reasons.
For one, if either :math:`y_n = 0` or :math:`(1 - y_n) = 0`, then we would be
multiplying 0 with infinity. Secondly, if we have an infinite loss value, then
we would also have an infinite term in our gradient, since
:math:`\\lim_{x\\to 0} \\frac{d}{dx} \\log (x) = \\infty`.
This would make BCELoss's backward method nonlinear with respect to :math:`x_n`,
and using it for things like linear regression would not be straight-forward.
Our solution is that BCELoss clamps its log function outputs to be greater than
or equal to -100. This way, we can always have a finite loss value and a linear
backward method.
Shape:
- Input: :math:`(N, *)` where :math:`*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
shape as input.
Examples::
>>> m = nn.Sigmoid()
>>> loss = nn.BCELoss()
>>> input = torch.randn(3, requires_grad=True)
>>> target = torch.empty(3).random_(2)
>>> output = loss(m(input), target)
>>> output.backward()"""
argument_parser.add_argument(
"--weight",
help="""a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of
size `nbatch`.""",
required=True,
)
argument_parser.add_argument(
"--size_average",
type=bool,
help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in
the batch. Note that for some losses, there are multiple elements per sample. If the field
:attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``.""",
default=True,
)
argument_parser.add_argument(
"--reduce",
type=bool,
help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations
for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a
loss per batch element instead and ignores :attr:`size_average`.""",
default=True,
)
argument_parser.add_argument(
"--reduction",
help="""Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no
reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and
:attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of
those two args will override :attr:`reduction`.""",
required=True,
default="mean",
)
argument_parser.add_argument(
"--__constants__", type=str, action="append", required=True, default="reduction"
)
return argument_parser | d0108459bdad9b2f6fad438bff542624b482ef7d | 3,654,797 |
def gen_cities_avg(climate, multi_cities, years):
"""
Compute the average annual temperature over multiple cities.
Args:
climate: instance of Climate
multi_cities: the names of cities we want to average over (list of str)
years: the range of years of the yearly averaged temperature (list of
int)
Returns:
a pylab 1-d array of floats with length = len(years). Each element in
this array corresponds to the average annual temperature over the given
cities for a given year.
"""
# MY_CODE
return np.array([np.mean([np.mean(climate.get_yearly_temp(city, year))
for city in multi_cities]) for year in years]) | 9609add6a1514d09b42e2494e56c84522d3cb364 | 3,654,798 |
def tangentVectorsOnSphere( points, northPole = np.array([0.0,0.0,1.0]) ):
"""
Acquire a basis for the tangent space at given points on the surface of the unit sphere.
:param points: N x 3 array of N points at which to acquire basis of tangent space.
:param northPole: 3 array of point corresponding to the north pole.
:return A N x 3 x 3 array. Each point has three orthogonal tangent vectors of unit length.
They are constructed such that the first vector points towards the 'northPole'.
The second vector is orthogonal to both the first vector and the vector from the origin to the point of interest.
The third vector is equal to the vector between the origin and the point of interest.
The last dimension represents the elements of the vectors. The next-to-last dimension indexes the vectors.
"""
vectors = np.zeros( (points.shape[0], 3,3) )
# Get third vector
vectors[:, 2, :] = points / np.linalg.norm(points, axis= 1).reshape((-1,1))
# Get second vector
vectors[:, 1, :] = np.cross( northPole.reshape( (1,3) ), vectors[:,2,:] )
# Get first vector
vectors[:, 0, :] = np.cross( vectors[:,2,:], vectors[:,1,:] )
# Normalize vectors
lengths = np.linalg.norm( vectors, axis=2 ).reshape((-1, 3))
inds = np.any( lengths == 0.0, axis=1 )
vectors[inds, :, : ] = np.nan
vectors[~inds, :, :] = vectors[~inds, :, :] / lengths[~inds, :].reshape( (-1,3,1) )
return vectors | bfa23a393ac4d1b38c6c2b19207520db1bd83e03 | 3,654,799 |
import tkinter
def _colorvar_patch_destroy(fn):
"""Internal function.\n
Deletes the traces, if any, when the widget is destroyed."""
def _patch(self):
"""Interanl function."""
if self._tclCommands is not None:
# Deletes the widget from the _all_traces_colorvar
# and deletes the traces too.
for key, value in dict(_all_traces_colorvar).items():
if self == key[0]:
var, cbname = value
try:
var.trace_vdelete('w', cbname)
except tkinter.TclError:
pass
_all_traces_colorvar.pop(key)
return fn(self)
return _patch | d38380316932d8ff2fee8bed8b931b5567588774 | 3,654,800 |
def pres_from_hybrid(psfc, hya, hyb, p0=100000.):
"""Return pressure field on hybrid-sigma coordinates,
assuming formula is
p = a(k)*p0 + b(k)*ps.
"""
return hya*p0 + hyb*psfc | 4ebd90fb807ab9ea4c2b45d27da6f8b420c107f7 | 3,654,802 |
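# Worked example: with hybrid coefficients a=0.1, b=0.8, surface pressure 101325 Pa and the
# default reference p0=100000 Pa, p = 0.1*100000 + 0.8*101325 = 91060 Pa.
print(pres_from_hybrid(101325.0, 0.1, 0.8))  # 91060.0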
import urllib
def url_exist(file_url):
""" Check if an url exist
Parameters
----------
file_url : string
url of www location
Returns
-------
verdict : dtype=boolean
verdict if present
"""
try:
return urllib.request.urlopen(file_url).code == 200
except Exception:
return False | 717ee7073ab56e8611eb46f042ab7c18f2db0f33 | 3,654,803 |
from scipy import stats
def chi_square(observed, expected):
"""
Compute the chi square test
"""
# glen cowan pp61
temp = []
for (n, nu) in zip(observed, expected):
if nu != 0:
temp += [((n - nu) ** 2) / nu]
# compute p value
mychi = sum(temp)
p = stats.chi2.sf(mychi, len(temp))
return mychi, p | 4b0577ec4e4b4dc6b99b00a54e78f1014b9cf93a | 3,654,804 |
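# Small worked example (hedged): observed vs expected counts give
# chi2 = (48-50)**2/50 + (52-50)**2/50 = 0.16, with the p-value from scipy's chi2 survival function.
stat, p = chi_square([48, 52], [50, 50])
print(stat)  # 0.16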
def theta_8(p, q, h, phi, a, b):
"""Lower limit of integration for the case rho > a, rho > b."""
result = np.arctan(r_8(p, q, phi, a, b)/h)
return(result) | 913ceb462885fba93cbdb6bddaa5523c119821bc | 3,654,806 |
def collect_genewise(fst_file, file_name, gene_names, gene_to_fst):
"""take in the file name, opens it.
populates a dictionary to [gene] = fst
file_name = defaultdict(str)
FBgn0031208 500000 16 0.002 21.0 1:2=0.05752690
"""
file_name = file_name.split("_gene")[0]
f_in = open(fst_file, "r")
for line in f_in:
if test_line(line):
data = line.split()
if "1:2=" in line:
gene = data[0].strip()
gene_names.add(gene)
fst = data[5].strip()
fst = fst.split("=")[1]
data = "%s\t%s" % (gene, fst)
gene_to_fst[file_name].append(data)
return gene_to_fst, gene_names | ea62da67a7084103859244bf7f192c2f4433124c | 3,654,807 |
import torch
def bbox_overlaps_batch(anchors, gt_boxes):
"""
:param anchors: (N, 4) ndarray of float
:param gt_boxes: (b, K, 5) ndarray of float
:return: (N, K) ndarray of overlap between boxes and query_boxes
"""
batch_size = gt_boxes.size(0)
if anchors.dim() == 2:
N = anchors.size(0)
K = gt_boxes.size(1)
anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) - torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) - torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
elif anchors.dim() == 3:
N = anchors.size(1)
K = gt_boxes.size(1)
if anchors.size(2) == 4:
anchors = anchors[:,:,:4].contiguous()
else:
anchors = anchors[:,:,1:5].contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) - torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) - torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
else:
raise ValueError('anchors input dimension is not correct.')
return overlaps | cc5a88e6a1d5cd42b1827091cbee311e4f33bbb6 | 3,654,808 |
from math import inf
from typing import Tuple
from typing import Callable
from typing import Any
import re
def extract_curve_and_test(curve_names: str, name: str) -> Tuple[str, Callable[[Any], bool]]:
"""Return a curve and a test to apply for which of it's components to twist."""
twist_match = re.match(rf"(?P<curve>[{curve_names}])_(?P<n>-?\d+)$", name)
twist_index_match = re.match(rf"(?P<curve>[{curve_names}])\[ *(?P<n>-?\d+) *\]$", name)
twist_slice_match = re.match(rf"(?P<curve>[{curve_names}])(\[ *(?P<start>-?\d*) *: *(?P<stop>-?\d*) *(: *(?P<step>-?\d*) *)?\])?$", name)
twist_expr_match = re.match(rf"(?P<curve>[{curve_names}])\{{(?P<expr>.*)\}}$", name)
if twist_match is not None:
parameters = twist_match.groupdict()
curve = parameters["curve"]
n = int(parameters["n"])
test = lambda edge: edge == n
elif twist_index_match is not None:
parameters = twist_index_match.groupdict()
curve = parameters["curve"]
n = int(parameters["n"])
test = lambda edge: edge == n
elif twist_slice_match is not None:
parameters = twist_slice_match.groupdict()
curve = parameters["curve"]
start = int(parameters["start"]) if parameters["start"] else -inf
stop = int(parameters["stop"]) if parameters["stop"] else inf
step = int(parameters["step"]) if parameters["step"] else 1
test = lambda edge: start <= edge < stop and (edge % step == (0 if start == -inf else start % step))
elif twist_expr_match is not None:
parameters = twist_expr_match.groupdict()
curve = parameters["curve"]
test = lambda n: eval(parameters["expr"], {"n": n, **globals()}) # pylint: disable=eval-used
else:
raise ValueError(f"Unknown mapping class {name}")
return curve, test | e4849ff7145bae0c2c900d0aa747ec7a14fb96ac | 3,654,809 |
import numpy
def psf_gaussian(psf_shape, psf_waist, psf_physical_size=1, psf_nphoton=2):
"""Return 3D gaussian approximation of PSF."""
def f(index):
s = psf_shape[index] // 2 * psf_physical_size
c = numpy.linspace(-s, s, psf_shape[index])
c *= c
c *= -2.0 / (psf_waist[index] * psf_waist[index])
return c
psf = numpy.exp(
numpy.sum(
numpy.meshgrid(f(0), f(1), f(2), indexing='ij', sparse=False),
axis=0,
)
)
if psf_nphoton != 1:
numpy.power(psf, psf_nphoton, out=psf)
return psf | 77ccab6aaa141564751a0eafd13398f904673006 | 3,654,810 |
def get_employee_record(id_num):
"""Gets an employee's details if record exists.
Arguments:
id_num -- ID of employee record to fetch
"""
if not id_num in names or not id_num in cities:
return 'Error viewing record'
return f'{id_num} {names[id_num]} {cities[id_num]}' | 108b6e3482022e8e65e09bda1dd8a78ca7850cfe | 3,654,811 |
def list_aliases():
"""
Gets the list of aliases for the current account. An account has at most one alias.
:return: The list of aliases for the account.
"""
try:
response = iam.meta.client.list_account_aliases()
aliases = response['AccountAliases']
if len(aliases) > 0:
logger.info("Got aliases for your account: %s.", ','.join(aliases))
else:
logger.info("Got no aliases for your account.")
except ClientError:
logger.exception("Couldn't list aliases for your account.")
raise
else:
return response['AccountAliases'] | 13fa5d4ded6811bbcbd6062cf7f690b08c41354e | 3,654,812 |
def MapToSingleIncrease(val):
"""
Need 30 minute values to be sequential for some of the tools(i.e. 1,2,3,4) so using a format
like 5,10,15,20 won't work.
"""
return val/5 | fe89d7ccb8bef511e2ad90a07ad0346c58ba894d | 3,654,813 |
def get_columns_for_table(instance, db, table):
""" Get a list of columns in a table
Args:
instance - a hostAddr object
db - a string which contains a name of a db
table - the name of the table to fetch columns
Returns
A list of columns
"""
conn = connect_mysql(instance)
cursor = conn.cursor()
ret = list()
param = {'db': db, 'table': table}
sql = ("SELECT COLUMN_NAME "
"FROM information_schema.columns "
"WHERE TABLE_SCHEMA=%(db)s AND"
" TABLE_NAME=%(table)s")
cursor.execute(sql, param)
for column in cursor.fetchall():
ret.append(column['COLUMN_NAME'])
return ret | 567a7e3e6ebbf33cee3cb088e1725bd2b11edcef | 3,654,814 |
def registra_aluno(nome, ano_entrada, ano_nascimento, **misc):
"""Cria a entrada do registro de um aluno."""
registro = {'nome': nome,
'ano_entrada': ano_entrada,
'ano_nascimento': ano_nascimento}
for key in misc:
registro[key] = misc[key]
return registro | e56da99ec90de9ebca204ccc3c3f3555b9bbbc64 | 3,654,815 |
def create_small_table(small_dict):
"""
Create a small table using the keys of small_dict as headers. This is only
suitable for small dictionaries.
Args:
small_dict (dict): a result dictionary of only a few items.
Returns:
str: the table as a string.
"""
keys, values = tuple(zip(*small_dict.items()))
table = tabulate(
[values],
headers=keys,
tablefmt="pipe",
floatfmt=".3f",
stralign="center",
numalign="center",
)
return table | 08da78580fbf4cee8c30acb21ce7fa928a9c17b1 | 3,654,818 |
def get_normalized_list_for_every_month(variable_r, list_of_ranges_r, tags_r):
"""
:param variable_r: big list with all the data [sizes][months]
:param list_of_ranges_r: sorted list of range (sizes...Enormous, etc.)
:return: normalized list for each month (numbers are percentages with respect to the total bytes/requests in a given month)
"""
number_of_months = len(tags_r)
temp_list = [[] for lil in range(0, number_of_months)]
total_requests_in_each_month = [[] for lil in range(0, number_of_months)]
maxima_each_month = [[] for lil in range(0, number_of_months)]
new_list_normalized = [[] for lil in range(0, number_of_months)]
for month in range(0, number_of_months):
for ciao in range(0, len(list_of_ranges_r), 1):
temp_list[month].append(variable_r[ciao][month]) # change second index to change the month: 0,1,2,...23
for month in range(0, number_of_months):
total_requests_in_each_month[month] = float(sum(temp_list[month]))
#print("total bytes requested in month 0: %f" % total_requests_in_each_month[0])
# list of maxima for each month
for month in range(0, number_of_months):
maxima_each_month[month] = max(temp_list[month])
#print("maxima for the first month: %d", maxima_each_month[0])
for month in range(0, number_of_months):
for zeta in temp_list[month]:
new_list_normalized[month].append((zeta/total_requests_in_each_month[month])*100)
return new_list_normalized | 4a3b837e6bf254dbd3255a8ca0a5d103d34bd2a9 | 3,654,819 |
def mark_as_possible_cluster_member(g, possible_cluster_member, cluster, confidence, system, uri_ref=None):
"""
Mark an entity or event as a possible member of a cluster.
:param rdflib.graph.Graph g: The underlying RDF model
:param rdflib.term.URIRef possible_cluster_member: The entity or event to mark as a possible
member of the specified cluster
:param rdflib.term.URIRef cluster: The cluster to associate with the possible cluster member
:param float confidence: The confidence with which to mark the cluster membership
:param rdflib.term.URIRef system: The system object for the system which marked the specified cluster
:param str uri_ref: A string URI representation of the cluster member (Default is None)
:returns: The cluster membership assertion
:rtype: rdflib.term.BNode
"""
cluster_member_assertion = _make_aif_resource(g, uri_ref, AIDA_ANNOTATION.ClusterMembership, system)
g.add((cluster_member_assertion, AIDA_ANNOTATION.cluster, cluster))
g.add((cluster_member_assertion, AIDA_ANNOTATION.clusterMember, possible_cluster_member))
mark_confidence(g, cluster_member_assertion, confidence, system)
return cluster_member_assertion | 851da7d12781723c7c2fb4bc13ac14172c890daf | 3,654,820 |
def twodcontourplot(tadata_nm, tadata_timedelay, tadata_z_corr):
"""
make contour plot
Args:
tadata_nm: wavelength array
tadata_timedelay: time delay array
tadata_z_corr: matrix of z values
"""
timedelayi, nmi = np.meshgrid(tadata_timedelay, tadata_nm)
# find the maximum and minimum
# these are used for color bar
z_min = np.amin(np.amin(tadata_z_corr, axis=1))
z_max = np.amax(np.amax(tadata_z_corr, axis=1))
return [nmi, timedelayi, z_min, z_max] | 2e8850e1c8153c9c307ff785ddd1d1d127163190 | 3,654,821 |
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags | ffc2fd47bbee7d2124da199d6b5101103500fbf2 | 3,654,822 |
def count_good_deals(df):
"""
7. Counts the number of profitable deals
:param df: - dataframe with a '<DEAL_RESULT>' column
:return: - number of profitable deals
"""
# http://stackoverflow.com/questions/27140860/count-occurrences-of-number-by-column-in-pandas-data-frame?rq=1
return (df['<DEAL_RESULT>'] > 0).sum() | 1f3ef9b9e0f7924d45d5ce84a77938f19386b6bc | 3,654,823 |
import time
import itertools
import sh
def match_lines_by_hausdorff(target_features, match_features, distance_tolerance,
azimuth_tolerance=None, length_tolerance=0, match_features_sindex=None, match_fields=False,
match_stats=False, field_suffixes=('', '_match'), match_strings=None, constrain_target_features=False,
target_features_sindex=None, match_vectors=False, expand_target_features=False,
closest_match=False, closest_target=False, verbose=False):
"""Conflate attributes between line features based on Hausdorff distance.
target_features : :class:`geopandas.GeoDataFrame`
Features to which ``match_features`` will be matched.
Must have LineString geometries.
All ``target_features`` will be included in output, with or without a match.
match_features : :class:`geopandas.GeoDataFrame`
Features to be matched to ``target_features``.
Must have LineString geometries.
Only successfully matched features will be included in output.
Multiple ``match_features`` may be matched to a single target feature.
Must have the same spatial reference as ``target_features``.
distance_tolerance : :obj:`float`
Maximum Hausdorff distance between each target feature and candidate ``match_features``
Because directed Hausdorff distances are calculated from target to match
and match to target, ``distance_tolerance`` will be assessed based on
the smaller of these two values.
If feature segments are matched (e.g., 1:n, m:1, or m:n),
Hausdorff distances are calculated for each segment.
In spatial unit of ``target_features``.
azimuth_tolerance : :obj:`float`, optional, default = ``None``
Maximum azimuth difference (in degrees) between target feature and potential match features.
Feature azimuths are calculated as the azimuth of the feature's "major axis"
(the longest axis of the feature's minimum bounding rectangle).
If feature segments are matched (e.g., 1:n, m:1, or m:n),
azimuths are calculated for each segment.
length_tolerance : :obj:`float`, optional, default = 0
Proportion of target feature length required for potential match features.
For example, 0.25 specifies that a match candidates must be at least 25% as long as
target features to be viable matches.
Must be between 0 and 1. If target and match features are split, length proportions
are calculated between split segments, not original features.
match_features_sindex : :class:`rtree.index.Index`, optional, default = ``None``
Spatial index for ``match_features``.
If provided, will not have to be constructed for each function call.
match_fields : :obj:`bool`, optional, default = ``False``
* ``True``: Fields from match features will be included in output.
* ``False``: Only row indices for match features will be included in output.
match_stats : :obj:`bool`, optional, default = ``False``
* ``True``: Statistics related to tolerances will be included in output.
* ``False``: No match statistics will be included in ouput.
field_suffixes : :obj:`tuple`, optional, default = ``('', '_match')``
Suffixes to be appended to output field names for ``target_features``
and ``match_features``, respectively.
Only used if ``match_stats=True``.
match_strings : :obj:`tuple`, optional, default = ``None``
Fields used to compute fuzzy string comparisions.
Typically, these are street name fields for the ``target_features``
and ``match_features``, respectively.
String comparisions do not affect matches, but can be post-processed to
help assess match quality.
constrain_target_features : :obj:`bool`, optional, default = ``False``
* ``True``: Extents of ``match_features``, plus a ``distance_tolerance`` buffer,
will be used to select relevent ``target_features`` prior to matching.
When the extent or number of ``match_features`` is small relative to
``target_features``, this dramatically improves performance because fewer
``target_features`` are analyzed for potential matches.
* ``False``: All ``target_features`` are analyzed for potential matches.
target_features_sindex : :class:`rtree.index.Index`, optional, default = ``None``
If ``constrain_target_features=True``, a spatial index for the ``target_features``
will be computed unless one is provided. If the same ``target_features`` are specified
in multiple function calls, pre-computing a spatial index will improve performance.
If ``constrain_target_features=False``, ``target_features_sindex`` is unnecessary.
match_vectors : :obj:`bool`, optional, default = ``False``
* ``True``: Constructs LineStrings between midpoint of ``target_features`` and the
closest points along matched ``match_features``. Useful for vizualizing match results.
expand_target_features : :obj:`bool`, optional, default = ``False``
* ``True`` : Target features that match to multiple ``match_features`` will be expanded
into multiple segments, each corresponding to a single match feature. Each target feature
segment will be output as a seperate record with an index field identifying original
row-wise indices from ``target_features``.
closest_match : :obj:`bool`, optional, default = ``False``
* ``True`` : Only the closest available match feature will be matched to each target
feature, based on Hausdorff distance
* ``False`` : All available match features will match to each target feature
closest_target : :obj:`bool`, optional, default = ``False``
* ``True`` : A target feature will only match with a match feature if it is the closest
available target, based on Hausdorff distance
* ``False`` : A target feature will match with all available match features, regardless
of whether it has also matched with other target features
verbose : :obj:`bool`, optional, default = ``False``
* ``True`` : Reports status by printing to standard output
"""
    # Copy input features so the function doesn't modify the originals
target_features = target_features.copy()
match_features = match_features.copy()
original_target_feature_columns = target_features.columns
original_crs = target_features.crs
if verbose:
start = time()
length = len(target_features)
counter = 0
# Constrain target features to those near available match features
if constrain_target_features:
if not target_features_sindex:
target_features_sindex = target_features.sindex
nearby_target_idx = []
for match_feature in match_features.geometry:
nearby_target_idx.extend(
list(target_features_sindex.intersection(
match_feature.buffer(distance_tolerance).bounds)))
nearby_target_idx = list(set(nearby_target_idx))
operating_target_features = target_features[['geometry']].iloc[nearby_target_idx].copy()
else:
operating_target_features = target_features[['geometry']].copy()
# Make a spatial index for match features, if one isn't supplied
if not match_features_sindex:
match_features_sindex = match_features.sindex
# Initiate lists to store match results
match_indices = []
match_types = []
h_tms_matches = []
t_props_matches = []
t_segs_matches = []
t_linrefs_matches = []
h_mts_matches = []
m_props_matches = []
m_segs_matches = []
m_linrefs_matches = []
if match_vectors:
match_vectors = []
# Iterate through target features:
for i, target in enumerate(operating_target_features.geometry):
# Initiate lists to store matches
m_ids = []
m_types = []
h_tms = []
t_props = []
t_segs = []
t_linrefs = []
h_mts = []
m_props = []
m_segs = []
m_linrefs = []
# Only analyze targets with length
if target.length > 0:
# Roughly filter candidates with a spatial index
search_area = target.buffer(distance_tolerance).bounds
candidate_IDs = list(match_features_sindex.intersection(search_area))
candidates = match_features[['geometry']].iloc[candidate_IDs].reset_index()
            # Calculate Hausdorff distances from target to each candidate (h_tm)
h_tm_list = [directed_hausdorff(target, candidate) for candidate in candidates.geometry]
candidates['h_tm'] = pd.Series(h_tm_list)
            # Calculate Hausdorff distances from each candidate to target (h_mt)
h_mt_list = [directed_hausdorff(candidate, target) for candidate in candidates.geometry]
candidates['h_mt'] = pd.Series(h_mt_list)
# Define function to compare major axis azimuths
def azimuth_match(target, candidate, azimuth_tolerance):
if azimuth_tolerance:
target_azimuth = major_axis_azimuth(target)
candidate_azimuth = major_axis_azimuth(candidate)
azimuth_difference_ = azimuth_difference(target_azimuth, candidate_azimuth, directional=False)
if azimuth_difference_ <= azimuth_tolerance:
return True
else:
return False
else:
return True
# Examine each candidate's relationship to the target feature
for candidate in candidates.itertuples():
# Only analyze candidates with length
if candidate.geometry.length > 0:
# Initialize default match values
m_type = None
h_tm = None
t_prop = None
t_seg = None
t_linref = None
h_mt = None
m_prop = None
m_seg = None
m_linref = None
# 1:1
if (
(candidate.h_tm <= distance_tolerance) and
(candidate.h_mt <= distance_tolerance) and
# Check that azimuth is acceptable
azimuth_match(target, candidate.geometry, azimuth_tolerance) and
# Check relative length
(abs(candidate.geometry.length - target.length) <
(1- length_tolerance) * target.length)):
# Whole target matches candidate
h_tm = candidate.h_tm
t_prop = 1
t_seg = target
t_linref = (0, target.length)
# Whole candidate matches target
h_mt = candidate.h_mt
m_prop = 1
m_seg = candidate.geometry
m_linref = (0, candidate.geometry.length)
m_type = '1:1'
# m:1
elif (
(candidate.h_tm <= distance_tolerance) and
(candidate.h_mt > distance_tolerance)):
# Find the candidate segment matching the target
candidate_seg = find_parallel_segment(target, candidate.geometry)
if (candidate_seg and
candidate_seg.length > 0 and
azimuth_match(target, candidate_seg, azimuth_tolerance) and
# Check relative length
(abs(candidate_seg.length - target.length) <
(1- length_tolerance) * target.length)):
# Whole target matches candidate
h_tm = directed_hausdorff(target, candidate_seg)
t_prop = 1
t_seg = target
t_linref = (0, target.length)
# Calculate proportion of candidate included in segment
h_mt = directed_hausdorff(candidate_seg, target)
m_prop = candidate_seg.length / candidate.geometry.length
m_seg = candidate_seg
m_linref = segment_linear_reference(candidate.geometry, candidate_seg)
m_type = 'm:1'
# 1:n
elif (
(candidate.h_tm > distance_tolerance) and
(candidate.h_mt <= distance_tolerance)):
# Find the target segment matching the candidate
target_seg = find_parallel_segment(
candidate.geometry, target, snap_distance=distance_tolerance)
if (target_seg and
target_seg.length > 0 and
azimuth_match(target_seg, candidate.geometry, azimuth_tolerance) and
# Check relative length
(abs(candidate.geometry.length - target_seg.length) <
(1- length_tolerance) * target_seg.length)):
# Calculate proportion of target included in segment
h_tm = directed_hausdorff(target_seg, candidate.geometry)
t_prop = target_seg.length / target.length
t_seg = target_seg
t_linref = segment_linear_reference(target, target_seg)
# Whole candidate matches target
h_mt = directed_hausdorff(candidate.geometry, target_seg)
m_prop = 1
m_seg = candidate.geometry
m_linref = (0, candidate.geometry.length)
m_type = '1:n'
# potential m:n
elif (
(candidate.h_tm > distance_tolerance) and
(candidate.h_mt > distance_tolerance)):
# See if parallel segments can be identified
target_seg = find_parallel_segment(
candidate.geometry, target, snap_distance=distance_tolerance)
candidate_seg = find_parallel_segment(
target, candidate.geometry)
# Measure hausdorff distance (non-directed) between parallel segments
if target_seg and candidate_seg:
h_tm_seg = directed_hausdorff(target_seg, candidate_seg)
h_mt_seg = directed_hausdorff(candidate_seg, target_seg)
if ((h_tm_seg <= distance_tolerance) and
(h_mt_seg <= distance_tolerance) and
target_seg.length > 0 and
candidate_seg.length > 0 and
azimuth_match(target_seg, candidate_seg, azimuth_tolerance) and
# Check relative length
(abs(candidate_seg.length - target_seg.length) <
(1- length_tolerance) * target_seg.length)):
h_tm = h_tm_seg
t_prop = target_seg.length / target.length
t_seg = target_seg
t_linref = segment_linear_reference(target, target_seg)
h_mt = h_mt_seg
m_prop = candidate_seg.length / candidate.geometry.length
m_seg = candidate_seg
m_linref = segment_linear_reference(candidate.geometry, candidate_seg)
m_type = 'm:n'
if t_prop is not None:
m_ids.append(candidate.index)
m_types.append(m_type)
h_tms.append(h_tm)
t_props.append(t_prop)
t_segs.append(t_seg)
t_linrefs.append(t_linref)
h_mts.append(h_mt)
m_props.append(m_prop)
m_segs.append(m_seg)
m_linrefs.append(m_linref)
# Record match stats
match_indices.append(m_ids)
match_types.append(m_types)
h_tms_matches.append(h_tms)
t_props_matches.append(t_props)
t_segs_matches.append(t_segs)
t_linrefs_matches.append(t_linrefs)
h_mts_matches.append(h_mts)
m_props_matches.append(m_props)
m_segs_matches.append(m_segs)
m_linrefs_matches.append(m_linrefs)
# Construct match vector
if isinstance(match_vectors, list):
vectors = []
for t_seg, m_seg in zip(t_segs_matches, m_segs_matches):
if t_seg and m_seg:
vectors.append(LineString([midpoint(t_seg), midpoint(m_seg)]))
match_vectors.append(vectors)
# Report status
if verbose:
if counter % round(length / 10) == 0 and counter > 0:
percent_complete = (counter // round(length / 10)) * 10
minutes = (time()-start) / 60
print('{}% ({} segments) complete after {:04.2f} minutes'.format(percent_complete, counter, minutes))
counter += 1
# Merge joined data with target features
operating_target_features['match_index'] = pd.Series(
match_indices, index=operating_target_features.index)
operating_target_features['match_type'] = pd.Series(
match_types, index=operating_target_features.index)
operating_target_features['h_tm'] = pd.Series(
h_tms_matches, index=operating_target_features.index)
operating_target_features['t_prop'] = pd.Series(
t_props_matches, index=operating_target_features.index)
operating_target_features['t_seg'] = pd.Series(
t_segs_matches, index=operating_target_features.index)
operating_target_features['t_linref'] = pd.Series(
t_linrefs_matches, index=operating_target_features.index)
operating_target_features['h_mt'] = pd.Series(
h_mts_matches, index=operating_target_features.index)
operating_target_features['m_prop'] = pd.Series(
m_props_matches, index=operating_target_features.index)
operating_target_features['m_seg'] = pd.Series(
m_segs_matches, index=operating_target_features.index)
operating_target_features['m_linref'] = pd.Series(
m_linrefs_matches, index=operating_target_features.index)
if isinstance(match_vectors, list):
operating_target_features['match_vectors'] = pd.Series(
match_vectors, index=operating_target_features.index)
# Store original target feature IDs
operating_target_features = operating_target_features.reset_index().rename(columns={'index': 'target_index'})
# Expand targets with more than one match
# Look for lists of match IDs in each row
expanded_targets = []
for i, target in enumerate(operating_target_features.itertuples()):
if isinstance(target.match_index, list):
# Make duplicate rows for each match ID with respective attributes
for j, match in enumerate(target.match_index):
new_row = target._asdict()
new_row.pop('Index', None)
for key, value in target._asdict().items():
if isinstance(value, list):
new_row[key] = value[j]
# Append new row to end of dataframe
operating_target_features = operating_target_features.append(new_row, ignore_index=True)
# Mark original row for deletion
expanded_targets.append(i)
# Delete expanded targets
operating_target_features = operating_target_features.drop(expanded_targets)
# Only analyze matches if there are any
if len(operating_target_features) > 0:
# Identify and add records for unmatched portions of target features
# Get target records that have unmatched portions
unmatched_segments = operating_target_features.copy()
unmatched_segments = unmatched_segments[
(unmatched_segments['t_prop'].notnull()) &
(unmatched_segments['t_prop'] < 1)]
new_target_records = []
# Iterate through groups of target records
for target_index, target_group in unmatched_segments.groupby('target_index'):
# Get the linref intervals associated with each of the matched segments
matched_linrefs = target_group['t_linref'].tolist()
# Combine the intervals
matched_linrefs_merged = merge_intervals(matched_linrefs)
# Get the original target geometry
orig_target_geometry = target_group.iloc[0]['geometry']
# Construct linref intervals for the unmatched parts
geometry_extents = [0, orig_target_geometry.length]
matched_linrefs_list = [linref for tup in matched_linrefs_merged for linref in tup]
all_linrefs_list = sorted(geometry_extents + matched_linrefs_list)
unmatched_linrefs = [
(all_linrefs_list[i], all_linrefs_list[i + 1])
for i in range(0, len(all_linrefs_list), 2)]
unmatched_lines = [
split_line_at_dists(orig_target_geometry, pair)[1]
for pair in unmatched_linrefs]
# For each unmatched line, make a new target record
for unmatched_line, unmatched_linref in zip(unmatched_lines, unmatched_linrefs):
if unmatched_line.length > 1:
# Get all the attributes associated with the original target record
new_target_record = target_group.iloc[0].to_dict()
# Modify the match attributes
new_target_record['match_index'] = np.nan
new_target_record['match_type'] = np.nan
new_target_record['h_tm'] = np.nan
new_target_record['t_prop'] = np.nan
new_target_record['t_seg'] = unmatched_line
new_target_record['t_linref'] = unmatched_linref
new_target_record['h_mt'] = np.nan
new_target_record['m_prop'] = np.nan
new_target_record['m_seg'] = np.nan
new_target_record['m_linref'] = np.nan
new_target_record['geometry'] = orig_target_geometry
new_target_records.append(new_target_record)
# Add new target records to operating features
new_target_records = gpd.GeoDataFrame(new_target_records, geometry='geometry')
operating_target_features = pd.concat([operating_target_features, new_target_records])
# Replace target geometries with target segments (if not NaN)
##### This appears to be duplicated below; not sure if it needs to happen twice
operating_target_features['geometry'] = operating_target_features.apply(
lambda row: row['t_seg'] if isinstance(row['t_seg'], LineString) else row['geometry'], axis=1)
# For each unique target geometry, delete all matches except the closest one
# (expanded targets are deleted if they don't have the closest match)
# Required if 'closest_target'
if closest_match or closest_target:
# Identify sets of records with identical targets
equivalent_target_sets = [d for _, d in operating_target_features.groupby(
['target_index','t_linref']) if len(d) > 1]
# Identify which of these records has the closest match
equivalent_record_ids = []
closest_records = gpd.GeoDataFrame(crs=operating_target_features.crs)
for equivalent_target_set in equivalent_target_sets:
# Keep track of IDs for equivalent records
equivalent_record_ids.extend(equivalent_target_set.index.tolist())
# Identify minimum tc and ct distances and associated indices
h_tm_min_idx = equivalent_target_set['h_tm'].astype(float).idxmin()
h_tm_min = equivalent_target_set['h_tm'].astype(float).min()
h_mt_min_idx = equivalent_target_set['h_mt'].astype(float).idxmin()
h_mt_min = equivalent_target_set['h_mt'].astype(float).min()
# Identify overall closest match
min_idx = h_tm_min_idx if h_tm_min < h_mt_min else h_mt_min_idx
closest_records = closest_records.append(
operating_target_features.loc[[min_idx]], ignore_index=True)
# Drop equivalent records
operating_target_features = operating_target_features.drop(
equivalent_record_ids)
# Add back those with the closest match
operating_target_features = operating_target_features.append(
closest_records, ignore_index=True)
# Ensure that each match feature is only matched to one, closest target feature
# (No targets are deleted, but matches are removed if a given target isn't closest)
if closest_target:
# Identify sets of records with the same match id
match_id_sets = [d for _, d in operating_target_features.groupby(
'match_index') if len(d) > 1]
# Within these sets, identify sets with overlapping linear references
for match_id_set in match_id_sets:
# Get ID for match feature
match_id = match_id_set.iloc[0]['match_index']
# Get raw geometry for match feature
match_geom = match_features.loc[match_id]['geometry']
# Find overlapping linear reference ranges among the original matches
lin_ref_ranges = merge_intervals(match_id_set['m_linref'].tolist())
# Identify sets of records within each range
lin_ref_sets = [match_id_set[match_id_set['m_linref'].apply(
lambda x: True if (x[0] >= lower and x[1] <= upper) else False)]
for lower, upper in lin_ref_ranges]
# Analyze each set of targets with overlapping matches
for lin_ref_set, lin_ref_range in zip(lin_ref_sets, lin_ref_ranges):
# Get the portion of the raw match feature within the linear reference range
_, range_match_geom, _ = split_line_at_dists(match_geom, lin_ref_range)
# Split the linear reference feature into segments parallel to match features
t_seg_endpoints = [x for t_seg in lin_ref_set['t_seg'] for x in endpoints(t_seg)]
t_seg_endpoint_lin_refs = [range_match_geom.project(x) for x in t_seg_endpoints]
range_match_segments = split_line_at_dists(range_match_geom, t_seg_endpoint_lin_refs)
# For each segment, see which target feature is closest based on hausdorff distance
closest_targets = [
nearest_neighbor(
segment,
GeoDataFrame(geometry=lin_ref_set['t_seg']),
hausdorff_distance=True
).index[0]
for segment in range_match_segments]
# Group adjacent segments with the same target
groups = [list(group) for _, group in itertools.groupby(
zip(closest_targets, range_match_segments), key=lambda x: x[0])]
closest_targets = [group[0][0] for group in groups]
match_segments = [[x[1] for x in group] for group in groups]
match_segments = [sh.ops.linemerge(x) for x in match_segments]
# Only move forward if there are match LineString match segments to work with
if LineString in [type(x) for x in match_segments]:
# Remove any non-LineString geometries (e.g., GeometryCollection)
try:
match_segments, closest_targets = zip(
*[(segment, idx) for segment, idx
in zip(match_segments, closest_targets)
if isinstance(segment, LineString)])
except:
match_segment_types = [type(x) for x in match_segments]
closest_target_types = [type(x) for x in closest_targets]
print('match segments: {}, {}'.format(str(match_segment_types), str(match_segments)))
print('closest_targets: {}, {}'.format(str(closest_target_types), str(closest_targets)))
# Calculate the match prop and lin_ref bounds for the grouped match segments
match_props = [x.length/match_geom.length for x in match_segments]
match_lin_refs = [tuple([match_geom.project(point) for point in endpoints(segment)])
for segment in match_segments]
# Update match info for the chosen target
for idx, match_prop, match_segment, match_lin_ref in zip(
closest_targets, match_props, match_segments, match_lin_refs):
# lin_ref_set.at[idx, 'match_index'] = match_id
lin_ref_set.at[idx, 'm_prop'] = match_prop
lin_ref_set.at[idx, 'm_seg'] = match_segment
lin_ref_set.at[idx, 'm_linref'] = match_lin_ref
lin_ref_set.at[idx, 'h_tm'] = directed_hausdorff(
lin_ref_set.at[idx, 't_seg'], match_segment)
lin_ref_set.at[idx, 'h_mt'] = directed_hausdorff(
match_segment, lin_ref_set.at[idx, 't_seg'])
# Remove match info for other targets in set
not_closest_targets = [x for x in lin_ref_set.index
if x not in closest_targets]
for idx in not_closest_targets:
lin_ref_set.at[idx, 't_prop'] = np.nan
# lin_ref_set.at[lin_ref_set_idx, 't_seg'] = np.nan ########### Maybe don't get rid of the t_seg?
lin_ref_set.at[idx, 't_linref'] = np.nan
lin_ref_set.at[idx, 'm_prop'] = np.nan
lin_ref_set.at[idx, 'm_seg'] = np.nan
lin_ref_set.at[idx, 'm_linref'] = np.nan
lin_ref_set.at[idx, 'h_tm'] = np.nan
lin_ref_set.at[idx, 'h_mt'] = np.nan
lin_ref_set.at[idx, 'match_index'] = np.nan
# Remove original lin_ref_set rows from the operating_target_features
operating_target_features = operating_target_features.drop(lin_ref_set.index)
# Append rows from lin_ref_set back onto operating_target_features
operating_target_features = operating_target_features.append(lin_ref_set)
# Gather values from fields of match features
if match_fields and isinstance(match_fields, bool):
match_fields = match_features.columns.tolist()
match_fields.remove('geometry')
elif isinstance(match_fields, list):
match_fields = match_fields
else:
match_fields = []
if match_strings and (match_strings[1] not in match_fields):
match_fields.append(match_strings[1])
# Join fields for matches
operating_target_features = operating_target_features.merge(
match_features[match_fields], how='left', left_on='match_index', right_index=True)
# Join operating target features back onto all target features
target_features = target_features.merge(
operating_target_features.drop(columns=['geometry']),
how='outer', left_index=True, right_on='target_index', suffixes=field_suffixes)
# Sort by original index
target_features = target_features.sort_values(['target_index'])
# Convert empty lists to NaN
target_features = target_features.applymap(
lambda x: np.nan if x == [] else x)
# Convert single-element lists to their sole elements
target_features = target_features.applymap(
lambda x: x[0] if (isinstance(x, list) and len(x) == 1) else x)
# Calculate string matches, if specified
if match_strings:
def fuzzy_score(row, col_a, col_b):
a = row[col_a]
b = row[col_b]
def standardize_and_score(a, b):
a = standardize_streetname(str(a))
b = standardize_streetname(str(b))
return (fuzz.token_set_ratio(a, b) / 100)
# Inputs could be lists, so make them lists if they aren't
a_list = listify(a)
b_list = listify(b)
# Get fuzzy scores for each string combination
scores = []
for a in a_list:
for b in b_list:
if (pd.notnull(a) and pd.notnull(b)):
scores.append(standardize_and_score(a, b))
if len(scores) > 0:
return scores
else:
return np.nan
target_string, match_string = match_strings
if match_string in original_target_feature_columns:
target_string = target_string + field_suffixes[0]
match_string = match_string + field_suffixes[1]
target_features['match_strings'] = target_features.apply(
fuzzy_score, args=(target_string, match_string), axis=1)
# Replace geometry with t_seg if there is one available
target_features['geometry'] = target_features.apply(
lambda row: row['t_seg'] if isinstance(row['t_seg'], LineString) else row['geometry'], axis=1)
# Drop stats columns if not specifically requested
if not match_stats:
target_features = target_features.drop(
columns=['h_tm','t_prop','t_seg','t_linref','h_mt','m_prop','m_seg','m_linref'])
# Move target index to front
target_features = df_first_column(target_features, 'target_index')
# Move the geometry column to the end
target_features = df_last_column(target_features, 'geometry')
# Reset the index
target_features = target_features.reset_index(drop=True)
# Ensure that crs is the same as original
target_features.crs = original_crs
# Report done
if verbose:
print('100% ({} segments) complete after {:04.2f} minutes'.format(counter, (time()-start) / 60))
return target_features | d9c08e8e156a525495a03e5e9d6881c33ecdf0a2 | 3,654,824 |
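`match_lines_by_hausdorff` depends on geometry and string helpers defined elsewhere in its module (e.g. `directed_hausdorff`, `find_parallel_segment`), so the sketch below only illustrates the calling convention with two hypothetical GeoDataFrames of street centerlines:

import geopandas as gpd
from shapely.geometry import LineString

targets = gpd.GeoDataFrame(
    {'name': ['Main St', 'Oak Ave']},
    geometry=[LineString([(0, 0), (100, 0)]), LineString([(0, 50), (80, 55)])],
    crs='EPSG:32610')
candidates = gpd.GeoDataFrame(
    {'street': ['MAIN STREET', 'OAK AVENUE']},
    geometry=[LineString([(2, 1), (98, 2)]), LineString([(1, 52), (79, 56)])],
    crs='EPSG:32610')

matched = match_lines_by_hausdorff(
    targets, candidates,
    distance_tolerance=10,      # metres, same unit as the projected CRS
    azimuth_tolerance=20,       # degrees
    match_fields=True,
    match_strings=('name', 'street'))
print(matched[['target_index', 'match_index', 'street']])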
import numpy as np
def create_anchors_3d_stride(grid_size,
voxel_size=[0.16, 0.16, 0.5],
coordinates_offsets=[0, -19.84, -2.5],
dtype=np.float32):
"""
Args:
feature_size: list [D, H, W](zyx)
sizes: [N, 3] list of list or array, size of anchors, xyz
Returns:
anchors: [*feature_size, num_sizes, num_rots, 7] tensor.
"""
# almost 2x faster than v1
x_stride, y_stride, z_stride = voxel_size
x_offset, y_offset, z_offset = coordinates_offsets
x_centers = np.arange(grid_size[0], dtype=dtype)
y_centers = np.arange(grid_size[1], dtype=dtype)
z_centers = np.arange(grid_size[2], dtype=dtype)
z_centers = z_centers * z_stride + z_offset + 0.25
y_centers = y_centers * y_stride + y_offset + 0.08
x_centers = x_centers * x_stride + x_offset + 0.08
xx, yy, zz = np.meshgrid(x_centers, y_centers, z_centers)
sizes = np.stack((xx, yy , zz), axis=-1)
sizes = np.reshape(sizes, [-1,3])
return sizes | 129e54a855bbacb2026eb08b5741ab70dd0374f4 | 3,654,828 |
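A small usage example on a hypothetical 4 x 4 x 2 voxel grid using the default stride and offsets:

anchors = create_anchors_3d_stride([4, 4, 2])
print(anchors.shape)  # (32, 3) -- one (x, y, z) anchor center per voxel
print(anchors[:2])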
def combined_roidb(imdb_names):
"""
Combine multiple roidbs
"""
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
imdb.set_proposal_method("gt")
print('Set proposal method: {:s}'.format("gt"))
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = imdb2(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
return imdb, roidb | 4660a6ffff11511c449629c9fdb7f5d566a886f9 | 3,654,829 |
from gluon import A, DIV, P, URL
def render_locations_profile(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Profile Page
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = record["gis_location.name"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
# Placeholder to maintain style
#logo = DIV(IMG(_class="media-object"),
# _class="pull-left")
# We don't Edit Locations
# Edit Bar
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# vars = {"refresh": list_id,
# "record": record_id,
# }
# f = current.request.function
# if f == "organisation" and organisation_id:
# vars["(organisation)"] = organisation_id
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars=vars),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Render the item
item = DIV(DIV(DIV(#SPAN(A(name,
# _href=location_url,
# ),
# _class="location-title"),
#" ",
#edit_bar,
P(A(name,
_href=location_url,
),
_class="card_comments"),
_class="span5"), # card-details
_class="row",
),
)
return item | dbec0b41b16fa48996a735372bfb001b386c7300 | 3,654,830 |
def exception_log_and_respond(exception, logger, message, status_code):
"""Log an error and send jsonified respond."""
logger.error(message, exc_info=True)
return make_response(
message,
status_code,
dict(exception_type=type(exception).__name__, exception_message=str(exception)),
) | c784efd4b8adbbc463ff1d2a499ffd598253349d | 3,654,832 |
import re
def parse_cdhit_clusters(cluster_file):
"""
Parses cdhit output into three collections in a named tuple:
clusters: list of lists of gene ids.
reps: list of representative gene for each cluster
lookup: dict mapping from gene names to cluster index
In this setup, cluster ids are the position in either of the
first two lists.
"""
# re-call with file-like object if we are give an path
if isinstance(cluster_file, str):
with open(cluster_file) as cluster_handle:
return parse_cdhit_clusters(cluster_handle)
# initialize final containers
clusters = []
cluster_reps = []
cluster_lookup = {}
# expression for parsing cluster line (captures gene name and alignment)
gene_expr = re.compile(r"\s>(\S+)\.\.\.\s\s*(.+)\s*$")
# loop over lines
for line in cluster_file:
if line.startswith(">"):
# create a new cluster
cluster = []
cluster_id = len(clusters)
clusters.append(cluster)
continue
# parse gene name from line
gene, alignment = gene_expr.search(line).groups()
if alignment.strip() == "*":
cluster_reps.append(gene)
cluster_lookup[gene] = cluster_id
cluster.append(gene)
# done
return CdhitClusters(clusters, cluster_reps, cluster_lookup) | fe0634c1991f0bd687f8be675ff15cb3290c919c | 3,654,833 |
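A usage sketch on an in-memory cd-hit `.clstr` fragment; `CdhitClusters` is referenced but not defined in the snippet, so a minimal named-tuple stand-in (matching the docstring) is assumed here:

from collections import namedtuple
from io import StringIO

CdhitClusters = namedtuple("CdhitClusters", ["clusters", "reps", "lookup"])  # assumed shape

clstr = StringIO(
    ">Cluster 0\n"
    "0\t300aa, >geneA... *\n"
    "1\t290aa, >geneB... at 95.2%\n"
    ">Cluster 1\n"
    "0\t150aa, >geneC... *\n"
)
result = parse_cdhit_clusters(clstr)
print(result.clusters)  # [['geneA', 'geneB'], ['geneC']]
print(result.reps)      # ['geneA', 'geneC']
print(result.lookup)    # {'geneA': 0, 'geneB': 0, 'geneC': 1}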
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
def evaluate(model: nn.Module, dataloader: DataLoader) -> Scores:
"""
Evaluate a model without gradient calculation
:param model: instance of a model
:param dataloader: dataloader to evaluate the model on
:return: tuple of (accuracy, loss) values
"""
score = 0
loss = 0
    device = "cuda" if torch.cuda.is_available() else "cpu"
    loss_func = nn.LogSoftmax(dim=1).to(device)
for i, x in enumerate(dataloader):
img = x[0]
ans = x[1]
ques = x[2]
if torch.cuda.is_available():
img = img.cuda()
ans = ans.cuda()
ques = ques.cuda()
y_hat = model((img, ques))
img = None
ques = None
nll = -loss_func(y_hat)
score += train_utils.batch_accuracy(y_hat, ans.data).sum()
ans = answer_norm(ans)
loss += (nll * ans).sum(dim=1).mean()
loss /= len(dataloader.dataset)
score /= len(dataloader.dataset)
score *= 100
print("val loss = ", loss)
return score, loss | f23fbd72a24122b3a665f29918c52bbd5515d204 | 3,654,834 |
from operator import and_
def remote_judge_get_problem_info(problem_id: str, contest_id: int = -1, contest_problem_id: int = -1):
"""
{
"code":0,
"data":{
"isContest":"是否在比赛中",
"problemData":{
"title":"题目名",
"content":"题目内容",
"background":"题目背景",
"inputFormat":"输入格式",
"outputFormat":'输出格式',
"examples":[{"input":"样例输入","output":"样例输出"}],
"createTime":"创建时间",
"uploaderProfile":{
"uid":"用户ID",
"username":"用户名"
},
"remoteProblemID":"远程题目ID",
"remoteOJ":{
"id":"远程OJID",
"display":"远程OJ显示名",
"availableLanguages":[
{"id":"0","display":"C++"}
]
},
"public":"是否公开",
"hint":"提示",
"recentDiscussions":[
{
"id":123,
"title":"qw"
}
],
"acceptedCount":"",
"submissionCount":""
},
"userData":{
"lastCode":"上次提交的代码",
"lastLanguage":"上次选择的语言",
"status":"qwq",
"id":"",
"accounts":{
"id":{
"username":"用户名",
"oj":"OJ",
"accountID":"ID"
}
}
}
}
}
"""
# in_contest = contest_id != -1
contest: Contest = Contest.by_id(contest_id)
if contest:
# pass
if not contest.running() and not permission_manager.has_permission(session.get("uid"), "contest.manage"):
return make_response(-1, message="你没有权限查看此题目")
print(contest_problem_id,"contest_problem_id")
problem: Problem = db.session.query(Problem).filter(
Problem.id == contest.problems[contest_problem_id]["id"]).one_or_none()
else:
problem: Problem = db.session.query(Problem).filter(
Problem.id == problem_id).one_or_none()
if not permission_manager.has_permission(session.get("uid"), "remote_judge.use") and problem.uploader_id != int(session.get("uid")):
return make_response(-1, message="你没有权限查看该题目")
if not problem:
return make_response(-1, message="未知题目ID")
if problem.problem_type != "remote_judge":
return make_response(-1, message="此题目非远程评测题目")
uploader: User = db.session.query(User.id, User.username).filter(
User.id == problem.uploader_id).one()
last_submission: Submission = db.session.query(Submission).filter(and_(
Submission.problem_id == problem.id,
Submission.uid == session.get("uid")
)).order_by(Submission.score.desc()).order_by(Submission.id.desc())
last_code, last_language, submission_id, status = "", next(iter(
config.REMOTE_JUDGE_OJS[problem.remote_judge_oj]["availableLanguages"].keys())), -1, None
if last_submission.count():
last_submission = last_submission.first()
last_code = last_submission.code
last_language = last_submission.language
status = last_submission.status
submission_id = last_submission.id
discussions = [
]
discussions_query = db.session.query(Discussion.id, Discussion.title).filter(
Discussion.path == f"discussion.problem.{problem.id}").order_by(Discussion.id.desc()).limit(5)
for item in discussions_query:
discussions.append({
"id": item.id,
"title": item.title
})
accounts = {}
for item in db.session.query(RemoteAccount.account_id, RemoteAccount.username, RemoteAccount.oj).filter(
and_(
RemoteAccount.uid == session.get("uid", -1),
RemoteAccount.oj == problem.remote_judge_oj
)
):
accounts[item.account_id] = {
"username": item.username,
"oj": config.REMOTE_JUDGE_OJS[item.oj]["display"],
"accountID": item.account_id
}
return make_response(0, data={
"isContest": contest is not None,
"problemData": {
"title": problem.title,
"content": problem.content,
"background": problem.background,
"inputFormat": problem.input_format,
"outputFormat": problem.output_format,
"examples": problem.example,
"createTime": problem.create_time,
"uploaderProfile": {
"uid": uploader.id,
"username": uploader.username
} if not contest else None,
"remoteProblemID": problem.remote_problem_id if not contest else None,
"remoteOJ": {
"id": problem.remote_judge_oj,
**config.REMOTE_JUDGE_OJS[problem.remote_judge_oj]
},
"public": problem.public if not contest else None,
"hint": problem.hint,
"recentDiscussions": discussions if not contest else None,
"acceptedCount": db.session.query(Submission).filter(Submission.problem_id == problem.id).filter(Submission.status == "accepted").count() if not contest else None,
"submissionCount": db.session.query(Submission).filter(Submission.problem_id == problem.id).count() if not contest else None,
"id": problem.id
},
"userData": {
"lastCode": last_code,
"lastLanguage": last_language,
"status": status,
"id": submission_id,
"managable": permission_manager.has_permission(
session.get("uid", None), "problem.manage"),
"accounts": accounts
}
}) | aa7f8100bc7516659cf535e0fa7222b6f7b1a065 | 3,654,835 |
def can_write(obj, user):
"""
Takes article or related to article model.
Check if user can write article.
"""
return obj.can_write(user) | 9cb7cc046b63fb82670c4667abe169d6a1a279e4 | 3,654,836 |
from xmltodict import unparse
def create_external_question(url: str, height: int) -> str:
"""Create XML for an MTurk ExternalQuestion."""
return unparse({
'ExternalQuestion': {
'@xmlns': 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd',
'ExternalURL': url,
'FrameHeight': height
}
}, full_document=False) | d249e82225ab2c1546bd871c166e9b683622a15d | 3,654,837 |
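A quick usage example with a hypothetical task URL (requires the `xmltodict` package imported above):

xml = create_external_question("https://tasks.example.com/annotate?item=42", 600)
print(xml)
# prints a single-line <ExternalQuestion> element containing the URL and frame height,
# ready to pass as the Question parameter of an MTurk CreateHIT request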
def credentials_batch_account_key_secret_id(config):
# type: (dict) -> str
"""Get Batch account key KeyVault Secret Id
:param dict config: configuration object
:rtype: str
:return: keyvault secret id
"""
try:
secid = config[
'credentials']['batch']['account_key_keyvault_secret_id']
if util.is_none_or_empty(secid):
raise KeyError()
except KeyError:
return None
return secid | 4e7cfb100c2d50ef13d47295ff0b5bb0e3351986 | 3,654,838 |
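A usage sketch with a hypothetical configuration dict; note the snippet relies on a `util.is_none_or_empty` helper from its own package:

config = {
    'credentials': {
        'batch': {
            'account_key_keyvault_secret_id':
                'https://myvault.vault.azure.net/secrets/batchkey'
        }
    }
}
print(credentials_batch_account_key_secret_id(config))
# -> https://myvault.vault.azure.net/secrets/batchkey
print(credentials_batch_account_key_secret_id({'credentials': {'batch': {}}}))
# -> None (missing or empty settings fall back to None instead of raising)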
import re
def is_C2D(lname):
"""
"""
pattns = ['Conv2D']
return any([bool(re.match(t,lname)) for t in pattns]) | a12bfd9857543e568148659f782615b3f2de4b83 | 3,654,839 |
def encounter_media(instance, filename):
"""Return an upload file path for an encounter media attachment."""
if not instance.encounter.id:
instance.encounter.save()
return 'encounter/{0}/{1}'.format(instance.encounter.source_id, filename) | 79e4d8fae1d41edf362e99e6da11442a71565aa0 | 3,654,840 |
import datetime
def time_range_cutter_at_time(local,time_range,time_cut=(0,0,0)):
""" Given a range, return a list of DateTimes that match the time_cut
between start and end.
:param local: if False [default] use UTC datetime. If True use localtz
:param time_range: the TimeRange object
:param time_cut: HH:MM:SS of when to cut. eg: (0,0,0) for midnight
"""
( start, end ) = time_range.get(local)
index = start.replace(
hour=time_cut[0],
minute=time_cut[1],
second=time_cut[2]
)
cuts = []
index += datetime.timedelta(days=1)
while index < end:
cuts.append(index)
index += datetime.timedelta(days=1)
if local:
index = time_range.normalize(index)
return cuts | 57e851fb5b6ae8873dde5719dec668c25561f687 | 3,654,842 |
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, Conv2D, LeakyReLU, ZeroPadding2D
from tensorflow.keras.regularizers import l2
def _darknet_conv(
x: np.ndarray, filters: int, size: int, strides: int = 1, batch_norm: bool = True
) -> tf.Tensor:
"""create 1 layer with [padding], conv2d, [bn and relu]"""
if strides == 1:
padding = "same"
else:
x = ZeroPadding2D(((1, 0), (1, 0)))(x) # top left half-padding
padding = "valid"
x = Conv2D(
filters=filters,
kernel_size=size,
strides=strides,
padding=padding,
use_bias=not batch_norm,
kernel_regularizer=l2(0.0005),
)(x)
if batch_norm:
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.1)(x)
return x | f58153aa0c8af8df93289b872309f1c907941848 | 3,654,843 |
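Usage sketch stacking two Darknet conv blocks with the Keras functional API on a dummy 64x64 RGB input:

import tensorflow as tf

inputs = tf.keras.Input(shape=(64, 64, 3))
x = _darknet_conv(inputs, filters=32, size=3)         # same spatial size
x = _darknet_conv(x, filters=64, size=3, strides=2)   # downsample by 2
model = tf.keras.Model(inputs, x)
model.summary()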
def _build_topic_to_consumer_topic_state_map(watermarks):
"""Builds a topic_to_consumer_topic_state_map from a kafka
get_topics_watermarks response"""
return {
topic: ConsumerTopicState({
partition: int((marks.highmark + marks.lowmark) / 2)
for partition, marks in watermarks_map.items()
}, None)
for topic, watermarks_map in watermarks.items()
} | 78ef0710e4823031ad079313484dba0eacc37135 | 3,654,844 |
from typing import Optional
def elgamal_keypair_from_secret(a: ElementModQ) -> Optional[ElGamalKeyPair]:
"""
Given an ElGamal secret key (typically, a random number in [2,Q)), returns
an ElGamal keypair, consisting of the given secret key a and public key g^a.
"""
secret_key_int = a
if secret_key_int < 2:
log_error("ElGamal secret key needs to be in [2,Q).")
return None
return ElGamalKeyPair(a, g_pow_p(a)) | 35de350b6bb434e1bb3d2c52d90f9a96be72dc1f | 3,654,845 |
def current_default_thread_limiter():
"""Get the default `~trio.CapacityLimiter` used by
`trio.to_thread.run_sync`.
The most common reason to call this would be if you want to modify its
:attr:`~trio.CapacityLimiter.total_tokens` attribute.
"""
try:
limiter = _limiter_local.get()
except LookupError:
limiter = CapacityLimiter(DEFAULT_LIMIT)
_limiter_local.set(limiter)
return limiter | 7abec5d74b9cfdaa663fd432587ea19440b7132f | 3,654,846 |
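A usage sketch against trio's public API (the snippet mirrors trio's internal default-limiter logic); here the default limit is raised before dispatching work to a worker thread:

import trio

async def main():
    limiter = trio.to_thread.current_default_thread_limiter()
    limiter.total_tokens = 20   # allow up to 20 concurrent worker threads
    await trio.to_thread.run_sync(print, "ran in a worker thread")

trio.run(main)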
import copy
import numpy as np
def _mask_board(board):
"""
A function that copies the inputted board replaces all ships with empty coordinates to mask them.
:param board: a 2D numpy array containing a string representation of the board. All ships should be visible.
:return: a 2D numpy array containing a string representation of the board, with all ships hidden.
"""
masked = copy.deepcopy(board) # copy operation
for (y, x), val in np.ndenumerate(board):
if val.isdigit():
masked[y][x] = ''
return masked | c6832c90ac96d61563e37482773abf627d92a05a | 3,654,847 |
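A small example with a hypothetical Battleship-style board where digits mark ship cells and letters mark shots:

import numpy as np

board = np.array([['', '1', '1'],
                  ['X', '', 'O'],
                  ['', '2', '']])
print(_mask_board(board))
# [['' '' '']
#  ['X' '' 'O']
#  ['' '' '']]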
def remove_head_id(ref, hyp):
"""Assumes that the ID is the begin token of the string which is common
in Kaldi but not in Sphinx."""
ref_id = ref[0]
hyp_id = hyp[0]
if ref_id != hyp_id:
print('Reference and hypothesis IDs do not match! '
'ref="{}" hyp="{}"\n'
'File lines in hyp file should match those in the ref file.'.format(ref_id, hyp_id))
exit(-1)
ref = ref[1:]
hyp = hyp[1:]
return ref, hyp | 210798e8a02f555f70a1d9f2de9ce098dd0669fb | 3,654,849 |
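Example with hypothetical Kaldi-style token lists, where the first token is the utterance ID:

ref = ['utt_001', 'the', 'cat', 'sat']
hyp = ['utt_001', 'the', 'cat', 'sat', 'down']
ref, hyp = remove_head_id(ref, hyp)
print(ref)  # ['the', 'cat', 'sat']
print(hyp)  # ['the', 'cat', 'sat', 'down']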
import numpy as np
def convert_image_np(inp):
"""Convert a Tensor to numpy image."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
return inp | 446feda40cc6698b5cbc80c3b14fa3212ef2800b | 3,654,850 |
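Usage sketch on a dummy normalized CHW tensor (e.g. one image taken out of a batch):

import torch

tensor_img = torch.randn(3, 224, 224)
img = convert_image_np(tensor_img)
print(img.shape)   # (224, 224, 3), values clipped to [0, 1]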
def get_miner_day_list():
"""
    Per-day miner data for storage providers
:return:
"""
miner_no = request.form.get("miner_no")
date = request.form.get("date")
data = MinerService.get_miner_day_list(miner_no, date)
return response_json(data) | 4fd523e8855ba498a1d694e532d27c863e7f9407 | 3,654,851 |
def get_notebook_logs(experiment_id, operator_id):
"""
Get logs from a Experiment notebook.
Parameters
----------
experiment_id : str
operator_id : str
Returns
-------
dict or None
Operator's notebook logs. Or None when the notebook file is not found.
"""
notebook = get_jupyter_notebook(experiment_id, operator_id)
if not notebook:
return None
notebook = notebook["content"]
logs = {}
for cell in notebook["cells"]:
try:
metadata = cell["metadata"]["papermill"]
if metadata["exception"] and metadata["status"] == "failed":
for output in cell["outputs"]:
if output["output_type"] == "error":
error_log = output["traceback"]
traceback = remove_ansi_escapes(error_log)
logs = {"exception": output["ename"], "traceback": traceback}
except KeyError:
pass
return logs | d98865cdbca25839bb6010ab5e726fd35d162ada | 3,654,852 |
from typing import Callable
def modify_env2(
function: Callable[[_UpdatedType], _SecondType],
) -> Kinded[Callable[
[Kind2[_Reader2Kind, _FirstType, _SecondType]],
Kind2[_Reader2Kind, _FirstType, _UpdatedType],
]]:
"""
Modifies the second type argument of a ``ReaderBased2``.
In other words, it modifies the function's
signature from: ``a -> b``
to: ``Container[x, a] -> Container[x, b]``
.. code:: python
>>> from returns.pointfree import modify_env2
>>> from returns.context import RequiresContext
>>> def multiply(arg: int) -> RequiresContext[int, int]:
... return RequiresContext(lambda deps: arg * deps)
>>> assert modify_env2(int)(multiply(3))('4') == 12
Note, that this function works with only ``Kind2`` containers
with ``.modify_env`` method.
See :class:`returns.primitives.interfaces.specific.reader.ReaderBased2`
for more info.
"""
@kinded
def factory(
container: Kind2[_Reader2Kind, _FirstType, _SecondType],
) -> Kind2[_Reader2Kind, _FirstType, _UpdatedType]:
return internal_modify_env2(container, function)
return factory | 5ed2c5deaaa376e4884f31e3ba08d3b2839cc1a5 | 3,654,853 |
def model_trees(z, quantiles, normed=False,
dbhfile='c:\\projects\\MLM_Hyde\\Data\\hyde_runkolukusarjat.txt',
plot=False,
biomass_function='marklund'):
"""
reads runkolukusarjat from Hyde and creates lad-profiles for pine, spruce and decid.
Args:
z - grid (m)
quantiles - cumulative frequency limits for grouping trees
normed - True returns sum(lad*dz) normalized to unity
Returns:
lad_p, lad_s, lad_d - leaf-area density profiles for model treegroups (m2/m3)
n_p, n_s, n_d - trees / ha in model treegroups
"""
dat = np.loadtxt(dbhfile, skiprows=1)
dz = z[1]-z[0]
M = len(quantiles)
# year 2008 data
pine = dat[:, [0, 1]]
spruce = dat[:, [0, 2]]
decid = dat[:, [0, 3]]
# pines
h, hb, mleaf, L, a = profiles_hyde(pine, 'pine', z, biomass_function=biomass_function)
n = pine[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_p = np.zeros([len(z), M])
n_p = np.zeros(M)
lai_p = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_p[:, k] = np.sum(a[:, f], axis=1)
n_p[k] = np.sum(n[f])
lai_p[k] = sum(dz*lad_p[:,k])
m = quantiles[k]
if normed:
lad_p[:, k] = lad_p[:, k] / np.maximum(np.sum(lad_p[:, k] * dz), eps)
# spruces
h, hb, mleaf, L, a = profiles_hyde(spruce, 'spruce', z, biomass_function=biomass_function)
n = spruce[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_s = np.zeros([len(z), M])
n_s = np.zeros(M)
lai_s = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_s[:, k] = np.sum(a[:, f], axis=1)
n_s[k] = np.sum(n[f])
lai_s[k] = sum(dz*lad_s[:,k])
m = quantiles[k]
if normed:
lad_s[:, k] = lad_s[:, k] / np.maximum(np.sum(lad_s[:, k] * dz), eps)
# decid
h, hb, mleaf, L, a = profiles_hyde(decid, 'birch', z, biomass_function=biomass_function)
n = decid[:, 1]
c = np.cumsum(n) / np.maximum(sum(n), eps) # relative frequency
m = 0.0
lad_d = np.zeros([len(z), M])
n_d = np.zeros(M)
lai_d = np.zeros(M)
for k in range(M):
f = np.where((c > m) & (c <= quantiles[k]))[0]
lad_d[:, k] = np.sum(a[:, f], axis=1)
n_d[k] = np.sum(n[f])
lai_d[k] = sum(dz*lad_d[:,k])
m = quantiles[k]
if normed:
lad_d[:, k] = lad_d[:, k] / np.maximum(np.sum(lad_d[:, k] * dz), eps)
if plot:
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
plt.figure(figsize=(2.5,3.5))
for k in range(M):
plt.plot(lad_p[:, k],z,color=colors[0], label='pine, %.2f m$^2$m$^{-2}$' % lai_p[k])#,lad_g,z)
plt.plot(lad_s[:, k],z,color=colors[1], label='spruce, %.2f m$^2$m$^{-2}$' % lai_s[k])
plt.plot(lad_d[:, k],z,color=colors[2], label='decid, %.2f m$^2$m$^{-2}$' % lai_d[k])
plt.title(" ")#dbhfile.split("/")[-1])
plt.ylabel('height [m]')
if normed:
plt.xlabel('normalized lad [-]')
else:
plt.xlabel('lad [m$^2$m$^{-3}$]')
plt.tight_layout()
return lad_p, lad_s, lad_d, n_p, n_s, n_d, lai_p, lai_s, lai_d | ba3c1ea345031a8b5434e1dd4f005b1c2c1e74ce | 3,654,854 |
def inject_general_timeline():
"""This function injects the function object 'Tweet.get_general_timeline'
into the application context so that 'get_general_timeline' can be accessed
in Jinja2 templates.
"""
return dict(get_general_timeline=Tweet.get_general_timeline) | 56b395da0facda561061c8f63eb3eb26c07f3605 | 3,654,855 |
def get_vaccinated_model(model, area=None):
"""Get all states that can be vaccinated or recovered (by area).
Parameters
----------
model : amici.model
Amici model which should be evaluated.
    area : str, optional
        Name of a single area used to filter states (all areas if None).
Returns
-------
states : list
List of states that can be vaccinated.
"""
if area is None:
states = [
x
for x in model.getStateNames()
if not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
]
else:
states = [
x
for x in model.getStateNames()
if (
not ("vac0" in x)
and (("susceptible" in x) or ("infectious" in x))
or ("recovered" in x)
)
and (area in x)
]
return states | c03a9d048abb08561463b1975ffec663f24267b3 | 3,654,857 |
from datetime import datetime
def MicrosecondsToDatetime(microseconds):
"""Returns a datetime given the number of microseconds, or None."""
if microseconds:
return datetime.utcfromtimestamp(float(microseconds) / 1000000)
return None | 69fd3dc3b8d1a97e7a64037cabe988365b2c6e63 | 3,654,858 |
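Two quick examples; note that a value of 0 is falsy and therefore also returns None:

print(MicrosecondsToDatetime(1609459200000000))  # 2021-01-01 00:00:00 (UTC)
print(MicrosecondsToDatetime(0))                 # None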