content (stringlengths 39–14.9k) | sha1 (stringlengths 40) | id (int64 0–710k)
---|---|---
def _convert_json_keywords(json_dict, conversion_dict):
    """
    Makes a shallow copy of a dictionary with JSON-formatted keys, producing a dictionary
    with Python-formatted keys
    :param json_dict: the JSON dictionary
    :param conversion_dict: a dictionary that maps JSON keywords to their Python equivalents. Any
    keywords not present here are assumed to be identical in both.
    :return: a new dictionary with Python-formatted keys
    """
    converted_dict = {}
    # iterate over key/value pairs; iterating json_dict directly would yield only keys
    for keyword, value in json_dict.items():
        # fall back to the original keyword when no mapping is provided
        python_keyword = conversion_dict.get(keyword, keyword)
        converted_dict[python_keyword] = value
    return converted_dict
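A quick usage sketch (the payload and mapping below are made up for illustration):

json_payload = {"userName": "ada", "age": 36}
mapping = {"userName": "user_name"}
assert _convert_json_keywords(json_payload, mapping) == {"user_name": "ada", "age": 36}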
|
bef2749d49411489b61a6692eea15c06004ab264
| 698,097 |
def DSER(results):
"""DA Segmentation Rate: number of segments of the
reference incorrectly segmented
over number of reference segments.
"""
assert len(results) == 2
CorrectSegs = results[0]
TotalSegs = results[1]
return ((TotalSegs-CorrectSegs)/TotalSegs) * 100
|
5d74c7bd3329448609fe36fa4d7778155c54d7c1
| 698,099 |
def normalize_dataframe(df):
""" Converts the total occurrences and total docs into percentages"""
df.total_occurrences = df.total_occurrences * 100 / df.total_occurrences.sum()
df.total_docs = df.total_docs * 100 / df.total_docs.sum()
return df
|
081efaf887b4465bffca9ebe8d2c8cb11e32f720
| 698,104 |
def set_element(base, index, value):
"""Implementation of perl = on an array element"""
base[index] = value
return value
|
11940389f4f24868c5afc1e4dbbcbf370a1af102
| 698,105 |
import click
def command_line_input_output_file_arguments(f):
"""
Decorator for specifying input and output file arguments in a command
"""
f = click.argument("outfile", type=click.File("w"), default="-")(f)
f = click.argument("infile", type=click.File("r"), default="-")(f)
return f
|
832a527c900d6dc073f0e543d06c48696731c9d7
| 698,106 |
from typing import Sequence
from typing import Any
def _get_cont_out_labels(network_structure: Sequence[Sequence]) -> Any:
"""
Compute the contracted and free labels of `network_structure`.
Contracted labels are labels appearing more than once,
free labels are labels appearing exactly once.
Computed lists are ordered according to int and ASCII ordering
for integer and string values, with first entries in each list
being ordered integer labels followed by ASCII ordered string
labels.
    Returns:
        int_cont_labels, str_cont_labels, int_out_labels, str_out_labels: The
            contracted and free labels of `network_structure`, split into
            integer and string labels.
"""
flat_labels = [l for sublist in network_structure for l in sublist]
out_labels = [l for l in flat_labels if flat_labels.count(l) == 1]
int_out_labels = sorted([o for o in out_labels if not isinstance(o, str)
])[::-1]
# pylint: disable=unnecessary-lambda
str_out_labels = sorted([o for o in out_labels if isinstance(o, str)],
key=lambda x: str(x))
cont_labels = []
for l in flat_labels:
if (flat_labels.count(l) > 1) and (l not in cont_labels):
cont_labels.append(l)
int_cont_labels = sorted([o for o in cont_labels if not isinstance(o, str)])
# pylint: disable=unnecessary-lambda
str_cont_labels = sorted([o for o in cont_labels if isinstance(o, str)],
key=lambda x: str(x))
return int_cont_labels, str_cont_labels, int_out_labels, str_out_labels
|
f749b67789a00fe1cf0b2491a0a476d3882de9be
| 698,114 |
def get_avg_repr(inp_reprs, indxs, indxs_mask):
"""
Returns the average representation based on passed indxs and mask.
:param inp_reprs: [batch_size1, dim]
:param indxs: [batch_size2, seq_len]
:param indxs_mask: [batch_size2, seq_len]
:return: [batch_size2, dim]
"""
sel_reprs = inp_reprs[indxs] # [batch_size2, seq_len, dim]
avg_reprs = (sel_reprs * indxs_mask.unsqueeze(-1)).sum(dim=1)
avg_reprs = avg_reprs / indxs_mask.sum(-1, keepdim=True).float()
return avg_reprs
|
7cf4cc78c108cfe58691fe7b0cec3b2c3608230c
| 698,115 |
import hashlib
def hashhex(s):
    """
    Returns a hexadecimal formatted SHA1 hash of the input string.
    """
    h = hashlib.sha1()
    # hashlib requires bytes; encode first if a str was passed
    if isinstance(s, str):
        s = s.encode("utf-8")
    h.update(s)
    return h.hexdigest()
|
a96fb004984a583c72fdbb7f90ce705858ab8f9d
| 698,116 |
def convert_coord(x_center, y_center, radius):
"""
Convert coordinates from central point to top left point
:param x_center: x coordinate of the center
:param y_center: y coordinate of the center
:param radius: the radius of the ball
:return: coordinates of top left point of the surface
"""
x = x_center - radius
y = y_center - radius
return x, y
|
95a6cfd91fd7a59d7995820d3d5fceba6ff985a1
| 698,119 |
def chop(x,y,s0,s1):
"""Chop two 1-d numpy arrays from s0 to s1"""
return x[s0:s1], y[s0:s1]
|
e43b4cbad862558862bb3539a4eac0add1bd14a1
| 698,121 |
def psri(b3, b4, b6):
"""
Plant Senescence Reflectance Index (Merzlyak et al., 1999).
.. math:: PSRI = (b4 - b3)/b6
:param b3: Green.
:type b3: numpy.ndarray or float
:param b4: Red.
:type b4: numpy.ndarray or float
:param b6: Red-edge 2.
:type b6: numpy.ndarray or float
:returns PSRI: Index value
.. Tip::
Merzlyak, M.N.; Gitelson, A.A.; Chivkunova, O.B.; Rakitin, V.Y. 1999. \
Non-destructive optical detection of pigment changes during leaf \
senescence and fruit ripening. Physiologia Plantarum 106, 135-141. \
doi:10.1034/j.1399-3054.1999.106119.x.
"""
PSRI = (b4 - b3)/b6
return PSRI
|
3be121d6e0852a6a83d307773ffacf8fddec9f9d
| 698,122 |
def no_nodes(G):
"""
    returns the number of nodes in an undirected network
"""
return len(G)
|
9504cc092ae63069399124e478b6c9b20d3879c9
| 698,123 |
def PNT2Tidal_Tv14(XA,chiA=0,chiB=0,AqmA=0,AqmB=0,alpha2PNT=0):
""" TaylorT2 2PN Quadrupolar Tidal Coefficient, v^14 Timing Term.
XA = mass fraction of object
chiA = aligned spin-orbit component of object
chiB = aligned spin-orbit component of companion object
AqmA = dimensionless spin-induced quadrupole moment of object
AqmB = dimensionless spin-induced quadrupole moment of companion object
alpha2PNT = 2PN Quadrupole Tidal Flux coefficient """
XATo2nd = XA*XA
XATo3rd = XATo2nd*XA
XATo4th = XATo3rd*XA
XATo5th = XATo4th*XA
return (70312133/21168)+(4*alpha2PNT)/(3) - (147794303*XA)/(127008) \
- (20905*XATo2nd)/(28) - (432193*XATo3rd)/(504)-(5848*XATo4th)/(9) \
+ (857*XATo5th)/(3) + (-(639*XATo2nd)/(2)+(525*XATo3rd)/(2) \
+ AqmA*(-312*XATo2nd+256*XATo3rd))*chiA*chiA \
+ (-609*XA+1108*XATo2nd-499*XATo3rd)*chiA*chiB \
+ (-(639)/(2)+(1803*XA)/(2)-(1689*XATo2nd)/(2) + (525*XATo3rd)/(2) \
+ AqmB*(-312+880*XA-824*XATo2nd+256*XATo3rd))*chiB*chiB
|
82eae87495785a5d0cce4d3f0ae5b6654395a42c
| 698,124 |
import random
def noisify(val, eps):
"""
Add a Gaussian White noise to function value
"""
val_noisy = (1 + random.gauss(mu=0,sigma=eps)) * (1-val)
return val_noisy
|
40059eb7f87aa1b7b7096a1c84c7edef09cef4e1
| 698,126 |
def get_box_size(box):
"""Get box size"""
x0, y0, x1, y1 = box
sx = abs(x1 - x0) + 1
sy = abs(y1 - y0) + 1
return (sx, sy)
|
cefa6c8950687f0b4d244ebf9080ab29e358a7b2
| 698,129 |
from datetime import datetime
def unix_epoch_to_datetime(ux_epoch):
"""Convert number of seconds since 1970-01-01 to `datetime.datetime`
object.
"""
return datetime.utcfromtimestamp(ux_epoch)
|
13edceec1631a2a3db06dad215380f609693f441
| 698,133 |
import torch
def point_edt2(point: torch.Tensor, grid: torch.Tensor) -> torch.Tensor:
"""Batched version of a Squared Euclidean Distance Transform for a D-dimensional point.
Args:
point: torch.Tensor of size [batch, *, D]. Each element is interpreted as a D-dimensional point (e.g. [row, col]
or [depth, row, col]) and * represents any number of additional dimensions (eg. channels or images or both).
grid: torch.Tensor of size [*D, D], where *D represents D elements defining a lattice that defines the coordinates
of each output pixel/voxel.
Returns:
a torch.Tensor of size [batch, *, *D] representing the EDT^2 of each point in the input batch where *
represents any additional dimensions from the input, and *D is the size of each dimension of the lattice.
"""
inshape = point.shape
outshape = (*inshape[0:-1], *grid.shape[0:-1])
dim = len(grid.shape) - 1
point = point.view(-1, *[1] * dim, dim)
# need to replicate the grid for each item in the batch
grid = grid.expand(point.shape[0], *grid.shape)
pl = (grid - point)
d = (pl * pl).sum(dim=-1)
d = d.view(outshape)
return d
|
b186ccccd94d8b709974efd3b2ec5817c698f77b
| 698,134 |
import queue
def bfs(G, start):
""" A simple breadth-first search algorithm implemented using native queues. """
seen = set()
q = queue.Queue()
# we don't care about threading so don't ask Queue to block execution
q.put_nowait(start)
while not q.empty():
# get the waiting node, again without blocking execution
u = q.get_nowait()
if u not in seen:
seen.add(u)
# get all of u's neighbors and enqueue them
for n in G[u]: q.put_nowait(n)
return seen
|
a15f6ef42f8873b108c2bd48c462175c77fdeee8
| 698,136 |
import re
def content_type(response, **patterns):
"""Return name for response's content-type based on regular expression matches."""
ct = response.headers.get('content-type', '')
matches = (name for name, pattern in patterns.items() if re.match(pattern, ct))
return next(matches, '')
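Illustrative usage with a hypothetical response stub (any object exposing a .headers mapping should work):

class FakeResponse:
    headers = {'content-type': 'application/json; charset=utf-8'}

assert content_type(FakeResponse(), json=r'application/json', html=r'text/html') == 'json'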
|
c1071b2feae41bd049a26542d89414de54c06d8e
| 698,137 |
import re
def convert_ip_address(ip_address: str):
"""
This function converts a ipv4 address in standard string format to a HEX representation
:param ip_address: string with IPv4 address in format '192.168.0.1'
:return: HEX representation of IPv4 address (string)
"""
    if re.search(r'^((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])(\.(?!$)|$)){4}$', ip_address) is None:
return None
ip_addr = ip_address.split('.')
for i in range(4):
        ip_addr[i] = hex(int(ip_addr[i]))[2:].zfill(2)
ip_address = ip_addr[0] + ip_addr[1] + ' ' + ip_addr[2] + ip_addr[3]
return ip_address
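Worked example (values follow directly from the code above):

assert convert_ip_address('192.168.0.1') == 'c0a8 0001'
assert convert_ip_address('999.1.1.1') is None  # rejected by the regex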
|
1f50e6e2cb34325d07680a58090b58a1e00b745e
| 698,138 |
import base64
def base32_decode(encoded_bytes: bytes) -> str:
"""
Decodes a given bytes-like object to a string, returning a string
>>> base32_decode(b'JBSWY3DPEBLW64TMMQQQ====')
'Hello World!'
>>> base32_decode(b'GEZDGNBVGY======')
'123456'
>>> base32_decode(b'ONXW2ZJANRXW4ZZAMNXW24DMMV4CA43UOJUW4ZY=')
'some long complex string'
"""
# decode the bytes from base32
# then, decode the bytes-like object to return as a string
return base64.b32decode(encoded_bytes).decode("utf-8")
|
215a1aacd815fe11b93cf7dc6105abdff3492ab2
| 698,139 |
import re
def replaceThreeOrMore(word):
    """
    look for 3 or more repetitions of letters and replace with this letter itself only once
    """
    # (.)\1{2,} matches a character followed by at least two copies, i.e. 3+ in a row
    pattern = re.compile(r"(.)\1{2,}", re.DOTALL)
    return pattern.sub(r"\1", word)
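A quick check of the behaviour:

assert replaceThreeOrMore("coool") == "col"  # three repeats collapse to one
assert replaceThreeOrMore("book") == "book"  # two repeats are left alone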
|
c052a082d74873da1a4c64dc035aeff429b29efd
| 698,140 |
def luhn_validation(credit_card_number: str) -> bool:
"""
Function to luhn algorithm validation for a given credit card number.
>>> luhn_validation('4111111111111111')
True
>>> luhn_validation('36111111111111')
True
>>> luhn_validation('41111111111111')
False
"""
cc_number = credit_card_number
total = 0
half_len = len(cc_number) - 2
for i in range(half_len, -1, -2):
# double the value of every second digit
digit = int(cc_number[i])
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(cc_number) - 1, -1, -2):
total += int(cc_number[i])
return total % 10 == 0
|
4ed1cd788b54819ae4caead22bf34975a774dace
| 698,142 |
def _rescale_score_by_abs(score: float, max_score: float,
min_score: float) -> float:
"""
    Normalizes an attribution score to the range [0., 1.], where a score
    of 0. is mapped to 0.5.
:param score: An attribution score
:param max_score: The maximum possible attribution score
:param min_score: The minimum possible attribution score
:return: The normalized score
"""
if -1e-5 < min_score and max_score < 1e-5:
return .5
elif max_score == min_score and min_score < 0:
return 0.
elif max_score == min_score and max_score > 0:
return 1.
top = max(abs(max_score), abs(min_score))
return (score + top) / (2. * top)
|
a9ab337cc47f2d62de33c267bfe19c9522190db4
| 698,143 |
def __get_leading_zeros(fl):
"""Returns the number of leading zeros in a float decimal."""
    if fl >= 1.0:  # 1.0 and above have no leading zeros after the decimal point
        return 0
else:
fl_splitted = str(fl).split(".")[-1]
N_unstripped = len(fl_splitted)
N_left_stripped = len(fl_splitted.lstrip("0"))
return N_unstripped - N_left_stripped
|
c009a9dcf7f2c57baee3043acf9ec416679fab67
| 698,145 |
def df_to_dictionaries(df, change_names={}, include_index=True):
"""Returns a list of dictionaries, one dictionary for each row in the
DataFrame 'df'. The keys of the dictionary match the DataFrame column names,
    except for any substitutions provided in the 'change_names' dictionary;
{'old_name': 'new_name', etc}. If 'include_index' is True, the index
values are included in the dictionary keyed on the index name (unless changed
in the 'change_names' dictionary)
"""
# make a list of the final names to use
names = list(df.columns.values)
if include_index:
names = [df.index.name] + names
# apply name substitutions
for i in range(len(names)):
names[i] = change_names.get(names[i], names[i])
result = []
for ix, row in df.iterrows():
vals = list(row.values)
if include_index:
vals = [ix] + vals
result.append(
dict(zip(names, vals))
)
return result
|
f02c97318dda4da6bb082d3a78898584e27d353c
| 698,147 |
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
"""
return word + (suffix if len(seq) != 1 else '')
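A small usage sketch:

assert _s("item", []) == "items"
assert _s("item", [1]) == "item"
assert _s("match", [1, 2], suffix="es") == "matches"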
|
7ae7a7ac50b6b6ee92718877d242569d7339fd0e
| 698,150 |
def label2Addr(label, labels):
""" Return int address associated with label or None.
label is a string, either digits or symbolic.
Array labels has labels for all addresses in mem.
"""
if label.isdigit():
return int(label)
if label in labels:
return labels.index(label)
|
27b1fe8edce3aa4ee33f2e7ff25843fa958da620
| 698,152 |
def evenify(n):
"""Ensure number is even by incrementing if odd
"""
return n if n % 2 == 0 else n + 1
|
8a0e3263c2a4853c25361fda434c88a5f6c45a91
| 698,158 |
import logging
def _LoadResource(path):
"""Load the resource at given path.
Args:
path: a string resource path.
Returns:
The contents of that resource.
Raises:
ValueError: If the path is not set up correctly.
IOError: If the path is not found, or the resource can't be opened.
"""
try:
with open(path, 'rb') as f:
return f.read()
except IOError as e:
logging.warning('IOError %s on path %s', e, path)
raise e
|
73b5e318214fe44e9de2af251a4e58e7e5ef0376
| 698,159 |
import random
def valid_add(gene_info, individual):
"""Based on gene info and current individual, return a valid index to add
to an individual.
"""
return random.choice(list(set(range(0, gene_info.gene_count))
- individual))
|
bc02ea8d35175ebeb91caf47717bb4d6d8202119
| 698,161 |
from typing import Any
def serialise(obj: Any, no_head: bool = False) -> dict:
"""Takes any non-primitive object and serialises it into a dict.
Arguments:
obj(Any): Any non primitive object.
no_head(bool): Will not specify the module and class of the object when True.
Returns:
dict: A serialised dictionary of all the values of an object. May also contain the module and class.
Raises:
TypeError: Raised when a built in object is given.
"""
    # '__builtin__' was the Python 2 module name; Python 3 uses 'builtins'
    if obj.__class__.__module__ in ('builtins', '__builtin__'):
        raise TypeError("Can't serialise a builtin type.")
cls = obj.__class__
if no_head:
dct = {"values": {}}
else:
dct = {"__module__": cls.__module__,
"__name__": cls.__name__,
"values": {}}
for i in dir(obj):
try:
val = getattr(obj, i)
except AttributeError:
val = None
if i.startswith("_") or callable(val) or i in vars(cls):
continue
elif not isinstance(val, (str, int, bool, dict)) and val is not None:
try:
val = serialise(val)
except RecursionError:
val = str(val)
dct["values"][i] = val
if no_head:
return dct["values"]
else:
return dct
|
80f306e4fea637bda548c6292d2d38ecba641af8
| 698,162 |
def bdev_pmem_create_pool(client, pmem_file, num_blocks, block_size):
"""Create pmem pool at specified path.
Args:
pmem_file: path at which to create pmem pool
num_blocks: number of blocks for created pmem pool file
block_size: block size for pmem pool file
"""
params = {'pmem_file': pmem_file,
'num_blocks': num_blocks,
'block_size': block_size}
return client.call('bdev_pmem_create_pool', params)
|
f8851fb3d6472751d213ca4b7c4c1d59915e929c
| 698,165 |
from datetime import datetime
import pytz
def now_iso_time(zulu_frmt=True):
"""
Returns now time in ISO 8601 format in UTC
:param zulu_frmt: if True return zulu time format, e.g. '2020-11-26T13:51:29Z',
otherwise return string with UTC offset, e.g. '2020-11-26T13:51:29+00:00'
"""
time_now = datetime.utcnow().replace(tzinfo=pytz.UTC).replace(microsecond=0).isoformat()
if zulu_frmt:
time_now = time_now.replace("+00:00", "Z")
return time_now
|
55abbdda86beeddf7d6ed5e8e95575b89d7f5071
| 698,167 |
import numbers
def _create_weights_tuple(weights):
"""
Returns a tuple with the weights provided. If a number is provided,
this is converted to a tuple with one single element.
If None is provided, this is converted to the tuple (1.,)
"""
if weights is None:
weights_tuple = (1.,)
elif isinstance(weights, numbers.Number):
weights_tuple = (weights,)
else:
weights_tuple = tuple(float(i) for i in weights)
return weights_tuple
|
6d4006a4a88b47fb009e6f45cc654fd7642cbc6a
| 698,169 |
def nasa_polynomial(output_str):
""" Parse the NASA polynomial from the PAC99 output file.
:param output_str: string for the output file
:type output_str: str
:rtype: str
"""
lines = output_str.splitlines()
    return '\n'.join(lines[:11])
|
cf02c398ac54c40d814bf649fec2b7d3079a01c7
| 698,170 |
import torch
def get_metrics(pred_scores: torch.Tensor,
true_idx: torch.Tensor,
k_values: torch.Tensor):
"""Calculates mean number of hits@k. Higher values are ranked first.
Args:
pred_scores: (B, N) tensor of prediction values where B is batch size
and N number of classes.
        true_idx: (B, 1) tensor with index of ground truth class
k_values: (1, k) tensor containing number of top-k results to be
considered as hits.
Returns:
reciprocals: (B, 1) tensor containing reciprocals of the ranks
hits: (B, k) tensor containing the number of hits for each value of k
"""
# Based on PyKEEN's implementation
true_scores = pred_scores.gather(dim=1, index=true_idx)
best_rank = (pred_scores > true_scores).sum(dim=1, keepdim=True) + 1
worst_rank = (pred_scores >= true_scores).sum(dim=1, keepdim=True)
average_rank = (best_rank + worst_rank).float() * 0.5
reciprocals = average_rank.reciprocal()
hits = average_rank <= k_values
return reciprocals, hits
|
f0c21fbb993805222f2b84fcc6644868211a95b5
| 698,171 |
import re
def clean_text(s):
""" Remove non alphabetic characters. E.g. 'B:a,n+a1n$a' becomes 'Banana' """
s = re.sub("[^a-z A-Z _]", "", s)
s = s.replace(' n ', ' ')
return s.strip().lower()
|
ab18429927bbca362801ec815934aa8dc9a106e9
| 698,172 |
def IsArticleURL(para):
"""该函数接收一个参数,并判断其是否是简书的文章 URL。
Args:
para (str): 需要被判断的参数
Returns:
bool: 如为 True 代表是文章 URL,为 False 则不是
"""
if para.find("http") == -1:
return False
if para.find("www.jianshu.com") == -1:
return False
if para.find("/p/") == -1:
return False
return True
|
91f9878938e1799373ef928aa8ed6ea0fea1a3ca
| 698,174 |
def _validate_lod(lod, tensor_height=-1):
"""Check whether the input length-based lod info is valid.
There are several things to check:
1. lod should be a list of lists. Empty list is fine.
2. The length of each sublist (a lod level) should be at least one.
3. Each element in each lod level should be an integer greater than 0.
4. The sum of one lod level should be equal to the length of the next lod level.
5. The sum of the last lod level should be equal to the tensor height.
Bypass this check if user does not provide tensor_height as input.
Args:
lod: the length-based lod info, e.g., [[2, 3], [2, 1, 2, 3, 4]].
tensor_height: the outermost dimension of the tensor with which the input
lod is associated with.
Returns:
A boolean indicating whether the input lod is valid or not.
"""
assert isinstance(lod, list), "lod should be a list"
# Empty lod is fine
if len(lod) == 0:
return True
lod_sum = []
for level in lod:
assert isinstance(level, list), "each item in lod should be a list"
# Each level of lod should have at least one length info
if len(level) < 1:
return False
level_sum = 0
for lod_len in level:
# Each length in a level should be > 0
if lod_len <= 0:
return False
level_sum += lod_len
lod_sum.append(level_sum)
for idx, val in enumerate(lod_sum[:-1]):
# Each level's sum should be equal to
# the number of items in the next level
if val != len(lod[idx + 1]):
return False
if tensor_height == -1:
return True
else:
# Last level's sum should be equal to the tensor height
return lod_sum[-1] == tensor_height
|
c9717eb8668b03e4da75abdf24003e9458eb9783
| 698,176 |
def escape_string(string):
""" Escape a string for use in Gerrit commands.
Adds necessary escapes and surrounding double quotes to a
string so that it can be passed to any of the Gerrit commands
that require double-quoted strings.
"""
result = string
result = result.replace('\\', '\\\\')
result = result.replace('"', '\\"')
return '"' + result + '"'
|
2c3b16b67377de3cba821cc405c0c5e24943b995
| 698,180 |
import importlib
def import_consumer(value):
"""Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str value: The consumer class in module.Consumer format
:return: tuple(Class, str)
"""
parts = value.split('.')
import_name = '.'.join(parts[0:-1])
import_handle = importlib.import_module(import_name)
if hasattr(import_handle, '__version__'):
version = import_handle.__version__
else:
version = None
# Return the class handle
return getattr(import_handle, parts[-1]), version
|
c63d57614d7be9cfbda35d25b6634ffaec938288
| 698,181 |
def _MakeSplitDimension(value, enabled):
"""Return dict modelling a BundleConfig splitDimension entry."""
return {'value': value, 'negate': not enabled}
|
de4d44598e5c9c76e46b57b4b07f41698dbe983d
| 698,182 |
import math
def calc_easing_degree_for_proportion(proportion):
"""
Calculates a reasonable easing degree for a given proportion.
"""
return -math.log10(proportion) + 1
|
feeab6e87fba3060cbd37f89f3e0391df1bc1102
| 698,184 |
def spatial_mean(tensor, ndim=1):
"""
Average `tensor` over the last dimensions; keep only the first `ndim` ones.
"""
if tensor.ndim > ndim:
return tensor.mean(tuple(range(ndim, tensor.ndim)))
return tensor
|
19bcf5df1d197069842792b9bd5d07a5e223d609
| 698,185 |
import binascii
def get_address_bytes_from_string(address_string):
"""
Given a Bluetooth address as a string, optionally delimited by colons (':'), return the
bytes representation of the address.
    :param address_string: A Bluetooth address string, optionally delimited by colons. This value is case-insensitive.
:return: A bytes value corresponding to the raw Bluetooth address.
:raises: :exc:`ValueError` if `address_string` is not a valid Bluetooth address string.
"""
address_string = address_string.replace(":", "")
if len(address_string) != 12:
raise ValueError("Invalid Bluetooth address: {!r}".format(address_string))
try:
# Address string is reversed from bytes data.
return binascii.unhexlify(address_string)[::-1]
except TypeError:
raise ValueError("Invalid Bluetooth address: {!r}".format(address_string))
|
88e83d5916c461e34300a330603e3975320d6568
| 698,186 |
def compute_mean_rt(df):
"""
Computes subject-wise mean RT
Input
---
df : dataframe
aggregate response data
Returns
---
array of subject-wise mean RTs
"""
return df.groupby('subject').rt.mean().values
|
04b51953a8a5fa37fc746e32416bd0b598cfb138
| 698,188 |
import re
def add_iteration_suffix(name):
"""
adds iteration suffix. If name already ends with an integer it will continue iteration
examples:
    'col' -> 'col_01'
'col1' -> 'col2'
'col_02' -> 'col_03'
"""
m = re.search(r"\d+$", name)
n = "00"
endstr = None
midchar = "_" if name[-1] != "_" else ""
if m is not None:
midchar = ""
n = m.group()
endstr = -len(n)
name = f"{name[:endstr]}{midchar}{int(n)+1:0{len(n)}}"
return name
|
8021f1bc131f0a303bfaa06caa39e77f1de0dff9
| 698,189 |
def min_max(tr):
"""Return the ratio of minimum to maximum of a trace.
Parameters
----------
tr : 1D array of float
The input profile.
Returns
-------
mm : float
The ratio of the minimum value in `tr` over the maximum.
Examples
--------
>>> tr = np.array([0.8, 0.9, 1.4, 2.0, 1.1])
>>> min_max(tr) # doctest: +ELLIPSIS
0.4...
"""
tr = tr.astype(float)
mm = tr.min() / tr.max()
return mm
|
8fcb533f22addf95ccf6e75e3d116dfe928aa6ca
| 698,190 |
def f(x):
    """Simple recursive function."""
    if x == 0:
        return 1
    return 1 + f(x - 1)
|
ad71b050af9f3e634b8afab7565bc4df1c3f1222
| 698,191 |
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
|
7aee4b4bc4b389b718b3e7a8cb9a77c37fd4ff1e
| 698,193 |
def get_value(str_val):
"""convert a string into float or int, if possible."""
    # covers None as well as the empty string
    if not str_val:
        return ""
try:
val = float(str_val)
if "." not in str_val:
val = int(val)
except ValueError:
val = str_val
return val
|
a3c7deee4110ea25a88f8568139015be20fef1d0
| 698,194 |
def relation_bet_point_and_line( point, line ):
"""Judge the realtion between point and the line, there are three situation:
1) the foot point is on the line, the value is in [0,1];
2) the foot point is on the extension line of segment AB, near the starting point, the value < 0;
3) the foot point is on the extension line of segment AB, near the ending point, the value >1;
Args:
        point ([double, double]): point coordinates
        line ([x0, y0, x1, y1]): line coordinates
Returns:
        [float]: the relation between point and the line (before start < 0 <= on segment <= 1 < past end)
"""
pqx = line[2] - line[0]
pqy = line[3] - line[1]
dx = point[0]- line[0]
dy = point[1]- line[1]
    # squared length of the segment
    d = pow(pqx, 2) + pow(pqy, 2)
    # dot product of vector pq and vector pt (p is point A, q is point B, pt is point P)
    t = pqx * dx + pqy * dy
flag = 1
    if d > 0:
        t = t / d
        flag = t
return flag
|
92a1ed906ee4d7fbb97fa46668fedbbd5f704fba
| 698,195 |
def escape_quotes(value):
"""
DTD files can use single or double quotes for identifying strings,
so " and ' are the safe bet that will work in both cases.
"""
value = value.replace('"', "\\"")
value = value.replace("'", "\\'")
return value
|
1aa6e5f2325bfc293dff6e34b77117284d5bd018
| 698,196 |
import json
def init_population(ind_init, filename):
"""
create initial population from json file
ind_init: [class] class that and individual will be assigned to
filename: [string] string of filename from which pop will be read
returns: [list] list of Individual objects
"""
with open(filename, "r") as pop_file:
contents = json.load(pop_file)
return list(ind_init(c) for c in contents)
|
015b4d6b81dc44e325393535fc7abd65ca26db55
| 698,197 |
import random
def shuffle(l):
"""
Returns the shuffled list.
"""
l = list(l)
random.shuffle(l)
return l
|
4945d5d57ecaf3c9b8340f3f20c5e960938f3914
| 698,198 |
def vec2num(vec):
"""Convert list to number"""
num = 0
for node in vec:
num = num * 10 + node
return num
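For example:

assert vec2num([1, 2, 3]) == 123
assert vec2num([]) == 0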
|
ffce787a02bc9f2bc5669dd8fb622bdff1fc941b
| 698,205 |
from datetime import datetime
def _iardict_to_fil_header(iardict: dict) -> dict:
"""Build dict header from dict iar."""
source_name = iardict["Source Name"]
source_ra = iardict["Source RA (hhmmss.s)"]
source_dec = iardict["Source DEC (ddmmss.s)"]
# ref_dm = iardict["Reference DM"]
# pul_period = iardict["Pulsar Period"]
# high_freq = iardict["Highest Observation Frequency (MHz)"]
telescope_id = int(iardict["Telescope ID"])
machine_id = int(iardict["Machine ID"])
data_type = int(iardict["Data Type"])
# observing_time = int(iardict["Observing Time (minutes)"])
# gain = iardict["Gain (dB)"]
# bandwidth = int(iardict["Total Bandwith (MHz)"])
avg_data = int(iardict["Average Data"])
sub_bands = int(iardict["Sub Bands"])
# ---- ROACH ----
# values
fft_pts = 128
adc_clk = 200e6
# parameters
tsamp = avg_data * fft_pts / adc_clk
f_off = adc_clk / fft_pts * 1e-6
time_now = datetime.now().strftime("_%Y%m%d_%H%M%S")
# tsamp = 1e6 / float(bandwidth) * avg_data
rawdatafile = f"ds{avg_data}_{source_name}{time_now}.fil"
return {
"telescope_id": telescope_id,
"machine_id": machine_id,
"data_type": data_type,
"rawdatafile": rawdatafile,
"source_name": source_name,
"az_start": 0.0,
"za_start": 0.0,
"src_raj": source_ra,
"src_dej": source_dec,
"tstart": 0.0,
"tsamp": tsamp,
"fch1": 0.0,
"foff": f_off,
"nchans": sub_bands,
"nifs": 1,
"ibeam": 1,
"nbeams": 1,
}
|
920ce832d0ab9b7fa219b14572cbfa04c4c0dfc2
| 698,207 |
# OrderedDict must come from collections; the typing alias cannot be instantiated
from collections import OrderedDict
def make_per_dataset_plot(delta_cchalf_i):
"""Make a line plot of delta cc half per group."""
d = OrderedDict()
d.update(
{
"per_dataset_plot": {
"data": [
{
"y": [i * 100 for i in list(delta_cchalf_i.values())],
"x": list(delta_cchalf_i.keys()),
"type": "scatter",
"mode": "lines",
}
],
"layout": {
"title": "Delta CC-Half vs group",
"xaxis": {"title": "Group number"},
"yaxis": {"title": "Delta CC-Half"},
},
}
}
)
return d
|
219b7a981f896c7723e8f6ccde16819703503e54
| 698,210 |
def merge_sort(array):
"""
An implementation of the merge sort algorithm. Recursively sorts arrays
by calling merge sort on halves of the array, then merging by comparing
    the first element of each array, then adding the smaller of the two to
    the sorted array.
"""
# Base Cases
if len(array) == 1 or len(array) == 0:
return array
elif len(array) == 2:
if array[0] > array[1]:
array[0], array[1] = array[1], array[0]
return array
else:
i, j, sort = 0, 0, []
# Split the array into 2 equal parts and sort them
middle = int(len(array)/2)
left = merge_sort(array[:middle].copy())
right = merge_sort(array[middle:].copy())
# Merge the sorted halves
ll, lr = len(left), len(right)
while i < ll and j < lr:
if left[i] <= right[j]:
sort.append(left[i]); i += 1
else: sort.append(right[j]); j += 1
# Add anything left over
sort += left[i:]
sort += right[j:]
return sort
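A quick sanity check:

assert merge_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []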
|
659260d8a9b9160d0576c62d17dadd91925fb05c
| 698,211 |
def vectorize_cst(value, cst_precision):
""" if cst_precision is a vector format return a list uniformly initialized
        with value whose size matches the vector size of cst_precision, else
return the scalar value """
if cst_precision.is_vector_format():
return [value] * cst_precision.get_vector_size()
else:
return value
|
9a5ba282309f8134007084e32a4a58876ec05d73
| 698,213 |
def split_me(strings):
"""
    Split a comma-separated string into a list so it can be looped
    over from a template.
    """
    split_values = strings.split(",")
    return split_values
|
2b62cccac7602054b0e5cde34d0dbfc9964c0970
| 698,218 |
def hamming_distance(Subspace1, Subspace2):
"""
    Returns the Hamming distance between two subspaces.
Variables that are free in either subspace are ignored.
**arguments**:
* *Subspace1, Subspace2* (dict): subspaces in dictionary representation
**returns**:
* *Distance* (int): the distance between *Subspace1* and *Subspace2*
**example**:
>>> hamming_distance({"v1":0,"v2":0}, {"v1":1,"v2":1})
2
>>> hamming_distance({"v1":1}, {"v2":0})
0
"""
return len([k for k, v in Subspace1.items() if k in Subspace2 and Subspace2[k] != v])
|
f42d13bd1d980b092235aa472a24a1cebe078f44
| 698,219 |
from pathlib import Path
def find_path(paths):
"""Given a search path of files or directories with absolute paths, find
the first existing path.
Args:
paths (list): A list of strings with absolute paths.
Returns:
str: The first path in the list `paths` that exists, or `None` if
none of the paths exist.
Example:
The following example works if the file system has a file
/usr/local/etc/snips.toml (e.g. on macOS with Snips installed):
>>> find_path(['/etc/snips.toml', '/usr/local/etc/snips.toml'])
'/usr/local/etc/snips.toml'
"""
for name in paths:
path = Path(name)
if path.exists():
return str(path.resolve())
# If none of the paths in the search path are found in the file system,
# return None.
return None
|
53dda06e55c26d0fce43bca5eea0cdc3fca53b11
| 698,224 |
def time_to_str(t):
""" Turns time objects to strings like '08:30' """
return t.strftime('%H:%M')
|
6427b9267b63dc6d75dce2afae24bbbd58b0b0dd
| 698,227 |
def test(one, two, three, **kwargs) -> int:
"""Return the sum of the arguments."""
return one + two + three + kwargs["four"] + kwargs["five"] + kwargs["six"]
|
c48a6968b6d4f1c673f7749e810d8d360e8d8d20
| 698,231 |
def _hierarch_keywords(names):
"""
    Prepend the 'HIERARCH ' string to all keywords of 8 or more characters
Avoids FITS VerifyWarning.
Parameters
----------
names : list
keywords
Returns
-------
new_names : list
        keywords with HIERARCH prepended as appropriate
"""
new_names = []
for cname in names:
if len(cname) >= 8:
new_names.append(f"HIERARCH {cname}")
else:
new_names.append(cname)
return new_names
|
73fc3f1594bf12a7c4a2c79d320aa490eb3d39e9
| 698,235 |
def get_size_of_corpus(filepaths):
""" Given a list of filepaths, it will return the total number of lines
Parameters
----------
filepaths : [ str ]
A list of filepaths
Returns
-------
num_lines : int
The total number of lines in filepaths
"""
def blocks(files, size=65536):
while True:
b = files.read(size)
if not b:
break
yield b
num_lines = 0
for filepath in filepaths:
with open(filepath, encoding="utf-8") as f:
num_lines += sum(bl.count("\n") for bl in blocks(f))
return num_lines
|
46d166f7ae21ed345c45c38b782c7405790a8de2
| 698,239 |
from struct import unpack
from typing import Union
from pathlib import Path
def _tiff2xml(path: Union[Path, str]) -> bytes:
"""Extract OME XML from OME-TIFF path.
This will use the first ImageDescription tag found in the TIFF header.
Parameters
----------
path : Union[Path, str]
Path to OME TIFF.
Returns
-------
xml : str
OME XML
Raises
------
ValueError
If the TIFF file has no OME metadata.
"""
with Path(path).open(mode="rb") as fh:
try:
offsetsize, offsetformat, tagnosize, tagnoformat, tagsize, codeformat = {
b"II*\0": (4, "<I", 2, "<H", 12, "<H"),
b"MM\0*": (4, ">I", 2, ">H", 12, ">H"),
b"II+\0": (8, "<Q", 8, "<Q", 20, "<H"),
b"MM\0+": (8, ">Q", 8, ">Q", 20, ">H"),
}[fh.read(4)]
except KeyError as e:
raise ValueError(f"{path!r} does not have a recognized TIFF header") from e
fh.read(4 if offsetsize == 8 else 0)
fh.seek(unpack(offsetformat, fh.read(offsetsize))[0])
for _ in range(unpack(tagnoformat, fh.read(tagnosize))[0]):
tagstruct = fh.read(tagsize)
if unpack(codeformat, tagstruct[:2])[0] == 270:
size = unpack(offsetformat, tagstruct[4 : 4 + offsetsize])[0]
if size <= offsetsize:
desc = tagstruct[4 + offsetsize : 4 + offsetsize + size]
break
fh.seek(unpack(offsetformat, tagstruct[-offsetsize:])[0])
desc = fh.read(size)
break
else:
raise ValueError(f"No OME metadata found in file: {path}")
if desc[-1] == 0:
desc = desc[:-1]
return desc
|
6bd35d09c5edfd6362379ae3375e888e99f4609a
| 698,242 |
def DistanceFromMatrix(matrix):
"""Returns function(i,j) that looks up matrix[i][j].
Useful for maintaining flexibility about whether a function is computed
or looked up.
Matrix can be a 2D dict (arbitrary keys) or list (integer keys).
"""
def result(i, j):
return matrix[i][j]
return result
|
f3cb95aace0cfe70bfeef9d8778947df16cdd4b1
| 698,249 |
from pathlib import Path
def homedir() -> str:
"""Return the user's home directory."""
return str(Path.home())
|
dbd65e7db4cdbf2bd06c1ab42ed1ea3503e14ac2
| 698,250 |
def quote_aware_split(string, delim=',', quote='"'):
""" Split outside of quotes (i.e. ignore delimiters within quotes."""
out = []
s = ''
open_quote=False
for c in string:
if c == quote:
open_quote = not open_quote
if c == delim and not open_quote:
out += [s]
s = ''
else:
s += c
return out + [s]
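For example, commas inside quotes are preserved:

assert quote_aware_split('a,"b,c",d') == ['a', '"b,c"', 'd']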
|
0ef5f7040eee2a041fa1b6e6d8bf3a773a80f5f9
| 698,256 |
def LMpM_total_size(ell_min, ell_max):
"""Total array size of Wigner D matrix
Assuming an array (e.g., Wigner D matrices) in the order
[[ell,mp,m] for ell in range(ell_min, ell_max+1)
for mp in range(-ell,ell+1)
for m in range(-ell,ell+1)]
this function returns the total size of that array.
This can be calculated in sympy as
from sympy import symbols, summation
ell,ell_min,ell_max = symbols('ell,ell_min,ell_max', integer=True)
summation((2*ell + 1)**2, (ell, ell_min, ell_max))
"""
# raw output is: 4*ell_max**3/3 + 4*ell_max**2 + 11*ell_max/3 - 4*ell_min**3/3 + ell_min/3 + 1
# We rearrange that to act more nicely
return (((4 * ell_max + 12) * ell_max + 11) * ell_max + (-4 * ell_min ** 2 + 1) * ell_min + 3) // 3
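A quick consistency check against the direct sum (ell_min=0, ell_max=1 gives 1 + 9):

assert LMpM_total_size(0, 1) == sum((2 * ell + 1) ** 2 for ell in range(0, 2)) == 10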
|
7a6175640236ec3bc905d3b458f99eedbc792e09
| 698,262 |
def get_fcurve_data_path_property(fcurve):
""" Gets fcurve's data path property
Example fcurve's data_path: 'sequence_editor.sequences_all["Transform"].scale_start_x'
For that example path this function will return 'scale_start_x'
:param fcurve: Fcurve instance to get data path from
:return: The last component of data path defining actual property name
"""
# we want to extract last part - data path
data_path_full = fcurve.data_path
last_dot_index = data_path_full.rfind(".")
return data_path_full[(last_dot_index+1):]
|
0a7ce5fecdaa5cb1fe0024a18f6b6f057b5fe6cb
| 698,263 |
def align(offset, alignment):
"""
Return the offset aligned to the nearest greater given alignment
Arguments:
- `offset`: An integer
- `alignment`: An integer
"""
if offset % alignment == 0:
return offset
return offset + (alignment - (offset % alignment))
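For example, with an 8-byte alignment:

assert align(10, 8) == 16
assert align(16, 8) == 16  # already aligned offsets are unchanged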
|
1f9d8fd4d4ac7798e14ee92d83510bb4b0ba09aa
| 698,266 |
def parse_property_string(prop_str):
"""
Generate valid property string for extended xyz files.
(ref. https://libatoms.github.io/QUIP/io.html#extendedxyz)
Args:
prop_str (str): Valid property string, or appendix of property string
Returns:
valid property string
"""
if prop_str.startswith("Properties="):
return prop_str
return "Properties=species:S:1:pos:R:3:" + prop_str
|
24a865dcf2cba5b5f840e3e682fd58486b658355
| 698,267 |
from typing import List
def _format_plugin_names_and_versions(plugininfo) -> List[str]:
"""Format name and version of loaded plugins."""
values: List[str] = []
for _, dist in plugininfo:
# Gets us name and version!
name = f"{dist.project_name}-{dist.version}"
# Questionable convenience, but it keeps things short.
if name.startswith("pytask-"):
name = name[7:]
        # We decided to print python package names since they can have more than one plugin.
if name not in values:
values.append(name)
return values
|
2313f2f60e71be2209ebb201297d44e8dcad513a
| 698,272 |
def star_sub_sections(body: str):
    r"""
Change
\subsection
\subsubsection
To:
\subsection*
\subsubsection*
"""
body = body.replace(r'\subsection',r'\subsection*')
body = body.replace(r'\subsubsection',r'\subsubsection*')
return body
|
06583d16c76393edb614955c4e3786b292c5fa51
| 698,273 |
def rjd_to_mjd(rjd):
"""
RJD (Reduced Julian Date)
days elapsed since 1858-11-16T12Z (JD 2400000.0)
MJD (Modified Julian Date)
days elapsed since 1858-11-17T00Z (JD 2400000.5)
This function transforms RJD in MJD
"""
return rjd - 0.5
|
2b1f4d830670f754fbbc2259face4d261877d335
| 698,275 |
def example_function(a: int) -> int:
"""Takes an integer as input and return it's square
Parameters
----------
a : int
input number
Returns
-------
int
square of a
"""
return a ** 2
|
fd519e72ec385aa905868facee198b3eaf57f778
| 698,277 |
import doctest
def _quiet_testmod(module):
"""
Run all of a modules doctests, not producing any output to stdout.
Return a tuple with the number of failures and the number of tries.
"""
finder = doctest.DocTestFinder(exclude_empty=False)
runner = doctest.DocTestRunner(verbose=False)
for test in finder.find(module, module.__name__):
runner.run(test, out=lambda x: True)
return (runner.failures, runner.tries)
|
2c788dd128214e7c230124636aad3b0630060dd1
| 698,280 |
def _pango_attr_list_types(attributes):
"""Returns the types of all attributes in the given Pango.AttrList."""
# Pango.AttrList does not appear to have any normal ways to access its
# contents, so this is a bit of a hack.
types = []
attributes.filter(lambda attribute: types.append(attribute.klass.type))
return types
|
d14a28fd88eb60cbe8190efaf09e1b5081c437c5
| 698,282 |
import colorsys
def hue_sat_to_cmap(hue, sat):
"""Mkae a color map from a hue and saturation value.
"""
# normalize to floats
hue = float(hue) / 360.0
sat = float(sat) / 100.0
res = []
for val in range(256):
hsv_val = float(val) / 255.0
r, g, b = colorsys.hsv_to_rgb(hue, sat, hsv_val)
res.append((r, g, b))
return res
|
816cee4bbf69ee466cdade149b4f4b4547e9b29a
| 698,284 |
import jinja2
def render_template(template_file, template_vars, searchpath="./templates/"):
"""
Render a jinja2 template
"""
    template_loader = jinja2.FileSystemLoader(searchpath=searchpath)
    template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_file)
return template.render(template_vars)
|
d15f5d6e120a22ee5735dbf5b1f8c324a5cae861
| 698,287 |
def dict_sort(dictionary: dict) -> list:
"""Takes in a dictionary with integer values and outputs a list of the keys sorted by their associated values in descending order."""
return list(reversed(sorted(dictionary, key=dictionary.__getitem__)))
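For example:

scores = {"alpha": 1, "beta": 3, "gamma": 2}
assert dict_sort(scores) == ["beta", "gamma", "alpha"]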
|
330e6033a9e511341e5d9216a385b366a09eed9c
| 698,289 |
import re
def get_citation_form(attributes):
""" Compute the citation form of a pronominal mention.
Args:
attributes (dict(str, object)): Attributes of the mention, must contain
the key "tokens".
Returns:
str: The citation form of the pronoun, one of "i", "you", "he", "she",
"it", "we", "they" and None.
"""
pronoun = attributes["tokens"][0]
pronoun = pronoun.lower()
if re.match("^(he|him|himself|his)$", pronoun):
return "he"
elif re.match("^(she|her|herself|hers|her)$", pronoun):
return "she"
elif re.match("^(it|itself|its)$", pronoun):
return "it"
elif re.match("^(they|them|themselves|theirs|their)$", pronoun):
return "they"
elif re.match("^(i|me|myself|mine|my)$", pronoun):
return "i"
elif re.match("^(you|yourself|yourselves|yours|your)$", pronoun):
return "you"
elif re.match("^(we|us|ourselves|ours|our)$", pronoun):
return "we"
|
2e02063ae0694e7ce76e3be8ce111d9550de7697
| 698,290 |
def bitpos_from_mask(mask, lsb_pos=0, increment=1):
"""
Turn a decimal value (bitmask) into a list of indices where each
index value corresponds to the bit position of a bit that was set (1)
in the mask. What numbers are assigned to the bit positions is controlled
by lsb_pos and increment, as explained below.
:param mask: a decimal value used as a bit mask
:param lsb_pos: The decimal value associated with the LSB bit
:param increment: If this is +i, then the bit next to LSB will take
the decimal value of lsb_pos + i.
:return: List of bit positions where the bit was set in mask
"""
out = []
while mask:
if mask & 0x01:
out.append(lsb_pos)
lsb_pos += increment
mask >>= 1
return sorted(out)
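For example, with the default LSB-first numbering and a reversed scheme:

assert bitpos_from_mask(0b1010) == [1, 3]
assert bitpos_from_mask(0b1010, lsb_pos=7, increment=-1) == [4, 6]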
|
d7938258c0f2dc523720bc82a48ecba1c2342223
| 698,293 |
def url_compose(camera):
""" Compose URL string for the camera """
# rtsp://88.204.57.242:5533/user=admin&password=******&channel=1&stream=1.sdp?
url: str = f'rtsp://{camera.ip_addr}:{camera.port}/user={camera.login}&password={camera.password}&channel=1&stream=0.sdp?'
return url
|
62bf81b993e95a4c5ed7ca8362220cf16102c877
| 698,295 |
def get_first_package_name(name):
"""
Returns first package name.
From `a.b.c` it returns `a`.
:param str name: Full module name
:return: First package name.
:rtype: str
"""
return name.split(".")[0]
|
814b175356ef715af46f788dc9eac7e776884b30
| 698,298 |
def _parse_url_work_relation(response):
""" response is
{'resource': 'https://imslp.org/wiki/7_Bagatelles,_Op.33_(Beethoven,_Ludwig_van)',
'relations': [{'source-credit': '',
'target-credit': '',
'type-id': '0cc8527e-ea40-40dd-b144-3b7588e759bf',
'type': 'download for free',
'end': None,
'direction': 'forward',
'ended': False,
'begin': None,
'target-type': 'work',
'work': {'title': '7 Bagatelles, op. 33',
'attributes': [],
'languages': [],
'disambiguation': '',
'type': None,
'iswcs': [],
'type-id': None,
'id': '94a19e47-2c1d-425b-b4f0-63d62d5bf788',
'language': None},
'attribute-values': {},
'attribute-ids': {},
'attributes': []}],
'id': '2d264d6e-5082-46a7-a60a-e2d02ab103e1'}
"""
relations = response.get('relations', [])
if relations:
return relations[0]['work']['id']
|
4e36330be029846bc44c235dafb20e6dc9c5f58f
| 698,299 |
import toml
import yaml
import json
def mock_settings_file(request, monkeypatch, tmpdir, file_extension):
"""Temporarily write a settings file and return the filepath and the expected settings outcome."""
ext = file_extension
p = tmpdir.mkdir("sub").join("settings" + ext)
expected_result = {"testgroup": {"testvar": 123}, "othergroup": {"blabla": 555}}
if ext == ".toml":
p.write(toml.dumps(expected_result))
elif ext in [".yml", ".yaml"]:
p.write("---\n" + yaml.dump(expected_result))
elif ext == ".json":
p.write(json.dumps(expected_result))
else: # pragma: nocover
raise NotImplementedError("Invalid file extension :{}.".format(ext))
return str(p), expected_result
|
4899ebbc9efbe31a931387d4a09dcb8c727eaf8d
| 698,303 |
def replicate_idx(head):
"""Return a list of indices representing replicate groups"""
h = head
g = h.groupby(['Sample Name', 'Measurement day'])
idx = [i[1].index for i in g]
    return idx
|
ac5b80de8d15ef283185b0e93d495516695aa936
| 698,308 |
def str2ascii(string: str) -> list:
"""Convert a string to a list of ascii-codes"""
return [ord(i) for i in string]
|
a938b0c585e78a455721e9d17e8915b0769a025f
| 698,310 |
def rgbToHexColor(r, g, b):
"""Convert r, g, b to #RRGGBB."""
return f'#{int(r):02X}{int(g):02X}{int(b):02X}'
|
5cff9abc67c235a4f0fdf258ea555f018d80d1ad
| 698,311 |
def filter_refgene_ensgene_exon(var_df_per_chrom, exon_class,
refgene, ensgene):
"""Filter for a refgene function, ensembl function or both.
Args:
var_df_per_chrom (:obj:`DataFrame`): all variants in a chromosome
        exon_class (:obj:`str`): annovar EXON class to filter on
refgene (:obj:`boolean`): if used RefSeq to define variant classes
ensgene (:obj:`boolean`): using ENSEMBL to define variant classes
Returns:
var_df_per_chrom (:obj:`DataFrame`): only variants in the
desired `exon_class`
Description:
First prepends a ^ so that only the highest impact `exon_class`
is considered as the de-facto class for filtering.
"""
exon_class = "^" + exon_class
if not refgene and not ensgene:
print("Using RefGene for filtering")
refgene = True
if refgene:
vars_refgene = var_df_per_chrom.exon_func_refgene.str.contains(
exon_class, regex=True)
var_df_per_chrom = var_df_per_chrom[vars_refgene]
if ensgene:
vars_ensgene = var_df_per_chrom.exon_func_ensgene.str.contains(
exon_class, regex=True)
var_df_per_chrom = var_df_per_chrom[vars_ensgene]
return var_df_per_chrom
|
905db5cab873eabce8ee24872231f2dacabcec29
| 698,314 |
def _lin_f(p, x):
"""Basic linear regression 'model' for use with ODR.
This is a function of 2 variables, slope and intercept.
"""
return (p[0] * x) + p[1]
|
52bc228dd48ee7939fa60cd052989de70a44b197
| 698,315 |
def mock_user(request):
"""Define a mock user to be used when testing REST API services"""
user = dict(
id="test_id",
name="User Name",
description="I'm a test user",
url="someurl",
)
return user
|
ff6eee62cd27328130bcd37c6164d3b4b17e2558
| 698,316 |
def get_type_as_string(instance: object) -> str:
"""
>>> x='a'
>>> get_type_as_string(x)
'str'
>>> x=1
>>> get_type_as_string(x)
'int'
>>> import decimal
>>> x=decimal.Decimal(1.00)
>>> get_type_as_string(x)
'Decimal'
>>> x=[]
>>> get_type_as_string(x)
'list'
"""
return type(instance).__name__
|
5fd2dd4362a98b2121f9ad2eb4e44ead83446327
| 698,317 |
def is_property(class_to_check, name):
"""Determine if the specified name is a property on a class"""
    return hasattr(class_to_check, name) and isinstance(
        getattr(class_to_check, name), property
    )
|
0f7dce28f1e78e8b6b937a5a6536060886666371
| 698,321 |
from string import ascii_uppercase
def get_cluster_label(cluster_id):
"""
It assigns a cluster label according to the cluster id that is
supplied.
It follows the criterion from below:
Cluster id | Cluster label
0 --> A
1 --> B
2 --> C
25 --> Z
26 --> AA
27 --> AB
28 --> AC
Parameters
----------
cluster_id : int
The id of the cluster that will be used to generate the label
Returns
-------
cluster_label : str
The cluster label according to the supplied id and the criterion
mentioned above
"""
cluster_label = ''
current_index = cluster_id
while current_index >= 0:
if current_index < len(ascii_uppercase):
cluster_label += ascii_uppercase[current_index]
else:
for letter in reversed(cluster_label):
if letter != 'Z':
idx = ascii_uppercase.index(cluster_label[-1])
cluster_label = \
cluster_label[:-1] + ascii_uppercase[idx + 1]
break
else:
cluster_label = 'A' + cluster_label
current_index -= 26
return cluster_label
|
06210786dfb375dce31ec3c014cc5f545d019c50
| 698,323 |