content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def get_policy_version(client, arn: str) -> str:
    """Return the default version id of the IAM policy identified by *arn*.

    Note: despite the AWS API name, ``get_policy`` describes the policy and
    its current default version id — it does not return the policy document.
    """
    response = client.get_policy(PolicyArn=arn)
    return response["Policy"]["DefaultVersionId"]
import typing
import itertools
def groupby_apply(
    data: typing.Iterable[typing.Any],
    keyfunc: typing.Callable,
    applyfunc: typing.Callable,
) -> typing.List[typing.Tuple[typing.Any, typing.Any]]:
    """
    Group *data* by ``keyfunc`` and reduce each group with ``applyfunc``,
    without requiring a `pandas` dependency.

    See: <https://docs.python.org/3/library/itertools.html#itertools.groupby>

    data:
        iterable
    keyfunc:
        callable to define the key by which you want to group
    applyfunc:
        callable to apply to the group
    returns:
        an iterable with the accumulated values
    """
    # itertools.groupby only groups *consecutive* equal keys, so sort first.
    ordered = sorted(data, key=keyfunc)
    results: typing.List[typing.Tuple[typing.Any, typing.Any]] = []
    for key, group in itertools.groupby(ordered, keyfunc):
        results.append((key, applyfunc(group)))
    return results
def number_axles(num_axles):
    """Numbers the axles starting with 1.

    :param num_axles: number of axles (non-negative int)
    :return: list of axle numbers ``[1, 2, ..., num_axles]``
    """
    # range() expresses the 1..n sequence directly; no manual append loop.
    return list(range(1, num_axles + 1))
def _dot(
vec1,
vec2,
):
"""Dot product between vec1 and vec2"""
return sum(vec1 * vec2) | fac8144bb691669f180e1d2114a9711ac03fb25b | 511,871 |
def extract_slug(url):
    """Extract `this-is-the-slug` given an url of the form
    `https://blog.kitware.com/this-is-the-slug/` or
    `https://blog.kitware.com/this-is-the-slug`.
    """
    # Drop at most one trailing slash, then take the last path segment.
    trimmed = url[:-1] if url.endswith("/") else url
    return trimmed.rsplit("/", 1)[-1]
def get_shortest_path(path_list):
    """
    Return the shortest path (the uppest path)
    (Used for find the uppest path for the files with the same filename)

    Parameter
        path_list: list[path-like str], e.g. ["./db.json", "./test/db.json", "./config/db.json"]
    Return
        return the shortest path, e.g. "./db.json"
    """
    # min() with key=len is a single pass and, like the original
    # index(min(...)) construction, returns the first shortest entry on ties.
    return min(path_list, key=len)
def sequence_id(data: list, ids: list) -> list:
    """
    Filter out images that do not have the sequence_id in the list of ids

    :param data: The data to be filtered
    :type data: list
    :param ids: The sequence id(s) to filter through
    :type ids: list
    :return: A feature list
    :rtype: list
    """
    kept = []
    for feature in data:
        if feature["properties"]["sequence_id"] in ids:
            kept.append(feature)
    return kept
import re
def remove_special(s):
    """Remove all special characters from the given string."""
    # Strips every occurrence of ? | : * / \ < > "
    pattern = re.compile(r"[?|:*/\\<>\"]+")
    return pattern.sub('', s)
def create_file_name(path, start):
    """Create the name of rst file.

    Example:
        resources.libraries.python.honeycomb.rst
        tests.perf.rst

    :param path: Path to a module to be documented.
    :param start: The first directory in path which is used in the file name.
    :type path: str
    :type start: str
    :returns: File name.
    :rtype: str
    """
    parts = path.split('/')
    # Keep every directory from `start` up to (but excluding) the file itself.
    first = parts.index(start)
    stem = ".".join(parts[first:-1])
    return stem + ".rst"
import logging
def select_best_haplotype_match(all_matches):
    """Returns the best HaplotypeMatch among all_matches.

    The best matching HaplotypeMatch is the one with the lowest match_metrics
    score.  Ties are logged and broken by returning the earliest match in
    all_matches.

    Args:
      all_matches: iterable[HaplotypeMatch]. An iterable of HaplotypeMatch objects
        we want to select the best match from.

    Returns:
      The best matching HaplotypeMatch object.
    """
    # Bug fix: materialize first — the original iterated all_matches twice
    # (sorted(), then a comprehension), which raised IndexError for one-shot
    # iterators.  min() is also O(n) vs. sorting just to take element 0.
    all_matches = list(all_matches)
    best = min(all_matches, key=lambda x: x.match_metrics)
    equivalents = [
        f for f in all_matches if f.match_metrics == best.match_metrics
    ]
    # redacted
    if len(equivalents) > 1:
        for i, f in enumerate(equivalents):
            extra_info = 'best' if i == 0 else i
            logging.warning('Equivalent match to best: %s [%s]', f, extra_info)
    return equivalents[0]
def rsrpad(data: bytes, block_size_bytes: int) -> bytes:
    """Return data padded to match the specified block_size_bytes.

    Arguments:
        data {bytes} -- Data string to pad.
        block_size_bytes {int} -- Block size for padding.

    Returns:
        bytes -- Padded data.

    The first byte of the padding is null to match the rocksmith standard,
    the remainder fill with the count of bytes padded (RS appears to use
    random chars).
    """
    pad_len = (block_size_bytes - len(data)) % block_size_bytes
    if pad_len == 0:
        return data
    # One leading null byte, then repeat the *total* pad count as the fill
    # byte for the remaining pad_len - 1 positions.
    filler = chr(pad_len).encode()
    return data + b"\x00" + filler * (pad_len - 1)
import re
def is_mdf_usgs_id(name):
    """
    Is this an MDF directory name?

    :type name: str or unicode or None
    :rtype: bool

    >>> is_mdf_usgs_id('LC80920740862013090LGN00')
    True
    >>> is_mdf_usgs_id('LC81070620632013228ASA00')
    True
    >>> is_mdf_usgs_id('NPP.VIIRS.7686.ALICE')
    False
    >>> is_mdf_usgs_id('TERRA.72239.S1A2C2D4R4')
    False
    >>> is_mdf_usgs_id('LANDSAT-8.1725')
    False
    >>> is_mdf_usgs_id('133.004.2013148000120310.ASA')
    False
    >>> is_mdf_usgs_id('LC80910760902013148ASA00_IDF.xml')
    False
    >>> is_mdf_usgs_id(None)
    False
    """
    if not name:
        return False
    # L + sensor letter (O/T/C) + 17 digits + 3-letter station + 2-digit version.
    return re.match(r"^L[OTC]\d{17}[A-Z]{3}\d{2}$", name) is not None
def recurse_data(data, keys, bucket=None, handler=None):
    """
    Performs a recursive search in data for values named by any key.
    If no such keys are present at root level, goes deeper into bucket values.
    If handler given, calls handler with each found value and key, otherwise
    returns the first found value.
    Both data and bucket contents can be dicts or lists or tuples.

    NOTE(review): the recursive call does not pass `handler` on, so in
    handler mode nested bucket contents are searched but never reported —
    confirm whether that is intentional.
    """
    # Non-container input cannot contain any of the keys.
    if not isinstance(data, (dict, list, tuple)): return None
    # Normalise to a list so a single dict and a list of dicts share one path.
    datas = data if isinstance(data, (list, tuple)) else [data]
    for item in [x for x in datas if isinstance(x, dict)]:
        for key in keys:
            if key in item:
                # Handler mode visits every hit; lookup mode returns the first.
                if handler: handler(item, key)
                else: return item[key]
        # No key found at this level: descend into the named bucket, if any.
        if bucket in item: return recurse_data(item[bucket], keys, bucket)
    return None | a7cf504b3a0b29db8d8b95007ecd3a0d4afc23d7 | 152,328 |
def are_sphere_and_aabb_colliding(sphere, aabb):
    """
    Return True if given sphere and AABB are colliding.

    :param SphereCollider sphere: sphere collider
    :param AABBCollider aabb: AABB collider
    :return: True if sphere and AABB are colliding
    :rtype bool:
    """
    # Accumulate the squared distance from the sphere centre to the box,
    # axis by axis; being inside the slab on an axis contributes zero.
    sq_dist = 0.0
    for axis in range(3):
        coord = sphere.center[axis]
        low = aabb.center[axis] - aabb.size3[axis] / 2
        high = aabb.center[axis] + aabb.size3[axis] / 2
        if coord < low:
            sq_dist += (low - coord) ** 2
        if coord > high:
            sq_dist += (coord - high) ** 2
    return sq_dist <= sphere.radius ** 2
from typing import List
import re
def _split_string_into_word_list(items_str: str) -> List[str]:
"""Preprocesses a string of one-word items delimited by ', ' into a sorted list of items.
:param items_str: String of items delimited by comma, e.g. 'cat, dog, bird, ...'.
:return: Sorted list of unique lowercase items.
"""
items = re.sub(r'[^\w\s]', '', items_str.lower()).split()
sorted_unique_items = sorted(set(items))
return sorted_unique_items | 246a641848e81a2c118b30c6684c2b0008e56474 | 411,505 |
import json
def load_json(file_path):
    """Return the parsed JSON content of *file_path* (read as UTF-8)."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
def findCompleteFragments(fragments, max_dist, current_position, max_collapse_dist=20):
    """Find complete fragments that are >max_dist bp away from
    the current BAM file position

    Parameters
    ----------
    fragments : dict
        A dictionary containing ATAC fragment information
    max_dist : int
        The maximum allowed distance between fragment start and
        end positions
    current_position : int
        The current position being looked at in the position-sorted
        BAM file
    max_collapse_dist : int
        Maximum allowed distance for fragments from the same cell
        barcode that share one Tn5 integration site (fragment
        start or end coordinate) to be collapsed into a single
        fragment.

    Moves completed fragments to a new dictionary
    Completed fragments will be deleted from the original dictionary
    """
    # Snapshot the keys: entries are deleted from `fragments` while iterating.
    allkey = list(fragments.keys())
    completed = dict()
    # A fragment is out of reach once it lies more than
    # max_dist + max_collapse_dist behind the current position.
    d = max_dist + max_collapse_dist
    for key in allkey:
        # NOTE(review): fragment values appear to be lists where [1] and [2]
        # are coordinates and [4] is a completed flag — confirm against the
        # caller that builds `fragments`.
        if fragments[key][4]: # complete fragment
            if (fragments[key][2] + d) < current_position:
                completed[key] = fragments[key][:-1] # removes "completed" T/F information
                del fragments[key]
        else:
            # remove incomplete fragments that are
            # too far away to ever be complete
            if fragments[key][1] is None:
                if (fragments[key][2] + d) < current_position:
                    del fragments[key]
            elif fragments[key][2] is None:
                if (fragments[key][1] + d) < current_position:
                    del fragments[key]
            else:
                # start and end coordinates present without a cell barcode
                del fragments[key]
    return completed | f4ed368da95241f8421494cbafefd4a7904b719b | 151,531 |
from typing import Set
from typing import Tuple
def get_corners(lights_length: int, lights_height: int) -> Set[Tuple[int, int]]:
    """
    Gets the coordinates of the four corners, given an x size and y size

    Args:
        lights_length (int): x size
        lights_height (int): y size

    Returns:
        Set[Tuple[int, int]]: A set of four (x, y) coords
    """
    max_x = lights_length - 1
    max_y = lights_height - 1
    return {(0, 0), (max_x, 0), (0, max_y), (max_x, max_y)}
def generate_checks(container, address, check_ports):
    """Generates the check dictionary to pass to consul of the form {'checks': []}"""
    port_checks = [
        {'id': '{}-port{}'.format(container, port),
         'name': 'Check TCP port {}'.format(port),
         'tcp': '{}:{}'.format(address, port),
         'Interval': '30s',
         'timeout': '4s'}
        for port in check_ports
    ]
    return {'checks': port_checks}
def max_sublist(a_list):
    """ Kadane's Algorithm

    >>> max_sublist([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    (6, 3, 6)
    >>> max_sublist([0, -1, 2,- 3, 5, 9, -5, 10])
    (19, 4, 7)

    :param a_list: The list to get the maximum sub-list for.
    :return: The sum from the sublist, the start index, and the end index. The last two are for testing.
    """
    best_sum = running = a_list[0]
    start_index = end_index = 0
    for idx, value in enumerate(a_list):
        running = max(0, running + value)
        if running >= best_sum:
            end_index = idx
        if running == 0:
            # A non-positive running sum means the best window can only
            # restart after this element.
            start_index = idx + 1
        best_sum = max(best_sum, running)
    return best_sum, start_index, end_index
from functools import reduce
from operator import getitem
def getitems(array, values):
    """
    Equivalent to array[*values]
    """
    result = array
    for index in values:
        result = result[index]
    return result
def extract_global_label(label_list):
    """This function extracts the global label of the report from the input annotation list

    Args:
        label_list (list): it contains all the classification annotations

    Returns:
        global_label (int): value corresponding to the global class (0 = stable, 1 = response, 2 = progression, 3 = unknown)

    Raises:
        AssertionError: if more than one value is found for the global class
        ValueError: if the global label was not found
    """
    # Map annotation strings (note the required leading space) onto classes;
    # replaces the repetitive if/elif chain.
    label_map = {
        " Global_stable": 0,
        " Global_response": 1,
        " Global_progression": 2,
        " Global_unknown": 3,
    }
    only_global = [x for x in label_list if "Global" in x]
    assert len(only_global) == 1, "Only one value should be retrieved"
    global_label = label_map.get(only_global[0])
    if global_label is None:
        raise ValueError("global label not found")
    return global_label
def generate_cluster_stack_name(job):
    """
    Given a job, generate a name for an associated compute cluster resource.

    Since this becomes an AWS (or OpenStack?) Stack name via CloudFormation
    it can only contain alphanumeric characters (upper and lower) and hyphens
    and cannot be longer than 128 characters.

    :param job: A Job instance (with ComputeResource assigned)
    :type job: Job
    :return: A cluster ID to use as the stack name.
    :rtype: str
    """
    return "cluster-{}----{}".format(job.compute_resource.id, job.id)
def _Z(order):
"""
Returns the constants for Yoshida Triple Jump.
Used to compose higher order (even) integrators.
References
----------
.. [1] Yoshida, Haruo,
"Construction of higher order symplectic integrators";
Physics Letters A, vol. 150, no. 5-7, pp. 262-268, 1990.
`DOI: <https://doi.org/10.1016/0375-9601(90)90092-3>`__
"""
n = (order - 2) / 2
x = 2 ** (1 / (2 * n + 1))
Z0 = -x / (2 - x)
Z1 = 1 / (2 - x)
return Z0, Z1 | 596f82ac02c896a097d0480c7ee69f7df9f75593 | 250,920 |
import re
def checkIPAddr(ipAddr: str) -> bool:
    """
    Check IP Address

    Parameters
    ----------
    ipAddr: str
        IP Address from UI

    Returns
    ----------
    boolean
        True when the address has four octets, each a number in 0-255.
    """
    pattern = r'\d{1,3}'
    lst_ipAddr = ipAddr.split('.')
    if len(lst_ipAddr) != 4:
        print('number is false')
        return False
    for item in lst_ipAddr:
        if re.fullmatch(pattern, item) is None:
            print('pattern mismatch')
            return False
        # Bug fix: the regex alone accepted octets like 999; enforce 0-255.
        if int(item) > 255:
            print('value out of range')
            return False
    return True
import math
def yield_strength(impactor_density_kgpm3):
    """
    Yield strength equation for breakup altitude calculation. Only valid for density range 1000 to 8000.

    :param impactor_density_kgpm3: Impactor density in kg/m^3
    :returns: Yield Strength in Pascals.
    :Reference: EarthImpactEffect.pdf, Equation 10
    """
    exponent = 2.107 + 0.0624 * math.sqrt(impactor_density_kgpm3)
    return 10 ** exponent
import json
def from_json(data, **kwargs):
    """Reads data from json str.

    :param str data: data to read
    :param kwargs kwargs: kwargs for json loads
    :return: read data
    :rtype: dict
    """
    parsed = json.loads(data, **kwargs)
    return parsed
def get_refinement_neighbors(leaf,extent=2):
    """
    Get the list of neighbors used for refinement.
    This combines the neighbor and upper_neighbor list into
    one final list of neighbors

    Parameters
    ----------
    leaf : NDTree.node
        The leaf node we are evaluating
    extent : int
        The extent of the stencil, -extent,...,0,...,extent

    Returns
    -------
    final_list : list
        The final list of neighbors (entries stay None where the coarse
        neighbor is absent)
    """
    # find_neighbors returns parallel lists indexed by stencil position;
    # offsets and neighbor_indices are unused here.
    offsets, neighbor_indices,neighbors, upper_neighbors = leaf.find_neighbors(extent=extent)
    total_neighbors = len(neighbors)
    # Even if already tagged, still need to check new neighbors
    final_list = [None]*total_neighbors
    for i in range(total_neighbors):
        if upper_neighbors[i] is not None:
            node = upper_neighbors[i]
            # Presumably: fall back to the same-level neighbor when the
            # coarser node has been subdivided — confirm node.leaf semantics.
            if not node.leaf:
                node = neighbors[i]
            final_list[i] = node
    return final_list | fda4be8c516c09e7e7d7a83098117bcfe69228c5 | 215,647 |
def make_destination_paths_map(source_paths, destination_dir_path,
                               strip_prefix=None):
    """Create a mapping of source paths to destination paths.

    Args:
      source_paths: An iterable of absolute paths.
      destination_dir_path: A destination directory path.
      strip_prefix: A path prefix to strip from source paths.

    Raises:
      ValueError: On invalid input of source_paths or strip_prefix.

    Returns:
      A mapping of source paths to destination paths.
    """
    # Assume that source_paths and destination_dir_path have already been
    # validated to avoid re-processing (and since they'd fail at lower layers).
    if not hasattr(source_paths, '__iter__'):
        raise ValueError(
            '"source_paths" must be an iterable. Got: %r' % source_paths)
    # Normalise both directory-like inputs so they end with a single slash.
    if not destination_dir_path.endswith('/'):
        destination_dir_path += '/'
    if not strip_prefix:
        strip_prefix = '/'
    elif not strip_prefix.endswith('/'):
        strip_prefix += '/'
    destination_map = {}
    for source_path in source_paths:
        if not source_path.startswith(strip_prefix):
            raise ValueError(
                'Mismatch of source_paths and strip_prefix: could not strip '
                '%r from source path %r.' % (strip_prefix, source_path))
        relative_path = source_path[len(strip_prefix):]
        destination_map[source_path] = destination_dir_path + relative_path
    return destination_map
def single_search(regex, string, default=None):
    """First-match search of a string given a compiled regex; *default* when no match."""
    found = regex.search(string)
    return found.group(0) if found else default
import re
import string
def clean_text(text):
    """
    Function to clean the text.

    Parameters:
        text: the raw text as a string value that needs to be cleaned

    Returns:
        cleaned_text: the cleaned text as string
    """
    lowered = text.lower()
    # strip HTML tags
    without_html = re.sub('<.*?>', '', lowered)
    # strip punctuation in one C-level pass
    without_punct = without_html.translate(
        str.maketrans('', '', string.punctuation))
    return without_punct.strip()
def bf4_text(bf4_element):
    """ Checks BeautifulSoup element for existence and returns str"""
    if bf4_element is None:
        return ""
    return bf4_element.text
def almost_there(n):
    """
    Given an integer n, return True if n is within 10 of either 100 or 200
    :param n:int
    :return: bool

    almost_there(90) --> True
    almost_there(104) --> True
    almost_there(150) --> False
    almost_there(209) --> True
    """
    return any(abs(target - n) <= 10 for target in (100, 200))
import pickle
import base64
def ObjectFromBase64EncodedString(EncodedObject):
    """Generate Python object from a bas64 encoded and pickled
    object string.

    Arguments:
        str: Base64 encoded and pickled object string.

    Returns:
        object : Python object or None.
    """
    if EncodedObject is None:
        return None
    # SECURITY: pickle.loads executes arbitrary code during unpickling;
    # only feed this trusted, internally-produced data.
    raw = base64.b64decode(EncodedObject)
    return pickle.loads(raw)
import re
def parse_string_into_executable_command(command, remove_quotes):
    """Function that takes in a string command, and parses it into a subprocess arg list

    Parameters
    ----------
    command : str
        The command as a string
    remove_quotes : bool
        When True, strip the surrounding double quotes from quoted arguments

    Returns
    -------
    run_command : list of str
        The command as a list of subprocess args
    """
    if '"' not in command:
        return command.split(' ')
    run_command = []
    quoted = re.findall('"[^"]*"', command)
    unquoted = re.split('"[^"]*"', command)
    for i, quoted_arg in enumerate(quoted):
        # Split the plain text preceding this quoted argument.
        run_command = run_command + unquoted[i].strip().split(' ')
        if remove_quotes:
            quoted_arg = quoted_arg[1:-1]
        run_command.append(quoted_arg)
    # Bug fix: text after the final quoted argument previously indexed
    # non_strings out of range (IndexError); handle it like the other
    # unquoted segments instead.
    tail = unquoted[len(quoted)].strip()
    if tail:
        run_command = run_command + tail.split(' ')
    return run_command
import itertools
def is_clique(old_graph, vertices):
    """
    Tests if vertices induce a clique in the graph
    Multigraphs are reduced to normal graphs

    Parameters
    ----------
    old_graph : networkx.Graph or networkx.MultiGraph
            graph
    vertices : list
            vertices which are tested

    Returns
    -------
    bool
        True if vertices induce a clique
    """
    subgraph = old_graph.subgraph(vertices)
    # Remove selfloops so the clique is well defined
    # NOTE(review): Graph.selfloop_edges() was removed in networkx 2.4;
    # newer versions require nx.selfloop_edges(subgraph) — confirm the
    # pinned networkx version.
    have_edges = set(subgraph.edges()) - set(subgraph.selfloop_edges())
    # Sort all edges to be in the (low, up) order
    have_edges = set([tuple(sorted(edge, key=int))
                      for edge in have_edges])
    # A clique has exactly one edge per unordered pair of vertices.
    want_edges = set([
        tuple(sorted(edge, key=int))
        for edge in itertools.combinations(vertices, 2)
    ])
    return want_edges == have_edges | 97184b05c1eb89b70f83552e74e04de966790036 | 650,538 |
from typing import Any
def modifier(key: str, value: Any) -> tuple[str, dict[str, Any]]:
    """Key to modifier value.

    Splits on the *last* ``__`` so field names that themselves contain
    ``__`` are preserved, e.g. ``"a__b__gte"`` -> ``("a__b", {"$gte": value})``.

    Args:
        key (str): Any str with `__` and modifier.
        value (Any): Any value.

    Returns:
        tuple[str, dict[str, Any]]: Key and dictionary value.
    """
    # Bug fix: a plain split("__") raised ValueError (too many values to
    # unpack) whenever the key contained more than one "__".
    field, suffix = key.rsplit("__", 1)
    return (field, {"$%s" % suffix: value})
import re
def make_regex(regexes):
    """
    Compile regexes to be used in scanner.

    :param regexes: list of regex
    :return: compiled regex
    """
    combined = '|'.join(regexes)
    return re.compile('(%s)' % combined, re.IGNORECASE | re.MULTILINE)
def supports(cls, *interfaces):
    """Can be used to query whether a particular class supports the given API"""
    # Classes that never declared an API support nothing.
    if not hasattr(cls, "_implements"):
        return False
    return all(interface in cls._implements for interface in interfaces)
def generate_ancestry_path(full_name):
    """Generate ancestry path from full_name.

    Args:
        full_name (str): Full name of the resource.

    Returns:
        str: Ancestry path.
    """
    supported_ancestors = ('organization', 'folder', 'project')
    segments = full_name.split('/')
    parts = []
    # Pair each supported ancestor type with the id that follows it.
    for idx in range(len(segments) - 1):
        if segments[idx] in supported_ancestors:
            parts.append(segments[idx] + '/' + segments[idx + 1] + '/')
    return ''.join(parts)
def get_all_coins(exchanges):
    """
    :param exchanges: [] of CryptoExchange
        List of exchanges
    :return: [] of str
        List of coins in all exchanges (deduplicated, arbitrary order)
    """
    unique_coins = set()
    for exchange in exchanges:
        unique_coins.update(exchange.build_wallets().keys())
    return list(unique_coins)
def normalize_tuple(value, n, name):
    """Transforms a single integer or iterable of integers into an integer tuple.

    Arguments:
        value: The value to validate and convert. Could an int, or any iterable
          of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. "strides" or
          "kernel_size". This is only used to format error messages.

    Returns:
        A tuple of n integers.

    Raises:
        ValueError: If something else than an int/long or iterable thereof was
          passed.
    """
    # A lone int is broadcast to all n positions.
    if isinstance(value, int):
        return (value,) * n
    else:
        try:
            value_tuple = tuple(value)
        except TypeError:
            raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
        if len(value_tuple) != n:
            raise ValueError('The `' + name + '` argument must be a tuple of ' +
                       str(n) + ' integers. Received: ' + str(value))
        # Every element must be coercible to int; report the offender if not.
        for single_value in value_tuple:
            try:
                int(single_value)
            except (ValueError, TypeError):
                raise ValueError('The `' + name + '` argument must be a tuple of ' +
                           str(n) + ' integers. Received: ' + str(value) + ' '
                           'including element ' + str(single_value) + ' of type' +
                           ' ' + str(type(single_value)))
        return value_tuple | d69529a5dedef1db537f6f090979dff10d2ee306 | 277,072 |
def hosts_loader(filename: str) -> dict:
    """ Loads host mapping file

    :param str filename: path to file
    :return: mapping as dictionary
    """
    mapping = {}
    with open(filename) as mapfile:
        for raw in mapfile:
            # Comment lines start with '#' in column 0 (matching the
            # original pre-strip check); blank lines are skipped too.
            if raw.startswith('#'):
                continue
            entry = raw.strip()
            if not entry:
                continue
            fields = entry.split()
            mapping[fields[0]] = ' '.join(fields[1:])
    return mapping
def get_column(data, header):
    """
    Features:
        Get the data under a specific header from the dataset.

    Parameters:
        data: List. The whole dataset; data[0] is the header row.
        header: String. The header of the column to extract.

    Returns:
        List of string. The column values (header row excluded).

    Raises:
        ValueError: if *header* is not present in the header row.
    """
    headers = data[0]
    matches = [i for i, name in enumerate(headers) if name == header]
    if not matches:
        # Bug fix: a missing header previously fell through with index 0
        # and silently returned the first column.
        raise ValueError("header %r not found in %r" % (header, headers))
    col = matches[-1]  # keep last-wins behaviour for duplicate headers
    return [row[col] for row in data[1:]]
def check_annot(df, img_name):
    """
    Return True when *df* has at least one annotation row whose FRAME
    column equals *img_name*, False otherwise.

    :param df: pandas DataFrame with a 'FRAME' column
    :param img_name: frame/image name to look up
    :rtype: bool
    """
    # Commented-out debug prints removed; isin keeps the original
    # membership semantics for the single frame name.
    return df.loc[df['FRAME'].isin([img_name])].shape[0] != 0
def shl(x, n):
    """Shift left n bits (zeros come in from the right)

    >>> shl(0.5, 1)
    1.0
    >>> shl(0.1, 1)
    0.2
    """
    scale = 2 ** n
    return x * scale
import re
def camel2snake(name):
    """
    Convert name of callback by inserting underscores between small and capital
    letters. For example, `TestCallback` becomes `test_callback`.
    """
    # First pass splits a char followed by an Uppercase+lowercase word;
    # second pass splits lower/digit -> upper boundaries; then lowercase.
    partially_converted = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", partially_converted).lower()
def _surface_azimuth(latitude):
"""
Returns the best surface azimuth.
South (180°) for north hemisphere,
North (0°) for south hemisphere.
"""
if latitude > 0:
surface_azimuth = 180
else:
surface_azimuth = 0
return surface_azimuth | bbefd34fcacd2c68ab11f0a8e1adf7c33e200ad0 | 153,731 |
def get_kwargs(kwargs, key, default):
    """
    Get an element in kwargs or return the default.

    :param kwargs: dictionary of keyworded arguments (may be None)
    :type kwargs: dict
    :param key: key to retrieve
    :type key: str
    :param default: default value to return
    :type default: mixed
    :return: the retrieved value from kwargs or default
    :rtype: mixed
    """
    if kwargs is None:
        return default
    # dict.get replaces the manual `key in kwargs.keys()` membership test.
    return kwargs.get(key, default)
def extract_sequences(references):
    """
    Return a dictionary with reference ids as keys and their corresponding
    sequences as values.

    :param references: Dictionary with reference models.
    :rtype: dict
    :return: Reference ids as keys and their corresponding sequences as values
    """
    return {
        ref_id: model["sequence"]["seq"]
        for ref_id, model in references.items()
    }
def tvi(b3, b4, b6):
    """
    Transformed Vegetation Index (Broge and Leblanc, 2001).

    .. math:: TVI = 0.5 * (120 * (b6 - b3) - 200 * (b4 - b3))

    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b6: Red-edge 2.
    :type b6: numpy.ndarray or float
    :returns TVI: Index value

    .. Tip::
        Broge, N.H., Leblanc, E., 2001. Comparing prediction power and
        stability of broadband and hyperspectral vegetation indices for
        estimation of green leaf area index and canopy chlorophyll density.
        Remote Sensing of Environment 76, 156-172.
        doi:10.1016/S0034-4257(00)00197-8.
    """
    red_edge_term = 120 * (b6 - b3)
    red_term = 200 * (b4 - b3)
    return 0.5 * (red_edge_term - red_term)
def should_suspend(partial_result) -> bool:
    """Check the state of the result to determine if the orchestration should suspend."""
    if partial_result is None:
        return False
    if not hasattr(partial_result, "is_completed"):
        return False
    return not partial_result.is_completed
import math
def diameter(circunf) -> float:
    """Returns the diameter of a circle with given surface length."""
    # d = C / pi
    diameter_value = circunf / math.pi
    return diameter_value
from typing import Callable
import operator
def get_op_from_name(op_name: str) -> Callable:
    """
    The operator function for a given op name.

    Parameters
    ----------
    op_name : string
        The op name, in form of "add" or "__add__".

    Returns
    -------
    function
        A function performing the operation.
    """
    bare_name = op_name.strip("_")
    if hasattr(operator, bare_name):
        return getattr(operator, bare_name)
    # Names like "radd" are reflected ops: drop the leading 'r' and swap
    # the operands.
    forward = getattr(operator, bare_name[1:])

    def reflected(x, y):
        return forward(y, x)

    return reflected
def stringifyOptionValue(value):
    """
    Convert option value from in-memory representation to a suitable string.
    In particular, boolean values are converted to '0' or '1'.
    """
    # `is` comparisons keep 1/1.0 distinct from True (and 0/0.0 from False).
    if value is True:
        return '1'
    if value is False:
        return '0'
    return str(value)
def eu_float_string_to_float(data):
    """
    Convert a number string using EU conventions ("1.234,56") to a float.

    :param data: string of the float in EU conventions
    :type data: str
    :return: converted float from the string
    :rtype: float
    :raises TypeError: if *data* is not a string
    :raises ValueError: if the string cannot be parsed as a number
    """
    if not isinstance(data, str):
        raise TypeError("Input data should be string")
    # Drop thousands separators first, then turn the decimal comma into a dot.
    normalised = data.replace(".", "").replace(",", ".")
    try:
        return float(normalised)
    except ValueError as exc:
        # ValueError is more precise than the bare Exception raised before,
        # and (being an Exception subclass) keeps existing handlers working;
        # chain the original cause for debuggability.
        raise ValueError(
            f"Could not convert string {data} to float: {exc}") from exc
def get_shape(image):
    """Get the dimensions of an image."""
    # shape is (height, width[, channels]); only the first two matter here.
    height, width = image.shape[0], image.shape[1]
    return height, width
def ternary(condition, first_opernad, second_operand):
    """Same as `first_operand if condition else second_operand`"""
    if condition:
        return first_opernad
    return second_operand
def SegmentsIntersect(l1, r1, l2, r2):
    """Returns true if [l1, r1) intersects with [l2, r2).

    Args:
      l1: int. Left border of the first segment.
      r1: int. Right border (exclusive) of the second segment.
      l2: int. Left border of the second segment.
      r2: int. Right border (exclusive) of the second segment.
    """
    # Each segment must start before the other one ends.
    second_starts_before_first_ends = l2 < r1
    first_starts_before_second_ends = r2 > l1
    return second_starts_before_first_ends and first_starts_before_second_ends
def calc_acc(ta, precip, tacc=274.16, **config):
    """
    calculate accumulation when precip is below threshold

    :param ta: grid of current temperature (K)
    :param precip: grid of current precipitation (mm)
    :param tacc: temperature threshold for accumlation (K)
    :return: acc: grid of accumulation in current timestep (mm)
    """
    # Copy so the caller's precip grid is left untouched; cells at or above
    # the threshold temperature accumulate nothing.
    accumulation = precip.copy()
    accumulation[ta >= tacc] = 0
    return accumulation
def has_numbers(string):
    """Returns true if there are digits in the string"""
    for char in string:
        if char.isdigit():
            return True
    return False
import base64
def _encode_bytes_if_required(data):
"""
If the specified data is represented as bytes, convert it to a UTF8 string (using Base64 encoding if the bytes do not represent valid UTF8).
This is needed because json.dumps cannot serialise bytes.
:param data: The data to encode.
:return: If the data is represented as bytes, an equivalent UTF8 or Base64-encoded string; otherwise, the original data.
"""
if (not isinstance(data, bytes)):
return data
try:
data = data.decode('utf8')
except UnicodeDecodeError:
data = base64.encodebytes(data).decode('ascii')
return data | 4d77db319ff85685ae79361b8d024565d09018a6 | 403,035 |
def after(trace):
    """
    Get the after- relations given a list of activities

    Parameters
    --------------
    trace
        List activities

    Returns
    --------------
    rel
        After- inside the trace: every ordered pair (trace[i], trace[j]) with i < j
    """
    # Start the inner range at i + 1 instead of filtering j > i over the
    # full cross product: same set, half the iterations.
    return set(
        (trace[i], trace[j])
        for i in range(len(trace))
        for j in range(i + 1, len(trace))
    )
def get_range(coordinates, axis, buffer):
    """
    Based on lat/lon of the underlying data, get a range which defines the
    bounding box for the map

    Parameters
    ----------
    coordinates: xarray DataArray
        Coordinate data of the underlying data
    axis: str
        axis of the data (lat/lon) by which to slice the coordinates DataArray
    buffer: float
        value, expressed as a fraction of the range of the underlying data,
        to add as a buffer to the bounding box

    Returns
    -------
    list, defining the range as input_range +/- input_range * buffer
    """
    axis_values = coordinates.loc[dict(coordinates=axis)]
    _range = [axis_values.min().item(), axis_values.max().item()]
    # Bug fix: the buffer parameter was documented but ignored — the offset
    # was hard-coded to 0.1 of the range.
    _offset = abs(_range[1] - _range[0]) * buffer
    return [_range[0] - _offset, _range[1] + _offset]
def _format_tags(line: list | dict, slice_: list) -> str:
"""
Create or format tags.
Parameters
----------
line: Line to add a tag.
slice_: Tag interval.
Returns
-------
str: formatted html tag
Examples
--------
>>> from pymove.visualization.folium import _format_tags, plot_points
>>> move_df.head()
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984198 116.319322 2008-10-23 05:53:06 1
2 39.984224 116.319402 2008-10-23 05:53:11 1
3 39.984211 116.319389 2008-10-23 05:53:16 1
4 39.984217 116.319422 2008-10-23 05:53:21 1
>>> _format_tags(
>>> line={
>>> 'lat': 39.984094,
>>> 'lon': 116.319236,
>>> 'datetime': '2008-10-23 05:53:05',
>>> 'id': 1
>>> },
>>> slice_=['lat', 'lon', 'datetime', 'id']
>>> )
lat: 39.984094<br/>lon: 116.319236<br/>datetime: 2008-10-23 05:53:05<br/>id: 1
"""
map_formated_tags = map(lambda tag: f'{tag}: {line[tag]}', slice_)
return '<br/>'.join(map_formated_tags) | cf9af9374c1413250724ce19601fb871abec8598 | 405,990 |
def normalize_df(series):
    """Rescale a signal to the [0, 1] range (min-max normalization)."""
    low = series.min()
    high = series.max()
    return (series - low) / (high - low)
def element_strip(elem):
    """Normalize an element name (drop resolution tags and non-letters) to match reference-material column names."""
    for marker in ('(LR)', '(MR)', '(HR)'):
        elem = elem.replace(marker, '')
    return ''.join(filter(str.isalpha, elem))
import json
def is_serializable(data):
    """Return True if ``data`` can be serialized to JSON, False otherwise."""
    try:
        json.dumps(data)
    except TypeError:
        # json.dumps raises TypeError for unsupported types (sets, objects, ...).
        return False
    return True
def f_to_c(temp_f):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    offset = temp_f - 32
    return offset * 5 / 9
def semi_angle_limit(Lambda, C_3=1):
    """
    Convergence semi-angle if the probe size is diffraction limited.

    Params:
        Lambda: electron wavelength in Å
        C_3: spherical aberration constant in mm, default 1.
    Returns:
        Convergence semi-angle in mrad.
    """
    c3_angstrom = C_3 * 1.e7  # mm -> Å
    # Result in rad, scaled by 1e3 to mrad.
    return 1.51 * c3_angstrom ** (-1. / 4) * Lambda ** (1. / 4) * 1e3
def send_and_exhaust(iterator, arg, default):
    """Send a single value into a coroutine, drain it, and return the last
    value it produced — or ``default`` if it yielded nothing at all."""
    try:
        last = iterator.send(arg)
    except StopIteration:
        # The coroutine finished immediately; nothing was produced.
        return default
    for last in iterator:
        pass
    return last
def not_empty_string(val: str):
    """Return True only if ``val`` is a string with at least one character."""
    if not isinstance(val, str):
        return False
    return len(val) > 0
def infile_to_yaml(yaml_schema, infile_schema, infile_struct):
    """Transform elements in a SOG Fortran-ish infile data structure
    into those of a SOG YAML infile data structure.

    :arg yaml_schema: SOG YAML infile schema instance
    :type yaml_schema: :class:`YAML_Infile` instance

    :arg infile_schema: SOG Fortran-ish infile schema instance
    :type infile_schema: :class:`SOG_Infile` instance

    :arg infile_struct: SOG Fortran-ish infile data structure
    :type infile_struct: nested dicts

    :returns: SOG YAML infile data structure.
    :rtype: nested dicts
    """
    def fetch(node, field):
        # Read one field of the node out of the Fortran-ish structure.
        return infile_schema.get_value(
            infile_struct, '{0.infile_key}.{1}'.format(node, field))

    def as_yaml_item(node):
        # Build the YAML leaf dict for a single node; 'units' is optional.
        item = {
            'value': fetch(node, 'value'),
            'description': str(fetch(node, 'description')),
            'variable name': node.var_name,
        }
        units = fetch(node, 'units')
        if units is not None:
            item['units'] = str(units)
        return item

    def descend(node):
        # A node whose children are all leaves is itself converted;
        # otherwise recurse one level deeper.
        if not any(child.children for child in node.children):
            return as_yaml_item(node)
        return {child.name: descend(child) for child in node.children}

    return {node.name: descend(node) for node in yaml_schema}
def format_results_by_model(results, model):
    """
    Format a results dictionary for display, printing the calculated model
    parameters appropriate to the model used.

    :param results: dict of model outputs (keys depend on ``model``).
    :param model: one of 'exponential', 'power_law', 'weibull'.
    :return: Multiline string of 'parameter name: value' lines.
    """
    lines = []
    if model == 'exponential':
        for i in range(results['numberOfSegments']):
            lines.append(f"Segment {i} Bt: {results['segmentBts'][i]:.3f}")
            lines.append(
                f"Segment {i} Coefficient: {results['segmentCoefficients'][i]:.3f}")
            lines.append(
                f"Segment {i} Exponent: {results['segmentExponents'][i]:.3f}")
            # Limits are fence-posts, so segment i ends at limit i + 1.
            lines.append(
                f"Segment {i} Limit: {results['segmentLimits'][i + 1]:.3f}")
            lines.append(
                f"Segment {i} Volume: {results['segmentVolumes'][i]:.1f}")
    elif model == 'power_law':
        lines.append(f"Coefficient: {results['coefficient']:.3f}")
        lines.append(f"Exponent: {results['exponent']:.3f}")
        lines.append(
            f"Suggested Proximal Limit: {results['suggestedProximalLimit']:.1f}")
    elif model == 'weibull':
        lines.append(f"k: {results['k']:.3f}")
        lines.append(f"lambda: {results['lambda']:.0f}")
        lines.append(f"theta: {results['theta']:.5f}")
        lines.append(f"MRSE of fit: {results['mrse']:.03f}")
        lines.append(f"Total Volume: {results['estimatedTotalVolume']:.2f}")
    return ''.join(line + '\n' for line in lines)
def string_and_strip(*items):
    """Convert each item to a string and strip surrounding whitespace."""
    return [text.strip() for text in map(str, items)]
import crypt
def generate_mosquitto_user_line(username, password):
    """Build a mosquitto password-file line with a crypt-hashed password.

    NOTE(review): the ``crypt`` module is deprecated (removed in Python
    3.13) — confirm the target runtime before relying on this.

    :param username: username to use
    :param password: password that will be hashed (SHA512)
    :return: a ``username:hash`` line as expected by mosquitto
    """
    salt = crypt.mksalt(crypt.METHOD_SHA512)
    hashed = crypt.crypt(password, salt)
    return f"{username}:{hashed}"
def _GetOpenfoamVersion(vm):
    """Return the installed OpenFOAM version reported by the vm's environment."""
    # RemoteCommand returns a sequence whose first element is stdout.
    result = vm.RemoteCommand('echo $WM_PROJECT_VERSION')
    return result[0]
def parse_dbotu_parameters(summary_obj, amplicon_type):
"""
Parses summary file for dbOTU options.
Parameters
----------
summary_obj SummaryParser object
amplicon_type '16S' or 'ITS'
Returns
-------
dist max sequence dissimilarity (default = 0.1)
abund minimum fold difference for comparing two OTUs (0=no abundance criterion; default 10.0)
pval minimum p-value for merging OTUs (default: 0.0005)
"""
if amplicon_type == "16S":
try:
dbotu_flag = summary_obj.attribute_value_16S['DBOTU']
except:
dbotu_flag = 'False'
try:
dist = summary_obj.attribute_value_16S['DISTANCE_CRITERIA']
except:
dist = 0.1
try:
abund = summary_obj.attribute_value_16S['ABUNDANCE_CRITERIA']
except:
abund = 10.0
try:
pval = summary_obj.attribute_value_16S['DBOTU_PVAL']
except:
pval = 0.0005
elif amplicon_type == "ITS":
try:
dbotu_flag = summary_obj.attribute_value_ITS['DBOTU']
except:
dbotu_flag = 'False'
try:
dist = summary_obj.attribute_value_ITS['DISTANCE_CRITERIA']
except:
dist = 0.1
try:
abund = summary_obj.attribute_value_ITS['ABUNDANCE_CRITERIA']
except:
abund = 10.0
try:
pval = summary_obj.attribute_value_ITS['DBOTU_PVAL']
except:
pval = 0.0005
else:
raise NameError("Incorrect amplicon type specified for dbOTU summary file parsing")
return dbotu_flag, dist, abund, pval | 711db64a54c335c05ff31f5149490359a233e33f | 612,884 |
import socket
def test_ssh(host, throw=False):
    """
    Test whether an SSH connection to ``host`` (port 22) can be opened
    within a 1-second timeout.

    Useful to check that a newly created instance is sshable.

    :param host: hostname or IP address to probe.
    :param throw: if True, re-raise timeout/refused errors instead of
        returning False.
    :return: True if the TCP connection succeeded, False otherwise.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)
    try:
        sock.connect((host, 22))
        return True
    except (socket.timeout, ConnectionRefusedError):
        # Was previously `e.errno != 111` — 111 is Linux's ECONNREFUSED;
        # ConnectionRefusedError expresses the same thing portably.
        if throw:
            raise
        return False
    finally:
        sock.close()
import re
def cleanDateString(dateString):
    """
    Removes characters in date string that will be disregarded when interpreting the date

    >>> cleanDateString("[18]42")
    '1842'
    >>> cleanDateString("vermutlich um 1900")
    'vermutlich um 1900'
    """
    # Strip square brackets; everything else is preserved verbatim.
    return dateString.translate(str.maketrans('', '', '[]'))
import re
def get_ccs_pf_filters(ccs_root):
    """Return the list of PF Filters installed with a ccs installation.

    Args:
        ccs_root (str): full path to root of ccs installation

    Returns:
        list: PF Filter names (strings); empty if none are declared
    """
    pattern = re.compile(r"^PF_FILTERS=([a-zA-Z0-9\,]*)", flags=re.IGNORECASE)
    with open(ccs_root + '/eclipse/ccs.properties') as props:
        for line in props:
            match = pattern.match(line)
            if match:
                # Filters are declared as a single comma-separated value.
                return match.group(1).split(',')
    return list()
from functools import reduce
def calculate_average(items):
    """Return the arithmetic mean of the values in ``items``.

    :param items: non-empty sized iterable of numbers.
    :raises ZeroDivisionError: if ``items`` is empty.
    """
    # Builtin sum() replaces the hand-rolled reduce(lambda x, y: x + y, ...).
    return sum(items) / len(items)
import functools
def public(func):
    """
    Decorator marking a method as publicly accessible via HTTP requests.

    Sets ``publicly_accessible = True`` on the function (the flag is copied
    onto the wrapper by functools.wraps, which merges ``__dict__``).

    :param func: function to make public
    """
    func.publicly_accessible = True

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper
def assay_transpose(assay_df):
    """
    Transpose an assay table from genes-as-rows to patients-as-rows.

    assay_t_df = assay_transpose(assay_df)

    Parameters
    ----------------
    assay_df: dataframe of assay data with genes as rows and patients as columns

    Returns
    ----------------
    dataframe with patients as rows and genes as columns, plus an
    'entity_submitter_id' column holding the patient identifiers
    """
    transposed = assay_df.T
    # The first transposed row holds the gene names; promote it to the header.
    transposed.columns = transposed.iloc[0]
    transposed = transposed.iloc[1:]
    transposed['entity_submitter_id'] = list(transposed.index)
    return transposed
def links_to_array(p):
    """Extract every anchor with an href from a parsed page.

    Returns a dict with 'total' (count) and 'list' of
    {'href': ..., 'text': ...} entries; anchors without href are skipped.
    """
    found = []
    for anchor in p.xpath('//a'):
        if 'href' in anchor.attrib:
            found.append({
                'href': anchor.attrib['href'],
                'text': anchor.text_content(),
            })
    return {'total': len(found), 'list': found}
def is_pipable(line):
    """Return True for requirement lines that pip can install: not a
    comment line and not tagged with '# not_pipable'."""
    return "# not_pipable" not in line and not line.startswith('#')
from typing import List
def calc_diff(arr1: List[List[str]], arr2: List[List[str]]) -> List[int]:
    """Calculate positionwise difference of two List of List of iterables(eg. List of List of strings). This is used for plotting the avalanche effect induced during each round of DES."""
    diff = []
    for pos in range(len(arr1[0])):
        # Count character mismatches at this position across all rows.
        mismatches = sum(
            1
            for row in range(len(arr1))
            for a, b in zip(arr1[row][pos], arr2[row][pos])
            if a != b
        )
        diff.append(mismatches)
    return diff
def nvmf_subsystem_allow_any_host(client, nqn, disable, tgt_name=None):
    """Configure a subsystem to allow any host to connect or to enforce the host NQN list.

    Args:
        nqn: Subsystem NQN.
        disable: Allow any host (true) or enforce allowed host list (false).
        tgt_name: name of the parent NVMe-oF target (optional).

    Returns:
        True or False
    """
    params = {'nqn': nqn, 'allow_any_host': not disable}
    if tgt_name:
        params['tgt_name'] = tgt_name
    return client.call('nvmf_subsystem_allow_any_host', params)
def find_out_stocks(last_phase_codes, this_phase_codes):
    """
    Find the stocks that were selected in the previous phase but dropped
    in the current phase; these must be sold.

    :param last_phase_codes: stock codes of the previous phase
    :param this_phase_codes: stock codes of the current phase
    :return: list of codes that were dropped this phase
    """
    return [code for code in last_phase_codes if code not in this_phase_codes]
import torch
def gamma_pdf(alpha, beta, x):
    """
    PDF of a Gamma(alpha, beta) distributed random variable, evaluated at x.

    Computed as beta^alpha / Gamma(alpha) * x^(alpha-1) * exp(-beta * x).
    """
    normalizer = torch.pow(beta, alpha) / torch.exp(torch.lgamma(alpha))
    return normalizer * torch.pow(x, alpha - 1) * torch.exp(-beta * x)
def get_direction_letters(d):
    """
    Map a numerical compass bearing onto the 16-point abbreviated rose.

    :param d: a float representing a compass bearing (degrees)
    :return: a string containing the 16 point bearing in letters
    """
    points = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
              'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')
    # Shift by half a sector (11.25°) so each point owns a 22.5° arc,
    # wrapping back to N past 348.75°.
    sector = int((d + 11.25) / 22.5) % 16
    return points[sector]
def _calc_gfloat_params_backward(op, *grads):
"""Gradients for the CalcGfloatParams op."""
return None | 01ad275e92ba829aa05eef38e831f81cc1d59999 | 593,624 |
from pathlib import Path
from typing import Set
def get_unit_types(unit_directory: Path) -> Set[str]:
    """Return the unit types found in a directory.

    The type is the part of each filename's stem before the first '_'.
    """
    return {entry.stem.split("_")[0] for entry in unit_directory.glob("*")}
def _build_qualifier_filters(options):
"""
Build a dictionary defining the qualifier filters to be processes from
their definitons in the Click options dictionary. There is an entry
in the dictionary for each qualifier filter where the key is the
association name and the value is True or False depending in the
value of the option ('x' or 'no-x' )
"""
qualifier_filters = {}
if options['association'] is not None:
# show_assoc = options['association']
qualifier_filters['Association'] = options['association']
if options['indication'] is not None:
qualifier_filters['Indication'] = options['indication']
if options['experimental'] is not None:
qualifier_filters['Experimental'] = options['experimental']
return qualifier_filters | e1e7c9e8314c7b6a98ded7cf12e82f7730649aab | 235,108 |
import torch
def one_hot_embedding(labels, num_classes):
    """
    Embed labels in one-hot form by indexing rows of the identity matrix.

    Args:
        labels: (LongTensor) class label, sized [N,].
        num_classes: (int) number of classes.

    Returns:
        (tensor) encoded labels, size [N, #classes].
    """
    identity = torch.eye(num_classes)  # [num_classes, num_classes]
    return identity[labels]
import requests
def get_random_quote(quotes_api_url, quotes_api_key):
    """Fetch a random quote from the quotes API and return the decoded JSON payload."""
    auth_headers = {"Authorization": "Bearer {}".format(quotes_api_key)}
    response = requests.get(quotes_api_url + "/quotes/random", headers=auth_headers)
    return response.json()
def find_legal_form(legal_forms, name):
    """Return the first legal form that occurs within ``name``.

    Args:
        legal_forms (list[str]): e.g. ['株式会社', '有限会社', '合同会社']
        name (str): e.g. 'TIS株式会社'

    Returns:
        str: the first matching legal form (e.g. '株式会社'), or '' if none
        match. (The previous docstring wrongly claimed a list was returned.)
    """
    return next((form for form in legal_forms if form in name), '')
import re
def get_number(string_nums):
    """
    Return the first integer found in ``string_nums``, or None if there is none.

    Bug fix: the old pattern ``re.match(r".*([\d]{1,})", ...)`` let the greedy
    leading ``.*`` swallow all but the last digit, so e.g. '123' returned 3.

    :param string_nums: string in which to look up numbers (integers)
    :return: the first full number as an int, or None if no digit is found
    """
    match = re.search(r"\d+", string_nums)
    return int(match.group()) if match else None
import typing
def _tdec(code: str, unit: typing.Optional[str] = 'C') -> typing.Optional[str]:
"""
Translates a 4-digit decimal temperature representation
Ex: 1045 -> -4.5°C 0237 -> 23.7°C
"""
if not code:
return None
ret = f"{'-' if code[0] == '1' else ''}{int(code[1:3])}.{code[3]}"
if unit:
ret += f'°{unit}'
return ret | 1f1b047a5ba9707e1c9eff21cf27cca380f42f83 | 424,728 |
def string2color(string: str) -> str:
    """ Generate more or less unique color for a given string """
    acc = 0
    for ch in string:
        # (acc << 5) - acc == 31 * acc; exact for Python's arbitrary-size ints.
        acc = ord(ch) + 31 * acc
    # Keep the low 24 bits so the value fits an RGB triple.
    return hex(acc & 0x00FFFFFF)
Subsets and Splits