Dataset schema: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k). Each record below lists the content, its sha1, and its id.
def inside_image(x, y, im_info):
    """ check if a point is in the image """
    return x >= 0 and y >= 0 and x < im_info[1] and y < im_info[0]
f27315f5e7275d55f941a9614600440c1a904fa0
148,928
def search(array, value, dir="-"):
    """
    Searches a sorted (ascending) array for a value, or if the value is not
    found, will attempt to find the closest value.

    Specifying dir="-" finds the index of the greatest value in array less
    than or equal to the given value.
    Specifying dir="+" finds the index of the least value in array greater
    than or equal to the given value.
    Specifying dir="*" finds the index of the value closest to the given
    value.
    """
    if value < array[0]:
        if dir == "+":
            return 0
        else:
            raise IndexError(f"No value found before {value}.")
    if value > array[-1]:
        if dir == "-":
            return len(array) - 1
        else:
            raise IndexError(f"No value found after {value}.")

    J = 0
    K = len(array) - 1
    while True:
        if value == array[J]:
            return J
        elif value == array[K]:
            return K
        elif K == J + 1:
            if dir == "-":
                return J
            elif dir == "+":
                return K
            elif dir == "*":
                # Compare the stored values, not the indices, to the target
                # (the original compared the index itself to the value).
                return min((J, K), key=lambda n: abs(array[n] - value))
        N = (J + K) // 2
        if value < array[N]:
            K = N
        elif value > array[N]:
            J = N
        else:
            return N
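A minimal usage sketch with hypothetical data, assuming the function is in scope:

data = [1, 3, 4, 7, 9]
print(search(data, 5, dir="-"))  # 2 -> index of 4, greatest value <= 5
print(search(data, 5, dir="+"))  # 3 -> index of 7, least value >= 5
print(search(data, 4))           # 2 -> exact match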
9284ed8f826f3a472d9149531b29f12a8875870c
41,739
import zlib


def adler32(filepath, blocksize=2**20):
    """
    Return the adler32 checksum of a file as an 8-character hex string.

    `blocksize` adjusts how much of the file is read into memory at a time.
    This is useful for large files.
        2**20 = 1024 * 1024 = 1 MB
        2**12 = 4 * 1024 = 4 KB
    """
    csum = 1
    with open(filepath, 'rb') as afile:
        buf = afile.read(blocksize)
        while len(buf) > 0:
            csum = zlib.adler32(buf, csum)
            buf = afile.read(blocksize)
    # From the documentation:
    # > Changed in version 3.0: Always returns an unsigned value.
    # > To generate the same numeric value across all Python versions and
    # > platforms, use crc32(data) & 0xffffffff.
    csum = csum & 0xffffffff
    return ('0' * 8 + hex(csum)[2:])[-8:]
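A quick sanity check, assuming a hypothetical file sample.txt; the result matches zlib.adler32 applied to the whole content:

with open('sample.txt', 'wb') as f:
    f.write(b'hello world')
print(adler32('sample.txt'))                                     # '1a0b045d'
print(format(zlib.adler32(b'hello world') & 0xffffffff, '08x'))  # '1a0b045d'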
9e299e752664dc71a29a6393331751d49327ecc1
49,211
def format_zip(field):
    """Return str version of 5 digit zip.

    Examples:
        >>> format_zip('08544-4320')
        '08544'
        >>> format_zip('085444320')
        '08544'
        >>> format_zip(85444320)
        '08544'
        >>> format_zip(851)
        '00851'

    :param field: raw text
    """
    if not field:
        return None
    field = str(field).split('-')[0].split(' ')[0].split('.')[0]
    if len(field) <= 5:
        return field.zfill(5) if field.isdigit() else None
    elif len(field) <= 9:
        field = field.zfill(9)[:5]
        return field if field.isdigit() else None
    return None
5bd01325e47f5f464c3828f4d76adab508322f1d
215,364
def get_show_columns(database):
    """
    Gets the query of SHOW COLUMNS for a given database.

    :type database: str
    :param database: A database name
    :rtype: str
    :return: A query
    """
    return ("SELECT `TABLE_NAME`, `COLUMN_NAME` "
            "FROM `information_schema`.`COLUMNS` "
            "WHERE `TABLE_SCHEMA` = '{:s}'").format(database)
e93d9ec70986d1895d1a107acdc7674f46e4de68
293,551
def isvar(sym):
    """
    Checks whether a symbol in a structure is a variable, e.g., X, Y, Z etc.
    """
    if isinstance(sym, str):
        return sym.isupper()
    else:
        return False
a0a11565fd4bd6dc15d926c7364cf9c746bcebce
479,686
def predict(features, model):
    """
    Generate predictions from features and model.

    .. versionadded:: 0.5.0

    Parameters
    ----------
    features : ndarray
        Features from which to generate predictions
    model : ndarray
        Regression model.

    Returns
    -------
    predicted : ndarray
        Predictions generated from features using model.
    """
    return features.dot(model)
628d427ff363ac43cd65a98db22fa70598e522b6
162,024
def _entity_list_as_bel(entities):
    """Stringify a list of BEL entities.

    :type entities: iter[BaseAbundance]
    :rtype: str
    """
    return ', '.join(
        e.as_bel() for e in entities
    )
1562235267a12006334dc8688f5a6b110a0f724e
398,959
def is_named_tuple_type(typ) -> bool:
    """
    True if the argument corresponds to a named tuple type.

    Calling the function `collections.namedtuple` gives a new type that is a
    subclass of `tuple` (and no other classes) with a member named `_fields`
    that is a tuple whose items are all strings.
    """
    b = typ.__bases__
    if len(b) != 1 or b[0] != tuple:
        return False
    f = getattr(typ, "_fields", None)
    if not isinstance(f, tuple):
        return False
    return all(type(n) == str for n in f)
b27003183383198f2c4d0cac0d13cfef3e8a7f7e
241,457
def left_padded_string(text, padding=0):
    """
    Method returns a modified version of `text` with `padding` number of
    space characters prepended.

    :param text: a string to prepend spaces to
    :param padding: the number of space characters to prepend to `text`
    :return: a modified version of `text` with `padding` number of space
        characters prepended.
    """
    try:
        padding = int(padding)
    except (TypeError, ValueError):
        # int(None) raises TypeError, not ValueError; catch both so any
        # unusable padding value falls back to 0.
        padding = 0
    padding = 0 if padding < 0 else padding
    return ' ' * padding + text
0df82b71b8d8fb5ec948bdcba8e3ca9f7e6ef98f
261,207
def transpose_func(classes, table):
    """
    Transpose table.

    :param classes: confusion matrix classes
    :type classes: list
    :param table: input confusion matrix
    :type table: dict
    :return: transposed table as dict
    """
    transposed_table = {k: table[k].copy() for k in classes}
    for i, item1 in enumerate(classes):
        for j, item2 in enumerate(classes):
            if i > j:
                temp = transposed_table[item1][item2]
                transposed_table[item1][item2] = transposed_table[item2][item1]
                transposed_table[item2][item1] = temp
    return transposed_table
6c59feef2b735076c5768e086ef2e91331b78a73
29,450
def transformDiffCost(criterion, frRow, exRow):
    """Returns the absolute difference between their image through the
    'transform' dict, normalized"""
    t = criterion['transform']
    q = criterion['QLabel']
    return abs(t[frRow[q]] - t[exRow[q]]) / max(t.values())
0e7463642355b74aee42e90bd2baac6baee43eff
329,303
def _make_minimize_function(scoring_function):
    """Create a minimize function.

    Given a maximization ``scoring_function``, convert it to minimize in
    order to work with ``hyperopt``, as ``benchmark`` works with
    ``maximization``. Since ``hyperopt`` params are passed as a python
    ``dict``, we pass those as ``kwargs`` to the ``scoring_function``.
    """
    def minimized_function(params):
        return -scoring_function(**params)

    return minimized_function
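A sketch of the sign flip with a made-up scoring function; hyperopt would call the wrapped function with a params dict:

def score(a, b):  # hypothetical scoring function, higher is better
    return a + b

minimized = _make_minimize_function(score)
print(minimized({'a': 1, 'b': 2}))  # -3, so a minimizer ends up maximizing score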
017370cbc9446229a45de3126ea29974ab551737
257,730
import shutil


def clone_file(src, dest):
    """Clones a file from the source to the destination

    Returns:
        dict: indicates whether the clone operation threw an error
    """
    try:
        shutil.copy(src, dest)
        return {'is_successful': True}
    except OSError:
        return {'is_successful': False}
024a5df1c9c79e572c1a41f4941ef1b63608022f
479,459
from typing import List


def find_positions(arr: List[str], mask: List[bool]) -> List[int]:
    """Find the positions of masked tokens.

    Args:
        arr: List of tokens.
        mask: Mask for tokens.

    Returns:
        List of positions where the mask is True.
    """
    pos = []
    for i, (token, istoken) in enumerate(zip(arr, mask)):
        if istoken:
            pos.append(i)
    return pos
d89a8ab51c4437c55c666b3670f9e09a13e478a5
648,075
def transform_point(point, direction, value):
    """
    Moves point in direction

    :param point: Point for movement
    :type point: DB.XYZ
    :param direction: Direction of movement. Vector
    :type direction: DB.XYZ
    :param value: The amount of movement
    :type value: int or float
    :return: New moved point
    :rtype: DB.XYZ
    """
    return point + direction * value
996c4978be165b31b3b60c24cd6927840f6ced94
38,474
def _gcs_uri_rewriter(raw_uri):
    """Rewrite GCS file paths as required by the rewrite_uris method.

    The GCS rewriter performs no operations on the raw_uri and simply returns
    it as the normalized URI. The docker path has the gs:// prefix replaced
    with gs/ so that it can be mounted inside a docker image.

    Args:
        raw_uri: (str) the raw GCS URI, prefix, or pattern.

    Returns:
        normalized: a cleaned version of the uri provided by command line.
        docker_path: the uri rewritten in the format required for mounting
            inside a docker worker.
    """
    docker_path = raw_uri.replace('gs://', 'gs/', 1)
    return raw_uri, docker_path
6e476860cb175dd2936cc0c080d3be1d09e04b77
709,845
def final_nonzero(L):
    """
    Return the index of the last non-zero value in the list.
    """
    for index, val in reversed(list(enumerate(L))):
        if val:
            return index
    return 0
1064987732146a9f6c12a2cab1dc84d2657fa321
17,052
def convertRange(val: float, old: tuple, new: tuple):
    """
    Converts the range of a value to a new range.

    Example
    -------
    >>> convertRange(50, (0, 100), (0, 1))
    0.5
    """
    return (((val - old[0]) * (new[1] - new[0])) / (old[1] - old[0])) + new[0]
7308b6c45b3ef587aab41e970108a1dc9e31e726
56,919
import functools
import time
import logging


def retry_on_exception(exceptions, timeout=300):
    """Retry function in case of exception(s)."""
    def _retry(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            delay = 1
            backoff = 2
            tstart = time.time()
            while True:
                try:
                    return f(*args, **kwargs)
                except exceptions:
                    if timeout is not False and time.time() - tstart > timeout:
                        raise
                    logging.warning(
                        f'Function {f.__name__} failed: retrying in {delay}s')
                    time.sleep(delay)
                    delay *= backoff
        return wrapper
    return _retry
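A usage sketch with a hypothetical flaky function that succeeds on the third attempt (total wait about 3s with the 1s/2s backoff):

attempts = {'n': 0}

@retry_on_exception((ValueError,), timeout=30)
def flaky():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ValueError('not yet')
    return 'ok'

print(flaky())  # 'ok'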
422d2cbda47cdf974586f48a3156182dbb23b9b5
281,438
def _profile_exclusion(matching_profiles, logger):
    """Find the most matching profile by host, task and family match.

    Profiles are selectively filtered. Each item in the passed argument must
    contain a tuple of (profile, profile's score) where the score is a list
    of booleans. Each boolean represents the existence of a filter for a
    specific key.

    Profiles are looped in sequence. In each sequence profiles are split into
    true_list and false_list. The next sequence loop uses the profiles in
    true_list if there are any, else false_list is used.

    Filtering ends when only one profile is left in true_list, or when all
    existence boolean loops have passed; in that case the first of the
    remaining profiles is returned.

    Args:
        matching_profiles (list): Profiles with same scores. Each item is a
            tuple of (profile, profile values).

    Returns:
        dict: Most matching profile.
    """
    logger.info(
        "Search for first most matching profile in match order:"
        " Host name -> Task name -> Family."
    )
    if not matching_profiles:
        return None

    if len(matching_profiles) == 1:
        return matching_profiles[0][0]

    scores_len = len(matching_profiles[0][1])
    for idx in range(scores_len):
        profiles_true = []
        profiles_false = []
        for profile, score in matching_profiles:
            if score[idx]:
                profiles_true.append((profile, score))
            else:
                profiles_false.append((profile, score))

        if profiles_true:
            matching_profiles = profiles_true
        else:
            matching_profiles = profiles_false

        if len(matching_profiles) == 1:
            return matching_profiles[0][0]

    return matching_profiles[0][0]
11d39198470d679808a6fc0c3246ebd06e5d7f16
460,373
def _name(node):
    """Get the name of a node."""
    return type(node).__name__
215a5e3c62fe3cba6b4f40dfbd470c2451fdbf75
526,996
def list_projection(values, columns):
    """Return a comma-delimited string containing specified values from a list.

    values = list of values. (E.g., as returned from a csv.reader().)
    columns = list of indices (0-based) for the columns that are to be
        included in the returned line.

    Returns a comma-delimited text string containing only the desired columns
    in the order specified in the passed list.
    """
    returned = []
    for column in columns:
        returned.append(values[column])
    return ','.join(returned)
6998f5c815309f70d17cda9e58c026a62112ee80
341,424
import requests
import math


def get_search_page_numbers(url, token):
    """
    Get the pagination information for the request.

    Parameters
    ----------
    url : str
        The OSF url
    token : str
        OSF authorization token

    Returns
    -------
    A dictionary of page numbers
    """
    headers = {"Authorization": "Bearer {}".format(token)}
    pagination_info = requests.get(url, headers=headers).json()['links']

    next_page = pagination_info['next']
    previous_page = pagination_info['prev']

    if next_page:
        next_page = next_page.partition('page=')[2]
    if previous_page:
        previous_page = previous_page.partition('page=')[2]
        if previous_page == '':
            previous_page = '1'

    total_pages = math.ceil(
        pagination_info['meta']['total'] / pagination_info['meta']['per_page'])

    pages = {
        "first_page": '1',
        "previous_page": previous_page,
        "next_page": next_page,
        "last_page": str(total_pages),
        "total_pages": str(total_pages),
        "per_page": pagination_info['meta']['per_page']}
    return pages
f868f3edf00141825950b9aee2f2ea2ab74672ca
129,658
def kwargs_to_variable_assignment(kwargs: dict, value_representation=repr,
                                  assignment_operator: str = ' = ',
                                  statement_separator: str = '\n',
                                  statement_per_line: bool = False) -> str:
    """
    Convert a dictionary into a string with assignments

    Each assignment is constructed based on:
    key assignment_operator value_representation(value) statement_separator,
    where key and value are the key and value of the dictionary.
    Moreover one can separate the assignment statements by new lines.

    Parameters
    ----------
    kwargs : dict
    assignment_operator: str, optional:
        Assignment operator (" = " in python)
    value_representation: str, optional
        How to represent the value in the assignments (repr function in python)
    statement_separator : str, optional:
        Statement separator (new line in python)
    statement_per_line: bool, optional
        Insert each statement on a different line

    Returns
    -------
    str
        All the assignments.

    >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"})
    "a = 2\\nb = 'abc'\\n"
    >>> kwargs_to_variable_assignment({'a': 2, 'b': "abc"}, statement_per_line=True)
    "a = 2\\n\\nb = 'abc'\\n"
    >>> kwargs_to_variable_assignment({'a': 2})
    'a = 2\\n'
    >>> kwargs_to_variable_assignment({'a': 2}, statement_per_line=True)
    'a = 2\\n'
    """
    code = []
    join_str = '\n' if statement_per_line else ''
    for key, value in kwargs.items():
        code.append(key + assignment_operator
                    + value_representation(value) + statement_separator)
    return join_str.join(code)
aec76c6a7b1e29c9540b0cb2a8161f831d2058de
71,454
def get_policy_published(reportee, policy, **kw):
    """Return "policy_published" part for a passed `reportee` and `policy`.

    For now we use the hardcoded values for `adkim`, `aspf` and `pct`
    everywhere.
    """
    return {
        "domain": reportee,
        "adkim": "r",
        "aspf": "r",
        "p": policy,
        "sp": policy,
        "pct": 100
    }
a5be8da2777421b6596665718ffc25e4ad5b8e55
497,692
def is_int(obj):
    """Check if the given object is an integer"""
    return isinstance(obj, int)
99aeb97e23db5ac6d83776b9e9c1172d7215972c
666,636
def params_to_string(*s):
    """gets a printable string for variable number of parameters."""
    return ", ".join(s)
cd499a7c8bbd2dc32939ae0b86a5bcd8e9341b3f
235,960
def execute(context, *args):
    """dump_request() -> {"args": [...], "metadata": {...}}

    Returns the request args and metadata as a dict.
    """
    return {"args": args, "metadata": context.metadata}
03f874c1e0f86126970cafbd715b6a6a699f90ee
325,240
import json


def _build_body_as_bytes(channel, message):
    """return HTTP request body as bytes, utf-8 encoded"""
    data_dict = {"channel": channel, "text": message}
    data_str = json.dumps(data_dict)
    return data_str.encode()
7e85feee9af6b160e5edca26eb837b57b2888c88
516,923
def percentage(context, num, total_num):
    """
    Works out the percentage of num over total_num and
    then appends the percentage sign
    """
    p = float(num) / float(total_num) * 100
    percent = str(p) + "%"
    return percent
66bb8db090a9f9cc01b34fda291055aeae731f1c
662,284
def iterPower(base, exp):
    """
    base: int or float.
    exp: int >= 0

    returns: int or float, base^exp
    """
    res = 1  # <---- remember starting point
    while exp > 0:
        res *= base
        exp -= 1
    return res
14c694e2702c468ab28362d224d8d89092c0cc79
207,480
def get_passive_el(passive_coord, centroids):
    """ Gets index of passive elements.

    Args:
        passive_coord (:obj:`tuple`): Region that the shape will not be changed.
        centroids (:obj:`numpy.array`): Coordinate (x,y) of the centroid of each element.

    Returns:
        Index of passive elements.
    """
    mask = (
        (centroids[:, 0] >= passive_coord[0][0])
        & (centroids[:, 0] <= passive_coord[0][1])
        & (centroids[:, 1] >= passive_coord[1][0])
        & (centroids[:, 1] <= passive_coord[1][1])
    )
    return (mask > 0).nonzero()[0]
9983ee9d730ced9f8ce56790c07af22c9dfcdb0d
41,507
from importlib import import_module


def get_class_from_route(route):
    """
    From a Python class path in string format, returns the class
    """
    values = route.split('.')
    module = import_module('.'.join(values[:-1]))
    cl = getattr(module, values[-1])
    return cl
059da4f3062630a6906723d66e3869b7a347d04a
543,921
import pickle


def load_pickle2dict(fname):
    """
    Load the IMGT database pickle file into a dictionary structure
    """
    with open(fname, 'rb') as fileHandle:
        return pickle.load(fileHandle)
9e765a72b64582f3180556d7aa445037c9d82126
129,897
import base64


def encode_b64(value):
    """Encode value to base64."""
    if isinstance(value, str):
        value = bytes(value, "utf-8")
    return base64.b64encode(value).decode("utf-8")
847759cf46c7c9b4540038c44702ca772dbbccc7
193,871
def serialize_header(value, style='simple', explode=False):  # noqa
    """
    Serialize a header according to
    https://swagger.io/docs/specification/serialization/.

    Parameters
    ----------
    value :
        Value to serialize
    style : str ('simple')
        Serialization style.
    explode : bool
        Explode the object serialization.

    Returns
    -------
    str
        Serialized header.
    """
    if type(value) is list:
        return ','.join([str(item) for item in value])
    elif type(value) is dict:
        sep = '=' if explode else ','
        return ','.join(['{}{}{}'.format(k, sep, v) for k, v in value.items()])
    else:
        return str(value)
e5f355a8b60d85cf3a673b6ba126b76effc283c8
370,469
def get_PF_Results(trafo_vector_group):
    """
    Returns the PF loadflow results of the validation network for the given
    trafo vector groups. For info about the test network see
    runpp_3ph Validation.pfd
    """
    data_map = {"YNyn": 0, "Dyn": 1, "Yzn": 2}
    # Create lists to put in the PF loadflow results of the validation network
    bus_vm_pu = [[], [], []]
    # vm_pu -> L1 L2 L3
    # Bus #1 ExtGrid
    # Bus #2 TR1_HV
    # Bus #3 TR1_LV
    # Bus #4 Load A1
    # Bus #5 Load B1
    # Bus #6 Load B3
    # Bus #7 TR2_LV
    line_i_ka = [[], [], []]
    # i_ka -> L1_from, L2_from, L3_from, L1_to, L2_to, L3_to
    # Leitung #1 ExtGrid_TR1
    # Leitung #2 TR1_LoadA1
    # Leitung #3 TR2_LoadB1
    # Leitung #4 TR2_LoadB3
    trafo_i_ka = [[], [], []]
    # i_ka -> L1_hv, L2_hv, L3_hv, L1_lv, L2_lv, L3_lv
    # Trafo 1
    # Trafo 2

    bus_vm_pu[data_map['YNyn']] = [
        0.99999787338, 1.0000016161, 1.0000005105, 0.99968050244,
        0.99969237855, 0.99963282587, 0.99705343554, 0.99746838183,
        0.99718586395, 0.9948995116, 0.99458439962, 0.98674300489,
        0.99395111068, 0.99418423529, 0.9863564418, 0.97850425826,
        0.99234507124, 0.99521892713, 0.99742536231, 0.99837274406,
        0.99814767609]
    line_i_ka[data_map['YNyn']] = [
        0.014003237709, 0.013578415289, 0.015018686616, 0.014052352411,
        0.013615497541, 0.015055831313, 0.045318234753, 0.087462622072,
        0.13170744039, 0.045439631098, 0.087508398891, 0.13172221072,
        0.067006161071, 0.10938206657, 0.15377206679, 0.067102516557,
        0.10942952686, 0.15379184651, 0.24518449462, 0.11090275423,
        0.15370539768, 0.24521447354, 0.1109479239, 0.15373201528]
    trafo_i_ka[data_map['YNyn']] = [
        0.014052351784, 0.013615496912, 0.015055830683, 0.34745891513,
        0.3364754264, 0.37259351695, 0.015806285559, 0.011022367484,
        0.014341144957, 0.39152062339, 0.27144774298, 0.3546449925]

    bus_vm_pu[data_map['Dyn']] = [
        0.99999633365, 1.0000007177, 1.0000029487, 0.99966300361,
        0.99968280267, 0.99965990087, 0.99703737513, 0.99745820246,
        0.99721210683, 0.99488339811, 0.99457396301, 0.98676946744,
        0.9939515775, 0.99418757133, 0.98635265387, 0.97850473318,
        0.99234834683, 0.99521515379, 0.99742581829, 0.99837603162,
        0.99814393136]
    line_i_ka[data_map['Dyn']] = [
        0.014110688122, 0.01373849924, 0.014763703519, 0.014164968965,
        0.013770898582, 0.014800144789, 0.045318951491, 0.087463575817,
        0.13170390089, 0.045440367054, 0.087509317164, 0.13171867828,
        0.067006134761, 0.1093816954, 0.1537726556, 0.067102485041,
        0.10942915967, 0.15379243712, 0.24518438107, 0.11090238404,
        0.15370597872, 0.24521435453, 0.11094755768, 0.15373259815]
    trafo_i_ka[data_map['Dyn']] = [
        0.014164968341, 0.013770897951, 0.014800144159, 0.34746449192,
        0.33647892194, 0.37258362522, 0.01492317272, 0.012146394959,
        0.014229967227, 0.39152039065, 0.27144661055, 0.35464662852]

    bus_vm_pu[data_map['Yzn']] = [
        0.99999633365, 1.0000007177, 1.0000029487, 0.99966300361,
        0.99968280267, 0.99965990087, 0.99703737513, 0.99745820246,
        0.99721210683, 0.99488339811, 0.99457396301, 0.98676946744,
        0.9939515775, 0.99418757133, 0.98635265387, 0.97850473318,
        0.99234834683, 0.99521515379, 0.99742581829, 0.99837603162,
        0.99814393136]
    line_i_ka[data_map['Yzn']] = [
        0.014110688122, 0.01373849924, 0.014763703519, 0.014164968965,
        0.013770898582, 0.014800144789, 0.045318951491, 0.087463575817,
        0.13170390089, 0.045440367054, 0.087509317164, 0.13171867828,
        0.067006134761, 0.1093816954, 0.1537726556, 0.067102485041,
        0.10942915967, 0.15379243712, 0.24518438107, 0.11090238404,
        0.15370597872, 0.24521435453, 0.11094755768, 0.15373259815]
    trafo_i_ka[data_map['Yzn']] = [
        0.014164968341, 0.013770897951, 0.014800144159, 0.34746449192,
        0.33647892194, 0.37258362522, 0.01492317272, 0.012146394959,
        0.014229967227, 0.39152039065, 0.27144661055, 0.35464662852]

    return [bus_vm_pu[data_map[trafo_vector_group]],
            line_i_ka[data_map[trafo_vector_group]],
            trafo_i_ka[data_map[trafo_vector_group]]]
7a2bdda51129028bbb2978a02807e7d8076c0ea5
407,468
from typing import Iterable
from typing import Dict
from typing import Any


def pb_obj2dict(obj, keys: Iterable[str]) -> Dict[str, Any]:
    """Convert a protobuf object to a Dict by selected keys

    :param obj: a protobuf object
    :param keys: an iterable of keys for extraction
    """
    return {k: getattr(obj, k) for k in keys if hasattr(obj, k)}
50dcc3f81f58b61b8c83989069facba532e0c523
604,786
def _pad_sequences(sequences, pad_tok, max_length):
    """
    Args:
        sequences: a generator of list or tuple.
        pad_tok: the char to pad with.
        max_length: the length to pad or truncate each sequence to.

    Returns:
        a list of list where each sublist has same length.
    """
    sequence_padded, sequence_length = [], []
    for seq in sequences:
        seq = list(seq)
        seq_ = seq[:max_length] + [pad_tok] * max(max_length - len(seq), 0)
        sequence_padded += [seq_]
        sequence_length += [min(len(seq), max_length)]
    return sequence_padded, sequence_length
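A small sketch with hypothetical token id sequences, padded or truncated to length 4:

padded, lengths = _pad_sequences([[1, 2], [3, 4, 5, 6, 7]], pad_tok=0, max_length=4)
print(padded)   # [[1, 2, 0, 0], [3, 4, 5, 6]]
print(lengths)  # [2, 4]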
dac40aef95b39b3b549904c3981152cacbee494a
303,267
def static_vars(**kwargs):
    """
    Attach static variables to a function.

    Usage:
        @static_vars(k1=v1, k2=v2, ...)
        def myfunc(...):
            myfunc.k1...

    Parameters:
        **kwargs
            Keyword=value pairs converted to static variables in decorated
            function.

    Returns:
        decorate
    """
    def decorate(func):
        for k, v in kwargs.items():
            setattr(func, k, v)
        return func
    return decorate
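A usage sketch: a hypothetical counter kept as a static variable on the function object itself:

@static_vars(counter=0)
def tick():
    tick.counter += 1
    return tick.counter

print(tick(), tick(), tick())  # 1 2 3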
ee6690d76ca21efd682ef2ade0aa8b8046ec6a51
675,968
import json


def _get_ha_state_from_json(string_json):
    """
    Searches through the specified JSON string looking for either the HDP 2.0
    or 2.1+ HA state enumerations.

    :param string_json: the string JSON
    :return: the value of the HA state (active, standby, etc)
    """
    json_data = json.loads(string_json)
    jmx_beans = json_data["beans"]

    # look for HDP 2.1+ first
    for jmx_bean in jmx_beans:
        if "name" not in jmx_bean:
            continue
        jmx_bean_name = jmx_bean["name"]
        if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" \
                and "State" in jmx_bean:
            return jmx_bean["State"]

    # look for HDP 2.0 last
    for jmx_bean in jmx_beans:
        if "name" not in jmx_bean:
            continue
        jmx_bean_name = jmx_bean["name"]
        if jmx_bean_name == "Hadoop:service=NameNode,name=FSNamesystem":
            return jmx_bean["tag.HAState"]
23dd2a547982a7fcc734a90b509caeff18fa90b3
610,543
import torch


def attention_score(att, mel_lens, r=1):
    """
    Returns a tuple of scores (loc_score, sharp_score), where loc_score
    measures monotonicity and sharp_score measures the sharpness of
    attention peaks
    """
    with torch.no_grad():
        device = att.device
        mel_lens = mel_lens.to(device)
        b, t_max, c_max = att.size()

        # create mel padding mask
        mel_range = torch.arange(0, t_max, device=device)
        mel_lens = mel_lens // r
        mask = (mel_range[None, :] < mel_lens[:, None]).float()

        # score for how adjacent the attention loc is
        max_loc = torch.argmax(att, dim=2)
        max_loc_diff = torch.abs(max_loc[:, 1:] - max_loc[:, :-1])
        loc_score = (max_loc_diff >= 0) * (max_loc_diff <= r)
        loc_score = torch.sum(loc_score * mask[:, 1:], dim=1)
        loc_score = loc_score / (mel_lens - 1)

        # score for attention sharpness
        sharp_score, inds = att.max(dim=2)
        sharp_score = torch.mean(sharp_score * mask, dim=1)

        return loc_score, sharp_score
ccdce864a91c9816143f414c2cde99b5f67c89c4
35,411
def merge_configs(default_config, input_config):
    """Recursively merge configuration dictionaries"""
    result = {}
    for name in default_config:
        lhs_value = default_config[name]
        if name in input_config:
            rhs_value = input_config[name]
            if isinstance(lhs_value, dict):
                assert isinstance(rhs_value, dict)
                result[name] = merge_configs(lhs_value, rhs_value)
            else:
                result[name] = rhs_value
        else:
            result[name] = lhs_value
    for name in input_config:
        rhs_value = input_config[name]
        if isinstance(rhs_value, dict) and name in default_config.keys():
            continue
        result[name] = rhs_value
    return result
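A sketch with hypothetical configs: nested defaults survive, user values win, unknown keys are kept:

default = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
user = {'db': {'port': 5433}, 'extra': 1}
print(merge_configs(default, user))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': False, 'extra': 1}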
2a2b9c6ad2fff813b9a4ccb87bb06b1679e8b9f8
311,506
def _shallow_dict_copy_without_key(table, key_to_omit):
    """Returns a shallow copy of dict with key_to_omit omitted."""
    return {key: table[key] for key in table if key != key_to_omit}
349e79cffc48bba052c2c2f6cfdfdae60c068c29
135,929
def readfile(filename):
    """Given a filename, read a file in text mode. It returns a single string."""
    filehandle = open(filename, 'r')
    outfile = filehandle.read()
    filehandle.close()
    return outfile
7d6cd96ec42cbcbac460268678b4d2e191a4afa3
558,456
import random


def make_sentence(words_list, num_words):
    """Get the number of words requested by user randomly.

    Params:
        words_list(list): list of str to choose words from randomly
        num_words(int)

    Returns:
        sentence(list)
    """
    sentence = list()
    for i in range(num_words):
        # randrange(len(words_list) - 1) could never pick the last word;
        # use the full range so every word is a candidate.
        random_index = random.randrange(len(words_list))
        sentence.append(words_list[random_index])
    return sentence
3a73a87b9cfd62c4973670a5de15db2e73bce5cc
175,073
def get_lineno(node, default=0):
    """Gets the lineno of a node or returns the default."""
    return getattr(node, 'lineno', default)
b1d7fa39877e5befd6565c92640ce2e1b6f5e06d
174,518
def transition(config, rules):
    """Get next state of cell for a rule (Wolfram code).

    :param config: Neighbor configuration.
    :param rules: Rule for state transition.
    :returns: Next state.
    :raises ValueError: If config is invalid.
    """
    assert len(rules) == 8
    if config == [1, 1, 1]:
        return rules[0]
    elif config == [1, 1, 0]:
        return rules[1]
    elif config == [1, 0, 1]:
        return rules[2]
    elif config == [1, 0, 0]:
        return rules[3]
    elif config == [0, 1, 1]:
        return rules[4]
    elif config == [0, 1, 0]:
        return rules[5]
    elif config == [0, 0, 1]:
        return rules[6]
    elif config == [0, 0, 0]:
        return rules[7]
    raise ValueError(f"Invalid config argument: {config}")
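A sketch stepping a single cell under Rule 110, whose Wolfram code 01101110 supplies the rules list in the 111..000 order expected above:

rule_110 = [0, 1, 1, 0, 1, 1, 1, 0]
print(transition([1, 0, 1], rule_110))  # 1
print(transition([1, 1, 1], rule_110))  # 0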
c9afd5a81bbb33810dfedbe62a4e31438ae22e95
129,726
import re


def canonical(value):
    """Replace anything but 'a-z', 'A-Z' and '0-9' with '_'."""
    return re.sub(r'[^a-zA-Z0-9]', '_', value)
dfd9b324a9c9ec273750e95a4bd7b1548af37108
687,472
def build_account_collapsed_json(financing_json, registrations_json):
    """Organize account registrations as parent/child financing statement/change registrations."""
    for statement in financing_json:
        changes = []
        for registration in registrations_json:
            if statement['registrationNumber'] == registration['baseRegistrationNumber']:
                changes.append(registration)
        if changes:
            statement['changes'] = changes
    return financing_json
d5016c5eae849cfed2603cc29e7cec81759d701c
499,821
def probability_of_improvement_sub(mu, std, target):
    """Sub function to compute the probability of improvement acquisition function.

    Args:
        mu: n x 1 posterior mean of n query points.
        std: n x 1 posterior standard deviation of n query points (same order
            as mu).
        target: target value to be improved over.

    Returns:
        Negative of target z-score as an equivalence of probability of
        improvement.
    """
    gamma = (target - mu) / std
    return -gamma
9b9ffb8c990617016622891d63dd956428533992
361,604
import re


def escape(token):
    """Escape characters that have special semantics within GraphViz"""
    pattern = re.compile(r'([\[\]()"\\])')
    return pattern.sub(r'\\\1', token)
bc94648e299f76f5ea5229d135d74dc75a28944e
258,036
def point_within_dimensions(point, image_dimensions):
    """Checks to see if a point falls inside an image's dimension.

    Works for any number of dimensions. Acceptable range is [0, dim)

    Args:
        point (np.array): array with the point's coordinates
        image_dimensions (np.array): array with the image dimensions

    Returns:
        bool: whether the point lies within the dimensions
    """
    assert len(point) == len(
        image_dimensions
    ), "Point dimensions {} doesn't equal image dimension {}".format(
        len(point), len(image_dimensions)
    )

    within_bounds = True
    for i, val in enumerate(point):
        within_bounds = within_bounds and 0 <= val < image_dimensions[i]
    return within_bounds
624b508fc5650e3f89d1523db655428b78545bc9
381,704
def convertlistToOpenMayaArray(inList, arrayType):
    """Convert the given list to an OpenMaya array type.

    :param inList: list of objects to be added to the array type
    :type inList: list
    :param arrayType: any OpenMaya array type
    :type arrayType: OpenMaya.<array>
    :return: the array filled with data
    :rtype: OpenMaya.<array>
    """
    array = arrayType()
    for elem in inList:
        array.append(elem)
    return array
b4507c7355d9c7cedc871ba9a674b19df5009248
480,952
def get_and_update_or_create(model, unique, update):
    """
    Given a model, a dictionary of lookup arguments, and a dictionary of
    update arguments, this convenience function gets an object and updates it
    in the database if necessary.

    Returns a tuple (object, int) where int is 0 if the object was not
    updated, 1 if the object was created, and 2 if the object was updated in
    the database.

    >>> resp = get_and_update_or_create(User, {'username': 'example'}, {'email': '[email protected]'})
    >>> resp
    (<User: example>, 1)
    >>> resp[0].email
    '[email protected]'

    >>> resp = get_and_update_or_create(User, {'username': 'example'}, {'email': '[email protected]'})
    >>> resp
    (<User: example>, 0)
    >>> resp[0].email
    '[email protected]'

    >>> resp = get_and_update_or_create(User, {'username': 'example'}, {'email': '[email protected]'})
    >>> resp
    (<User: example>, 2)
    >>> resp[0].email
    '[email protected]'
    """
    # Pass the lookup arguments as keyword arguments and the update values as
    # get_or_create's `defaults` (the original built a single positional dict,
    # which get_or_create does not accept).
    obj, created = model.objects.get_or_create(defaults=update, **unique)

    # If we just created it, then the defaults kicked in and we're good to go
    if created:
        return obj, 1

    # Iterate over all of the fields to update, updating if needed, and
    # keeping track of whether any field ever actually changed
    modified = False
    for name, val in update.items():
        if getattr(obj, name) != val:
            modified = True
            setattr(obj, name, val)

    # If a field did change, update the object in the database and return
    if modified:
        obj.save(force_update=True)
        return obj, 2

    # Otherwise the object in the database is up to date
    return obj, 0
5c6c010ae7a38bb5d928b4b611954aae8bad5ffa
523,497
def tong_bp(n):
    """Compute the sum 1^2 + 2^2 + 3^2 + ... + n^2."""
    total = 0
    for i in range(1, n + 1):
        total += i * i
    return total
52f4a9e6ebea035233424f1fc4c77f3dc8a6a160
282,882
def cast_int_float_string(value):
    """
    Cast given string value, if possible, to an int, float or returns
    unchanged.

    Parameters
    ----------
    value : str or unicode
        Value to try casting to an int and float.

    Returns
    -------
    int, float, str or unicode
        Cast *value*.
    """
    try:
        return int(value)
    except ValueError:
        try:
            return float(value)
        except ValueError:
            return value
d1f9ae61ef1f5b950899fd7503b19c7f687f25bf
208,325
import six


def get_first(predicate, source):
    """Searches for an item that matches the conditions.

    :param predicate: Defines the conditions of the item to search for
    :param source: Iterable collection of items
    :return: The first item that matches the conditions defined by the
        specified predicate, if found; otherwise StopIteration is raised
    """
    return six.next(item for item in source if predicate(item))
cfaa1abd9dc70ca1fbc452e2a8f6678f8438af74
115,920
def split_1_grams_from_n_grams(topics_weightings):
    """
    Pair every word with its weighting for topics into dicts, for each topic.

    :param topics_weightings: a 3D list of shape [topics, number_of_top_words, 2]
        where the 2 is two entries such as (top_word, top_words_weighting).
    :return: Two arrays similar to the input array where the 1-grams were
        split from the n-grams. The n-grams are in the second array and the
        1-grams in the first one.
    """
    _1_grams = [[] for _ in range(len(topics_weightings))]
    _n_grams = [[] for _ in range(len(topics_weightings))]

    for i, topic_words_list in enumerate(topics_weightings):
        for word, weighting in topic_words_list:
            tuple_entries = (word, weighting)
            if ' ' in word:
                _n_grams[i].append(tuple_entries)
            else:
                _1_grams[i].append(tuple_entries)

    return _1_grams, _n_grams
cdb058d4ad718d578e2e11ec3d4ae9eb008224b4
691,555
def readme() -> str:
    """
    Reads the README file of the project to use it as long description.

    :return: The long description of Metrics.
    """
    with open('README.md') as file:
        return file.read()
94300a69c03a5b6c0f5641538f8b06c20bbe5cda
277,714
def is_say_ingredients(text: str) -> bool:
    """
    A utility method to determine if the user said the intent
    'say_ingredients'.
    """
    exact_match_phrases = [
        "ingredient",
        "ingredients"
    ]
    sample_phrases = [
        'say ingredient',
        'tell me the ingredient',
        'what are the ingredient',
        'tell the ingredient',
        'say the ingredient',
        'say ingredient',
        'tell me ingredient',
        'tell ingredient'
    ]
    return any(text == em for em in exact_match_phrases) or \
        any(phrase in text for phrase in sample_phrases)
3de66c29ae2c7a35a5b3f71242bd958f1084e0de
87,314
from typing import Callable


def mask_table() -> dict[int, Callable[[int, int], int]]:
    """Mask functions table.

    Returns:
        dict[int, Callable[[int, int], int]]: Dictionary of the form
        {mask number: lambda function}
    """
    table = {
        0: lambda x, y: (x + y) % 2,
        1: lambda x, y: x % 2,
        2: lambda x, y: y % 3,
        3: lambda x, y: (x + y) % 3,
        4: lambda x, y: (x//2 + y//3) % 2,
        5: lambda x, y: (x*y) % 2 + (x*y) % 3,
        6: lambda x, y: ((x*y) % 2 + (x*y) % 3) % 2,
        7: lambda x, y: ((x*y) % 3 + (x+y) % 2) % 2
    }
    return table
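A quick sketch evaluating mask 0 at two hypothetical module coordinates; in QR masking, a module is conventionally flipped where the condition evaluates to 0:

masks = mask_table()
print(masks[0](0, 0))  # 0 -> module would be flipped
print(masks[0](0, 1))  # 1 -> module left as-is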
3f6216274baf659480d8dcebfb9146b9df973e07
539,802
def mapLists(first, second):
    """
    Make a dictionary from two lists with elements of the first as the keys
    and second as values. If there are more elements in the first list, they
    are assigned None values and if there are more in the second list,
    they're dropped.
    """
    index = 0
    dict = {}
    # Read through every index of the @first list and build the map.
    while index < len(first):
        f = first[index]
        s = second[index] if index < len(second) else None
        dict[f] = s
        index += 1
    return dict
928b31e0cf636389124f8ba6b4910b9a51539204
663,774
def get_season_archive_url(base_url: str) -> str:
    """Creates the URL for the season archive endpoint."""
    return f"{base_url}/season/archive"
5ca241d304e0ba5a3175aed025b6851289fddda3
397,524
import warnings
from tempfile import TemporaryDirectory as TmpDir


def TemporaryDirectory(*args, **kwargs):  # pylint: disable=invalid-name
    """
    This function is deprecated. Please use `tempfile.TemporaryDirectory`
    """
    warnings.warn(
        "This function is deprecated. Please use `tempfile.TemporaryDirectory`",
        DeprecationWarning,
        stacklevel=2
    )
    return TmpDir(*args, **kwargs)
a5a0cbead84c7b147d27793e073660435c825255
321,283
import pyarrow


def _to_pyarrow(value):
    """Convert Python value to pyarrow value."""
    return pyarrow.array([value])[0]
a050c047b8ea7e4510c34993e5ad9a436920abf9
74,657
from typing import Set


def candidates_to_bits(candidates: Set[int]) -> int:
    """
    Convert a candidates set into its bits representation.
    The bits are in big endian order.

    >>> bin(candidates_to_bits({1, 2, 3, 6, 7}))
    '0b1100111'
    >>> bin(candidates_to_bits({6, 9}))
    '0b100100000'
    """
    bits = 0
    for candidate in candidates:
        bits ^= 1 << (candidate - 1)
    return bits
0f69cb3515975687d1c83fabf690936667a6cf06
678,393
def unpack_singleton(x):
    """
    Return original except when it is a sequence of length 1 in which case
    return the only element.

    :param x: a list
    :return: the original list or its only element
    """
    if len(x) == 1:
        return x[0]
    else:
        return x
0bffdcc339c593aafb1f657134da5d67fc538cbf
40,136
def parameter_init(model, name, shape, init):
    """Create parameter given name, shape and initializer.

    Parameters
    ----------
    model :
        the model that owns the parameter
    name : str
        parameter name
    shape : tuple
        parameter shape
    init : mxnet.initializer
        an initializer

    Returns
    -------
    mxnet.gluon.parameter
        a parameter object
    """
    p = model.params.get(name, shape=shape, init=init)
    return p
687330b7189066a9ef34690b8df08df6aa08fb07
563,375
def _GetContactPrivacyEnum(domains_messages):
    """Get Contact Privacy Enum from api messages."""
    return domains_messages.ContactSettings.PrivacyValueValuesEnum
2a33e0bc5ca9baebddafddbf7d0c844ed91bc1c0
124,560
def likes(list_of_names: list) -> str:
    """
    >>> likes([])
    'no one likes this'
    >>> likes(["Python"])
    'Python likes this'
    >>> likes(["Python", "JavaScript", "SQL"])
    'Python, JavaScript and SQL like this'
    >>> likes(["Python", "JavaScript", "SQL", "JAVA", "PHP", "Ruby"])
    'Python, JavaScript and 4 others like this'
    """
    if len(list_of_names) == 0:
        return "no one likes this"
    if len(list_of_names) == 1:
        return f"{list_of_names[0]} likes this"
    if len(list_of_names) == 2:
        return f"{list_of_names[0]} and {list_of_names[1]} like this"
    if len(list_of_names) == 3:
        return (
            f"{list_of_names[0]}, {list_of_names[1]} and {list_of_names[2]} like this"
        )
    return f"{list_of_names[0]}, {list_of_names[1]} and {len(list_of_names) - 2} others like this"
20d46cfcd319b7bef3733a86aa6e57769381d67b
121,882
def int_overlap(a1, b1, a2, b2):
    """Checks whether two intervals overlap"""
    if b1 < a2 or b2 < a1:
        return False
    return True
1ada12a2767cf0c709baa310446d710e976f607f
450,157
def get_datasets_with_substrings(datasets_list, name_substrings):
    """
    Filters list of datasets with specified substrings (e.g. github
    usernames) in them

    :param datasets_list: list of dataset_ids
    :param name_substrings: identifies substrings that help identify datasets
        to delete
    :return: list of dataset_ids with any substring in their dataset_id
    """
    datasets_with_substrings = []

    for dataset in datasets_list:
        if any(name_substring in dataset for name_substring in name_substrings):
            datasets_with_substrings.append(dataset)

    return datasets_with_substrings
b612fec6fd733b93701b4a2334b69f880a76e030
145,598
def next_race_template() -> list:
    """Return template settings for next race."""
    return [
        {"round": "SA", "qualified": 0, "current_contestant_qualified": False},
        {"round": "SC", "qualified": 0, "current_contestant_qualified": False},
        {"round": "FA", "qualified": 0, "current_contestant_qualified": False},
        {"round": "FB", "qualified": 0, "current_contestant_qualified": False},
        {"round": "FC", "qualified": 0, "current_contestant_qualified": False},
    ]
5e05a7db32e502eae8d11d2d212ef93c4c1e3c3f
556,670
def is_authenticated_with_proxy(proxy):
    """Given a Proxy, checks whether a user is authenticated"""
    if proxy is None:
        return False
    elif proxy.has_authentication():
        return proxy.get_authentication().is_valid()
    else:
        return False
c0476ec85fedb82ee77da6db18e52e2c4c39bfb5
293,021
import random


def mcpi_samples(n):
    """
    Compute the number of points in the unit circle out of n points.
    """
    count = 0
    for i in range(n):
        x, y = random.random(), random.random()
        if x*x + y*y <= 1:
            count += 1
    return count
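A usage sketch estimating pi from the circle-point count (result varies run to run):

n = 100_000
print(4 * mcpi_samples(n) / n)  # roughly 3.14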
c6082155e6accc773be67ba35926bf33348b6fbf
687,290
def find_result_sentiment(words, word_sentiments):
    """Compute average sentiment for a result represented as a list of words.

    Words not in word_sentiments are ignored. If we don't have sentiment for
    any wd in words, return None.
    """
    word_ratings = [word_sentiments[wd] for wd in words if wd in word_sentiments]
    if len(word_ratings) == 0:
        return None
    return sum(word_ratings) / len(word_ratings)
4dad76a4b986973d608c8952b4991a553267655a
530,932
from typing import List


def repeat_prev(prev_vals: List[float]):
    """
    Repeats the previously seen value again
    """
    return prev_vals[-1]
026b89ad9f46e20b051f075cde56132fcbf69f7d
523,213
def key_gen(params):
    """Generates a fresh key pair"""
    _, g, o = params
    priv = o.random()
    pub = priv * g
    return (pub, priv)
c554fdcda209d591ac952ea43a69163f0448dd28
696,144
def conv_outdim(i_dim, k, padding=0, stride=1, dilation=1):
    """Return the dimension after applying a convolution along one axis"""
    return int(1 + (i_dim + 2 * padding - dilation * (k - 1) - 1) / stride)
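A few sanity checks against standard convolution arithmetic, using hypothetical sizes:

print(conv_outdim(28, 3))                       # 26 (valid conv, stride 1)
print(conv_outdim(28, 3, padding=1))            # 28 (same-size conv)
print(conv_outdim(28, 3, padding=1, stride=2))  # 14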
56436dde53f4463494011a0fb1f6b3bfc93bbb0a
98,686
def letter_only(guess, abc):
    """
    Checks if the player's guess is a single ASCII letter only.

    :param abc: the alphabet used in the proverbs
    :type abc: str
    :param guess: the player's guess
    :type guess: str
    :return: True | False
    :rtype: bool
    """
    if len(guess) == 1 and guess.upper() in abc:
        return True
    return False
e6822eab984c6c8d0c15e103ced88bed348620cc
625,007
def get_query_id(query_results, query_type='track'):
    """
    get id(s) for query results. if query type is set to track also returns
    preview url.

    :param query_results: json object
    :param query_type: str
    :return: id_list
    """
    id_list = list()
    for item in query_results:
        album_name = item['album']['name']
        album_image = item['album']['images'][2]['url']
        artist_name = item['album']['artists'][0]['name']
        song_name = item["name"]
        item_id = item["id"]
        # The original condition compared query_type to itself, which is
        # always true; compare against the 'track' type instead.
        if query_type == 'track':
            preview = item["preview_url"]
            id_list.append((
                album_name, album_image, artist_name, song_name, item_id,
                preview
            ))
        else:
            id_list.append((
                album_name, album_image, artist_name, song_name, item_id
            ))
    return id_list
2f9ae58b3a0fdeb03ff1fed78a83d82735fa2fd6
380,198
def get_default_params(dim: int) -> dict:
    """
    Returns the default parameters of the Self-adaptive Differential
    Evolution Algorithm (SaDE).

    :param dim: Size of the problem (or individual).
    :type dim: int
    :return: Dict with the default parameters of SaDE
    :rtype: dict
    """
    return {'max_evals': 10000 * dim, 'population_size': 60, 'callback': None,
            'individual_size': dim, 'seed': None, 'opts': None,
            'terminate_callback': None}
0763c03c0d15a185069ffe4189a24dae863ef0e4
461,307
from typing import Optional
from typing import Union
from typing import List


def _make_filter_string(
    schema_title: Optional[Union[str, List[str]]] = None,
    in_context: Optional[List[str]] = None,
    parent_contexts: Optional[List[str]] = None,
    uri: Optional[str] = None,
) -> str:
    """Helper method to format filter strings for Metadata querying.

    No enforcement of correctness.

    Args:
        schema_title (Union[str, List[str]]): Optional. schema_titles to
            filter for.
        in_context (List[str]): Optional. Context resource names that the
            node should be in. Only for Artifacts/Executions.
        parent_contexts (List[str]): Optional. Parent contexts the context
            should be in. Only for Contexts.
        uri (str): Optional. uri to match for. Only for Artifacts.

    Returns:
        String that can be used for Metadata service filtering.
    """
    parts = []
    if schema_title:
        if isinstance(schema_title, str):
            parts.append(f'schema_title="{schema_title}"')
        else:
            substring = " OR ".join(f'schema_title="{s}"' for s in schema_title)
            parts.append(f"({substring})")
    if in_context:
        for context in in_context:
            parts.append(f'in_context("{context}")')
    if parent_contexts:
        parent_context_str = ",".join([f'"{c}"' for c in parent_contexts])
        parts.append(f"parent_contexts:{parent_context_str}")
    if uri:
        parts.append(f'uri="{uri}"')
    return " AND ".join(parts)
835369fe5add016bb4807c90351443d98eb967b5
413,585
def get_insert_query(table_name):
    """Build a SQL query to insert a RDF triple into a SQLite dataset."""
    return (f"INSERT INTO {table_name} (subject,predicate,object) "
            "VALUES (?,?,?) "
            "ON CONFLICT (subject,predicate,object) DO NOTHING")
ac8471b7de15780e90f0ead1594c9ffbc06394cb
530,692
def _path2map(path, targ_length):
    """
    Turn a path into a list of lists mapping target scans to one or more
    reference scans.
    """
    idx_map = [[] for x in range(targ_length)]
    for step in path:
        idx_map[step[1]].append(step[0])
    return idx_map
cd23a43ba6ba57fdaeee69207023af44844ea0e9
323,579
def records_from_join_res(list_of_tuples):
    """
    Given a list of tuples from the result of a sqlalchemy join query,
    returns a dictionary mapping a class to a set of instances of that class.

    :param list_of_tuples: sqlalchemy join query result (list of tuples)
    :return: dictionary of classes to unique instances set (dict)
        {Class: {inst1, inst2}}
    """
    class_instances = {}  # {Class: {inst1, inst2}}
    # iterate through each substrate/phosphosite/kinase
    for instances_tuple in list_of_tuples:
        # iterate through each instance in the tuple
        for instance in instances_tuple:
            if instance:
                # add a dict key for each type of class
                class_obj = type(instance)
                cls_set = class_instances.setdefault(class_obj, set())
                cls_set.add(instance)
    return class_instances
b109b69a79c7ea49272e51d5177e9ca4224d7e80
500,033
def recursive_check(task, attr="rerun"):
    """Check if a task or any of its recursive dependencies has a given
    attribute set to True."""
    val = getattr(task, attr, False)
    for dep in task.deps():
        val = val or getattr(dep, attr, False) or recursive_check(dep, attr)
    return val
b85773b4dcadb20b97e2777c6736654bb1b72957
40,209
def generate_list_of_images(path_to_dir):
    """ Returns a list of all the image paths in the provided directory """
    assert path_to_dir.is_dir()
    file_paths_input = []
    for file in path_to_dir.iterdir():
        if file.suffix.lower() in ['.jpg', '.png', '.jpeg']:
            file_paths_input.append(file)
    return file_paths_input
f736c7d01bf1cc11667999cafac55451547382d1
454,203
def split_tuple(s):
    """Split a string of comma-separated expressions at the top level,
    respecting parentheses and brackets:
    e.g. 'a, b, c(d, e), f' -> ['a', 'b', 'c(d, e)', 'f']

    Args:
        s (string): a string of comma-separated expressions which may
            themselves contain commas

    Returns:
        list(str): a list of top-level expressions
    """
    bl = 0  # Bracketing level
    ls = []
    w = ""
    for i, c in enumerate(s):
        if c in "[(":
            bl += 1
        elif c in ")]":
            bl -= 1
        if c == "," and not bl:
            # Add running word to the list as long as we're outside of brackets
            ls.append(w.strip())
            w = ""
        else:
            w += c  # Add character to running string
    ls.append(w.strip())
    return ls
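A sketch on a hypothetical nested expression; commas inside brackets do not split:

print(split_tuple('a, b, c(d, e), f[g, h]'))
# ['a', 'b', 'c(d, e)', 'f[g, h]']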
ae181b9a97a75f7dbdad2d95e589ffef0773f2d2
148,617
def is_even(x):
    """return true if x is even."""
    return x//2*2 == x
a234c1085fc13cc78ead9c771fd3dbb6cf6a198a
343,855
def is_revisited_dataset(dataset_name: str):
    """
    Computes whether the specified dataset name is a revisited version of
    the oxford and paris datasets. Simply looks for the patterns "roxford5k"
    and "rparis6k" in the specified dataset_name.
    """
    if dataset_name in ["roxford5k", "rparis6k"]:
        return True
    return False
83711e78a7793fd3565d4598e2785c3d1f630369
379,833
def is_indented(text):
    """
    Simple check to see if a line is indented.
    For now, a line that starts with ANY whitespace is indented
    """
    return bool(len(text) - len(text.lstrip()))
1bf7b02595518878f2db5fc72a6b6b44f5a1f5d7
441,088
def clean_link_text(link_text):
    """
    The link text sometimes contains new lines, or extraneous spaces;
    we remove them here.
    """
    return link_text.strip().replace('\n', '').replace('\r', '')
09de717a280c26cb3d272fd40dcdbe8091a5a002
620,738
def read_matrix(filename):
    """
    Reads a file containing integers to a matrix (list of lists).
    """
    digits = []
    with open(filename) as f:
        for line in f.readlines():
            ints = [int(n) for n in line.split()]
            digits.append(ints)
    return digits
010b98c51f97787b2f14b20078bf916df9fa5382
482,724
def remove_near_elements(arr, time_difference, time_idx) -> list:
    """Remove list elements within a specified time difference.

    Args:
        arr: a list of arrays or tuples
        time_difference: the minimum time difference between elements
        time_idx: the index of each arr element with an integer time

    Returns:
        list: arr with certain elements removed, if necessary
    """
    new = []
    time_to_beat = 0
    for i in range(len(arr)):
        if arr[i][time_idx] >= time_to_beat:
            new.append(arr[i])
            time_to_beat = arr[i][time_idx] + time_difference
    return new
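A sketch with hypothetical (value, timestamp) events thinned to at most one per 10 time units:

events = [('a', 0), ('b', 4), ('c', 10), ('d', 25)]
print(remove_near_elements(events, time_difference=10, time_idx=1))
# [('a', 0), ('c', 10), ('d', 25)]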
7d27f49cf97447abdbac2f1535134b7bc2e4c7f8
412,403
def batched_index_select_nd(t, inds):
    """
    Index select on dim 1 of a n-dimensional batched tensor.

    :param t (batch, n, ...)
    :param inds (batch, k)
    :return (batch, k, ...)
    """
    return t.gather(
        1, inds[(...,) + (None,) * (len(t.shape) - 2)].expand(-1, -1, *t.shape[2:])
    )
a42758470d4268185a6f6bc0183b820f2933d4a5
534,481
def rm_var(di):
    """Helper to remove the varname key from aggregation output dict.

    Args:
        di (dict): dict from *aggr_out results

    Returns:
        dict without the "varname" key
    """
    del di["varname"]
    return di
cf4c377a70a7d3a5e6588a428c4f0a26902fc4f1
136,519
def dict_to_prop(d):
    """convert dictionary to multi-line properties"""
    if len(d) == 0:
        return ""
    return "\n".join("{0}={1}".format(k, v) for k, v in d.items())
c3421b1ed1a2efdf0bfe4e29846d2f585966d58b
549,089