Dataset columns:
  content — string (length 39 to 9.28k)
  sha1 — string (length 40)
  id — int64 (8 to 710k)
def unparse_msgid_article(obj):
    """Unparse a message-id or article number argument.

    Args:
        obj: A message-id or an article number.

    Returns:
        The message-id or article number as a string.
    """
    return str(obj)
f6623b88fca8985fc9bd594b1c8819d04a523012
183,158
from typing import List


def ListNumbersToIntInBase(digits: List[int], base: int = 10) -> int:
    """Convert a list of digits into its integer value in the corresponding base.

    Args:
        digits (List[int]): List of digits to convert.
        base (int, optional): Base to convert from. Defaults to 10.

    Returns:
        int: Integer value of the number.
    """
    val = 0
    exp = len(digits) - 1
    for digit in digits:
        val += digit * pow(base, exp)
        exp -= 1
    return val
6286683bd6348a07b4e650d1f799208e24cdffde
481,998
def portfolio_name(fictional: bool) -> str:
    """Returns the proper name (as a string) of the portfolio."""
    return "fictional_crypto_positions.p" if fictional else "actual_crypto_positions.p"
612724e6a303b2bfa53f7fdf0470f468362571d6
455,482
def map_args(tree, args): """ Given a tree and a list of arguments, produce the tree with the arguments instead of integers at the leaves of the tree. E.g. for tree = [[1, 2], [3, 4]] and args = [a, b, c, d] we get [[a, b], [c, d]]. """ (s, t, a) = tree if a[0] == 1: return args[0] return [map_args((1, t[0], a[1]), args[:a[1][0]]), map_args((1, t[1], a[2]), args[a[1][0]:])]
ffe0b977d508d33227586a56dee8cae36f21b31b
42,991
import re


def isAsciiName(s):
    """
    Return True iff s is pure-ASCII and a syntactically valid XML name.

    >>> isAsciiName("a")
    True
    >>> isAsciiName("ab.-dc")
    True
    >>> isAsciiName("")
    False
    >>> isAsciiName(".foo")
    False
    """
    return re.match(r'^[A-Za-z_:][A-Za-z0-9_:\-.]*$', s) is not None
e3f33d249fc55de80e25bdf481ee5645b8153398
553,430
def _test_failure(block_id, block_dict, extra_args):
    """
    Automatically returns failure, no reason
    """
    return False
410faf8e908f3cebdd8e640e4a40b0cd43fb42c4
360,100
def get_species_from_cf_headerline(line):
    """Get the number of species and their names from a counts format header line.

    :param str line: The header line.

    :rtype: (int n_species, [str] sp_names)
    """
    sp_names = line.split()[2:]
    n_species = len(sp_names)
    if n_species < 2:
        print("Error: Not sufficiently many species (<2).\n")
        raise ValueError()
    return (n_species, sp_names)
fdf77624c1e97c76710207d69afa3f6b8467ece4
408,451
def merge_datasets(dataset_list):
    """
    Merge dataset objects in the list.

    :param dataset_list: datasets in a list
    :type dataset_list: List[Dict]
    :return: merged dataset
    :rtype: Dict
    """
    result_dataset = {}
    for dataset in dataset_list:
        for split in dataset:
            if split not in result_dataset:
                result_dataset[split] = {'images': {}, 'boxes': {}}
            for img_name in dataset[split]['images']:
                result_dataset[split]['boxes'][img_name] = dataset[split]['boxes'][img_name]
                result_dataset[split]['images'][img_name] = dataset[split]['images'][img_name]
    return result_dataset
58b1325cd4d6330273ddfd9001d7794b4ca12888
411,977
def parseGithubUrl(fullUrl, stripDotGit=False):
    """
    Get user/organisation and repository name from a GitHub remote URL
    (optionally removing the trailing ".git").
    """
    remote, repo = fullUrl.split(":")[-1].split("/")[-2:]
    if stripDotGit and repo.endswith(".git"):
        # str.strip(".git") would strip any of the characters '.', 'g', 'i', 't'
        # from both ends, so slice the suffix off instead.
        repo = repo[:-len(".git")]
    return remote, repo
f8d3048e4a40b8a1aab11df55dda011d5a3c9d27
581,837
def longest_seq(seqs):
    """
    Find the longest chain in the output of all_uninterrupted_seqs.
    """
    max_len = 0
    max_seq = []
    for seq in seqs:
        if len(seq) >= max_len:
            max_seq = seq
            max_len = len(max_seq)
    return max_seq
a2d98e04ba652d11aa853a6bc6c0298a9629e802
119,726
def covert_unc(host, path):
    """Convert a file path on a host to a UNC path."""
    return ''.join(['\\\\', host, '\\', path.replace(':', '$')])
39a5b554faec4cdfb7ba4afcd23f5d81ad758e32
562,048
def normPath(path: str) -> str:
    """
    Normalize a path string.

    Converts Windows-style paths (unescaped backslashes) and Windows-style
    Python paths (escaped backslashes, i.e. double backslashes) to Unix-style
    paths (forward slashes), which are also compatible with Python on Windows.

    :param path: str, path to normalize.
    :return: str, the normalized path.
    """
    # First convert any double-backslash separators (Windows Python style)
    # to single backslashes.
    result = '\\'.join(path.split(r'\\'))
    # Then convert all single-backslash separators (Windows style) to
    # Unix-style forward slashes.
    result = '/'.join(result.split('\\'))
    return result
1a858b2ed2ab7a2ea2d8e6e7dd33417aea09cae8
54,020
def find_val_or_next_smaller_iter(bst, x, d=None): """Get the greatest value <= x in a binary search tree. Returns None if no such value can be found. """ while True: if bst is None: return d elif bst.val == x: return x elif bst.val > x: (bst, x, d) = (bst.left, x, d) else: (bst, x, d) = (bst.right, x, bst.val)
16b018345f4be498c644868a4661eee7e7a8b499
637,063
def RGBToString(rgb_tuple):
    """Convert a color to a CSS-readable string."""
    color = 'rgb(%s,%s,%s)' % rgb_tuple
    return color
1035d64dd0c399d787f9f09dde188632d139be5f
312,448
def get_formatted_emg(emg_row):
    """
    :param emg_row: list [str], one row of data from the electromyograph sensor,
        example: ['2018-07-04T17:39:53.743240', 'emg', '-1', '-6', '-9', '-9',
                  '1', '1', '-1', '-2', '2018-07-04T17:39:53.742082']
    :return: formatted emg row,
        example: ['2018-07-04T17:39:53.743240', '-1', '-6', '-9', '-9',
                  '1', '1', '-1', '-2']
    """
    new_emg_row = emg_row.copy()
    new_emg_row.pop(1)  # remove the 'emg' word
    new_emg_row.pop(9)  # remove the trailing timestamp
    return new_emg_row
ea939fdf8e99a1048a48ce6f021d03ca4143b862
684,310
import re


def guess_x0(name):
    """
    Attempt to automatically detect the value of the lower discrete mass
    (e.g. 0 mm for precipitation).
    """
    prog = re.compile(".*precip.*")
    if prog.match(name.lower()):
        return 0
    prog = re.compile("RH")
    if prog.match(name):
        return 0
    return None
c4bfaa0bba8d2f5f89679e508e8a5ac6467e2815
476,532
def read_recipes(filename):
    """Reads the file and returns its content as a list of strings."""
    with open(filename, 'r') as file_handle:
        return file_handle.read().splitlines()
513c087395d286ccd922ecbfa6f5d2547b957c1e
247,990
def denormalize(x_point: float, mean: float, width: float) -> float:
    """De-normalize the data point.

    Args:
        x_point (float): the data point
        mean (float): the mean value
        width (float): the width

    Returns:
        float: the de-normalized value
    """
    return 0.5 * width * x_point + mean
a7811b9baaea893b97f98eb8937cefe5022816c8
489,798
def get_genomic_sequence(genome, chrom, start, end):
    """
    Return a sequence for the genomic region.

    start, end: 1-based, end-inclusive coordinates of the sequence.
    """
    if start > end:
        return ''
    else:
        return str(genome[str(chrom)][start - 1:end]).upper()
7f0e2829d5c643e0c2f3dd0a33633310cf248cba
284,649
import random


def varAnd(population, toolbox, cxpb, mutpb):
    r"""Part of an evolutionary algorithm applying only the variation part
    (crossover **and** mutation). The modified individuals have their
    fitness invalidated. The individuals are cloned so the returned
    population is independent of the input population.

    :param population: A list of individuals to vary.
    :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution
                    operators.
    :param cxpb: The probability of mating two individuals.
    :param mutpb: The probability of mutating an individual.
    :returns: A list of varied individuals that are independent of their
              parents.

    The variation goes as follows. First, the parental population
    :math:`P_\mathrm{p}` is duplicated using the :meth:`toolbox.clone` method
    and the result is put into the offspring population :math:`P_\mathrm{o}`.
    A first loop over :math:`P_\mathrm{o}` is executed to mate consecutive
    individuals. According to the crossover probability *cxpb*, the
    individuals :math:`\mathbf{x}_i` and :math:`\mathbf{x}_{i+1}` are mated
    using the :meth:`toolbox.mate` method. The resulting children
    :math:`\mathbf{y}_i` and :math:`\mathbf{y}_{i+1}` replace their
    respective parents in :math:`P_\mathrm{o}`. A second loop over the
    resulting :math:`P_\mathrm{o}` is executed to mutate every individual
    with a probability *mutpb*. When an individual is mutated it replaces
    its non-mutated version in :math:`P_\mathrm{o}`. The resulting
    :math:`P_\mathrm{o}` is returned.

    This variation is named *And* because of its propensity to apply both
    crossover and mutation on the individuals. Note that both operators are
    not applied systematically; the resulting individuals can be generated
    from crossover only, mutation only, crossover and mutation, or
    reproduction, according to the given probabilities. Both probabilities
    should be in :math:`[0, 1]`.
    """
    offspring = [toolbox.clone(ind) for ind in population]

    # Apply crossover and mutation on the offspring
    for i in range(1, len(offspring), 2):
        if random.random() < cxpb:
            offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])
            del offspring[i - 1].fitness.values, offspring[i].fitness.values

    for i in range(len(offspring)):
        if random.random() < mutpb:
            offspring[i], = toolbox.mutate(offspring[i])
            del offspring[i].fitness.values

    return offspring
fee94e88cba8c9a208f9b4322ceb08aee1e05baa
520,885
import itertools


def head(stream, n=10):
    """Convenience function: return the first `n` elements of the stream, as a plain list."""
    return list(itertools.islice(stream, n))
97af4b27bf102ee72f7f2507109d97ad68b97964
581,775
def _merge_weights(spin_d1, spin_d2):
    """Sum the weights stored in two dictionaries with keys being the spins."""
    if len(spin_d1) != len(spin_d2):
        raise RuntimeError("Critical - mismatch spin-dict length")
    out = {}
    for spin in spin_d1:
        out[spin] = spin_d1[spin] + spin_d2[spin]
    return out
6e99afd66192954db1eae9d4f958dd2ca2b23c59
669,300
import torch


def mat2flat(h):
    """
    Converts a homography matrix with shape `[1, 3, 3]` to its corresponding
    flattened homography transformation with shape `[1, 8]`.
    """
    h = torch.reshape(h, [-1, 9])
    return (h / h[:, 8:9])[:, :8]
dc6c883048fc09dd13b9df3c906050520490e342
277,864
def resample_data(df, t, my_cols): """ Returns a dataframe with resampled data [mean, std, count]. Parameters: df (pandas DataFrame): dataframe t ('T', 'H', 'D') : minute, hour or day my_cols (list-like): selected columns """ df_mean = df[my_cols].resample(t).mean() df_std = df[my_cols].resample(t).std() df_count = df[my_cols].resample(t).count() return df_mean.join(df_std, rsuffix='_std').join(df_count, rsuffix='_count')
80062df5edead2b955a7d2c01cb24324d547f124
80,565
from typing import Sequence import codecs def _read_notebook(path: str, encoding: str) -> Sequence[str]: """ Read a source notebook into a list of lines, removing trailing newlines. :param path: the path to the source notebook :param encoding: the encoding to use to read the notebook :return: the list of lines """ buf = [] with codecs.open(path, mode="r", encoding=encoding) as f: for line in f.readlines(): # Don't use rstrip(), because we want to keep any trailing white # space, except for the trailing newline. if len(line) == 0: buf.append(line) continue if line[-1] != "\n": buf.append(line) continue buf.append(line[:-1]) return buf
c5b0725caf8e4ec1af7c6ce43ae9c31984a49182
324,659
def authorization(context):
    """Construct a header with the authorization token for server API calls.

    The returned dict can be added to the 'request' object.
    """
    return {'Authorization': 'Bearer {token}'.format(token=context.token)}
c91eec533f26a3a2bef61681087830f8889dfe2d
442,823
import time import logging def timed(fun): """Decorator to measure execution time of a function Args: fun (``function``): Function to be timed Returns: ``function``: decorated function Example: :: import time from pygrfnn.utils import timed # decorate a function @timed def my_func(N, st=0.01): for i in range(N): time.sleep(st) # use it as you would normally would my_func(100) """ def log_wrapper(*args, **kwargs): t0 = time.time() output = fun(*args, **kwargs) elapsed = time.time() - t0 if elapsed < 60: elapsed_str = '%.2f seconds' % (elapsed) else: elapsed_str = time.strftime('%H:%M:%S', time.gmtime(elapsed)) logging.info('\n%s took %s' % (fun.__name__, elapsed_str, )) return output return log_wrapper
fd2e71527d7a72f2a369e629eb7ca7cf369d641f
391,976
import struct
import socket


def ip_to_int(network: str) -> int:
    """
    Takes an IP and returns the unsigned integer encoding of the address.

    :param str network: ip address
    :return: unsigned integer encoding
    """
    return struct.unpack('=L', socket.inet_aton(network))[0]
973208a95aa01fd83a475d02c66196e3a0e3d47f
522,899
import math def filter_by_slope(slope): """ Filter horizontal lines :param slope: the slope of the line :return: False if the line is almost horizontal, True otherwise """ # The calculation is done creating a valid zone around the vertical deviation_from_vertical = 70*math.pi/180 vertical = 90*math.pi/180 # Transpose the slope to the 1 and 2 quadrants if slope < 0: slope += math.pi # Filter if vertical-deviation_from_vertical < slope < vertical+deviation_from_vertical: return True return False
98c0fcbc9292d7b1a9131705b144d3c19447a61e
325,088
import torch def invariant_loss(x, y, symmetry): """ Finds permutation invariant loss, for fixed set of allowed permutations. Computes picking minimal loss under all permutations. Uses L2 loss. :param x: Input of shape (..., d) :param y: Output of shape (..., d) :param symmetry: Symmetry transformation matrices of shape (n, d, d). Must include identity :return: Loss of shape (...) """ batch_shape = x.shape[:-1] d = x.shape[-1] x = x.view(-1, d) # [b, d] y = y.view(-1, d) # [b, d] x_transformed = torch.einsum("nde,be->nbd", [symmetry, x]) diff = (y - x_transformed).pow(2).sum(2) # [n, b] min_diff = torch.min(diff, dim=0)[0] # [b] return min_diff.view(batch_shape)
7cbcbb2ba49757d940bbaf5bc9f5668c00a74b9d
632,920
def SIR(t, y, N, kappa, tau, nu):
    """
    Expresses the SIR model in initial-value ODE format, taking the time,
    the state variables, and the parameters required to compute the derivatives.

    Parameters
    ---------------
    t : array
        Independent variable (time)
    y : array-like
        State of the system, [S, I, R]
    N, kappa, tau, nu : float
        Parameters of the model

    Returns
    ----------------
    f : array-like
        Array of derivatives for the system to be numerically integrated
    """
    S, I, R = y
    dSdt = - kappa * tau / N * S * I
    dIdt = (kappa * tau / N * S - nu) * I
    dRdt = nu * I
    f = [dSdt, dIdt, dRdt]
    return f
6ac2469813769beeae9171b55be5d535f4a0c3eb
487,992
import re


def substitute_category_name(category_name):
    """
    Replaces whitespace and '-' characters in `category_name` to allow
    category_name to be made into a valid Python identifier.

    Doesn't check all possible ways a string might be invalid; the user of
    the collate_content module is advised to use categories with
    Python-friendly names.
    """
    return re.sub(r"\s", "_", category_name).replace("-", "_").lower()
284fe8b654aff53560dc66ecf75430761e8a3ba6
267,013
def lightness_correlate(Y_b, Y_w, Q, Q_w):
    """
    Returns the *Lightness* correlate :math:`J`.

    Parameters
    ----------
    Y_b : numeric
        Tristimulus value :math:`Y_b` of the background.
    Y_w : numeric
        Tristimulus value :math:`Y_w` of the reference white.
    Q : numeric
        *Brightness* correlate :math:`Q` of the stimulus.
    Q_w : numeric
        *Brightness* correlate :math:`Q` of the reference white.

    Returns
    -------
    numeric
        *Lightness* correlate :math:`J`.

    Examples
    --------
    >>> Y_b = 100.0
    >>> Y_w = 100.0
    >>> Q = 22.209765491265024
    >>> Q_w = 40.518065821226081
    >>> lightness_correlate(Y_b, Y_w, Q, Q_w)  # doctest: +ELLIPSIS
    30.0462678...
    """
    Z = 1 + (Y_b / Y_w) ** 0.5
    J = 100 * (Q / Q_w) ** Z
    return J
854785cf9de0a03dcfc16a4e094dd39298f9abd7
46,469
def use_aws_kms_store(session, region, kmskeyid, access_id, secret, encryption_pwd=None, old_encryption_pwd=None, return_type=None, **kwargs): """ Sets the encryption password globally on the VPSA. This password is used when enabling the encryption option for a volume. CAUTION: THIS PASSWORD IS NOT STORED ON THE VPSA - IT IS THE USER'S RESPONSIBILITY TO MAINTAIN ACCESS TO THE PASSWORD. LOSS OF THE PASSWORD MAY RESULT IN UNRECOVERABLE DATA. :type session: zadarapy.session.Session :param session: A valid zadarapy.session.Session object. Required. :type region: str :param region: The AWS KMS region code to set. Required. :type kmskeyid: str :param kmskeyid: The AWS KMS key id to set. Required. :type access_id: str :param access_id: The AWS KMS access id to set. Required. :type secret: str :param secret: The AWS KMS secret password to set. Required. :type encryption_pwd: str :param encryption_pwd: The master encryption password to set. Required. :type old_encryption_pwd: str :param old_encryption_pwd: Old master encryption password. Required if setting a new password and older password is already set. Optional. :type return_type: str :param return_type: If this is set to the string 'json', this function will return a JSON string. Otherwise, it will return a Python dictionary. Optional (will return a Python dictionary by default). :rtype: dict, str :returns: A dictionary or JSON data set as a string depending on return_type parameter. """ body_values = {'region': region, 'kmskeyid': kmskeyid, 'access_id': access_id, 'secret': secret} if encryption_pwd: body_values['encryption_pwd'] = encryption_pwd if old_encryption_pwd: body_values['old_encryption_pwd'] = old_encryption_pwd path = '/api/settings/encryption.json' return session.post_api(path=path, body=body_values, return_type=return_type, **kwargs)
de478ab6633f0ab179511d24d2bc1e4b35408d4e
104,851
def get_bonds(input_group):
    """Utility function to get indices (in pairs) of the bonds."""
    out_list = []
    for i in range(len(input_group.bond_order_list)):
        out_list.append((input_group.bond_atom_list[i * 2],
                         input_group.bond_atom_list[i * 2 + 1]))
    return out_list
4f39d9d588a1d3e919fcd5e369cd72c6dbac3442
700,381
import torch


def squared_difference(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Compute (x - y)(x - y) element-wise."""
    return (x - y) ** 2
c95b8c1c71a0dbacea405679874ac37e4dd82384
94,158
import pkg_resources


def resource_string(path):
    """Handy helper for getting resources from our kit."""
    return pkg_resources.resource_string(__name__, path).decode("utf8")
583f0327bae3a17c45d064e028dea12193d12c6c
551,214
import re


def sanitize_tag_label(label_string):
    """Return the label uppercased, with punctuation removed and runs of
    whitespace collapsed into single dashes."""
    return re.sub(r'[-\s]+', '-',
                  (re.sub(r'[^\w\s]', '', label_string).strip().upper()))
1fee28a86533a5fda3a34a647c445fdaa73e3c59
696,953
def relative_len_difference(lca, lcb):
    """
    Get the ratio of the sentence length difference to the maximum sentence length.

    :param lca: lcase words in sent_a, without stopwords
    :param lcb: lcase words in sent_b, without stopwords
    :return: Float, a similarity score
    """
    la, lb = len(lca), len(lcb)
    return abs(la - lb) / float(max(la, lb) + 1e-5)
98d67c8b9ee333b29ce5ebcce9c35308e63b3227
352,911
def ordered_distinct(collection):
    """
    Return the unique elements of the given collection, preserving the order
    of elements.
    """
    seen = set()
    return [x for x in collection if x not in seen and (seen.add(x) or True)]
9891469db9cc5f876f902b8f904a71e28a73dc76
239,989
import yaml


def load(filename):
    """Load a yaml file and return a tuple of (success, content)."""
    with open(filename, 'r') as stream:
        try:
            return True, yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            return False, exc
cec9c7a77561db7fd22bbc7fd979cc421a1d4b66
445,333
def create_access_args(current_actor_id=None, superuser_actor_ids=None):
    """Return a dict that can be provided to resource registry and datastore
    find operations to indicate the caller's actor and system superusers.
    """
    access_args = dict(current_actor_id=current_actor_id,
                       superuser_actor_ids=superuser_actor_ids)
    return access_args
73f02198757d003a651a8fccccb69f3534468771
147,090
def makePollBar(name: str, numVotes: int, maxNameLength: int, maxVotes: int, maxBarLength: int) -> str:
    """Make a bar for a poll results bar chart, for the statistics of a given poll option.

    :param str name: The name of the poll option
    :param int numVotes: The number of votes that the option received
    :param int maxNameLength: The length of the longest option name in the poll
    :param int maxVotes: The number of votes received by the winning option
    :param int maxBarLength: The maximum length a bar may be
    :return: A string containing the name of the option, followed by a number of = characters
             proportional to the number of votes this option received in relation to the winning
             option, followed by the number of votes.
    :rtype: str
    """
    winner = numVotes == maxVotes
    nameSpacing = maxNameLength - len(name)
    barLength = int((numVotes / maxVotes) * maxBarLength)
    return name + (" " * nameSpacing) + " | " \
        + ("=" * barLength) + ("" if numVotes else " ") \
        + ("🏆" if winner else "") + " +" + str(numVotes) + " Vote" + ("" if numVotes == 1 else "s")
c25681f45fd2c85b7d895819faf7fc033c46b891
245,887
from typing import Tuple


def parse_description(description: str) -> Tuple[int, str]:
    """Parse a task description into amount and currency."""
    raw_amount, currency = description.split(' ')
    raw_amount = raw_amount.replace('k', '000')
    return int(raw_amount), currency
181f0f0fb57acae68b6bcbc7315387fdb63e8056
660,119
def normalize_factors(factors): """Normalize the factor list into a list of individual factors. The factor argument has "append" behavior (-f foo -f bar), and each of these arguments may be a comma-separated list of factors. Normalize this into a flat list of individual factors. e.g., >>> normalize_factors(['py37', 'lint,isort']) ['py37', 'lint', 'isort'] Args: factors: A list of comma-separated factor strings. Returns: The list flattened, individual factors. """ assert isinstance(factors, list), ( 'Expected `factors` list to be a list, got `{cls}`.' .format(cls=type(factors).__name__)) flattened = [ f.strip() for flist in factors for f in flist.split(',') ] # Remove empty strings return [f for f in flattened if f]
2c50b763b7ced043fa3eb028ce96f5c3a666cefe
142,335
def filtrar_caracteristica(lista, caracteristica, alvo):
    """
    Build a list with the elements of 'lista' whose position in
    'caracteristica' equals 'alvo'. For example, with the input below
    we would return ['Alemanha', 'Portugal']:

    lista = ['Brasil', 'Alemanha', 'Angola', 'Portugal']
    caracteristica = ['América do Sul', 'Europa', 'África', 'Europa']
    alvo = 'Europa'

    Parameters: lists of numbers or strings and a target value.
    Returns: the list filtered by the characteristic.
    """
    nova_lista = []
    for i in range(len(caracteristica)):
        if caracteristica[i] == alvo:
            nova_lista.append(lista[i])
    return nova_lista
efc49fb40347e4d221dd798e0d91c1d21baec2aa
318,719
def _serializeSubIds(subIds):
    """
    Serialize a set of unit subIds, without regard for the playerId.
    """
    val = 0
    for i in subIds:
        val |= (1 << i)
    return "{:x}".format(val)
d17ea000aec6fd783488cbe6c4ecea5fd83e8372
148,814
import glob


def search_dir(dir_name: str, file_suffix: str) -> list:
    """Iterate through a directory and all of its subdirectories and return a
    list of absolute paths for all files with a given suffix.

    dir_name: absolute path to the directory
    file_suffix: string with the file extension (e.g.: .xlsx; .txt; etc.)
    """
    files = [file for file in glob.glob(f"{dir_name}/**/*{file_suffix}", recursive=True)]
    return files
9b3234183d052d041225bcafc1ca767c9999218d
325,462
from typing import List


def read_lines_from_file(path: str, strip: bool = False) -> List[str]:
    """Read a file into a list of lines, optionally stripped."""
    with open(path, "r") as f:
        lines = f.readlines()
    if not strip:
        return lines
    return list([line.strip() for line in lines])
299466534b9d1646f42b5a3137794bde309f500d
687,519
import statistics def linear_regression(xs, ys): """ Computes linear regression coefficients https://en.wikipedia.org/wiki/Simple_linear_regression Returns a and b coefficients of the function f(y) = a * x + b """ x_mean = statistics.mean(xs) y_mean = statistics.mean(ys) num, den = 0.0, 0.0 for x, y in zip(xs, ys): num += (x - x_mean) * (y - y_mean) den += (x - x_mean) * (x - x_mean) a = num / den b = y_mean - a * x_mean return a, b
6b6ecbd31262e5fe61f9cf7793d741a874327598
706,426
def _strip_scope(name, scope, additional_scope):
    """Returns the name with scope stripped from it."""
    if additional_scope:
        name = name.replace("{}/".format(additional_scope), "")
    if scope:
        name = name.replace("{}/".format(scope), "", 1)
    return name
4cf38273d14daade449da4f077f6f8dc56233492
402,572
def get_source_url(event):
    """Rebuild the source url from the lambda event."""
    host = event['headers']['Host']
    scheme = event['headers']['X-Forwarded-Proto']
    path = event['path']
    return scheme + "://" + host + path
eaa6a57ce4947d37581b9e90b65364cc911170ee
219,681
import re


def extract_time(input_: str) -> list:
    """Extracts 12-hour time values from a string.

    Args:
        input_: String from which the time should be extracted.

    Returns:
        list: Extracted time from the string.
    """
    return re.findall(r'(\d+:\d+\s?(?:a.m.|p.m.:?))', input_) or \
        re.findall(r'(\d+\s?(?:a.m.|p.m.:?))', input_) or \
        re.findall(r'(\d+:\d+\s?(?:am|pm:?))', input_) or \
        re.findall(r'(\d+\s?(?:am|pm:?))', input_)
5f6ca07cd6637f0692fddb23c859e6b6765a1d6d
99,972
def xml_find(tag_name, xml):
    """Returns the child with tag `tag_name` of the XML element `xml`.

    It raises an `AssertionError` when no child or more than one child with
    the tag `tag_name` is defined.
    """
    if xml is None:
        raise TypeError("xml_find(): XML argument must not be 'None'")
    results = xml.findall(tag_name)
    assert results, f"Child with tag `{tag_name}` not found."
    assert len(results) < 2, f"Too many children with tag `{tag_name}` found."
    return results[0]
b18971f85ea69f053cc9989ed4676a6c8d237859
304,365
def _build_intel_config(config, config_files):
    """Builds the wifi configuration for the intel driver.

    Args:
        config: Config namedtuple
        config_files: Map to look up the generated config files.

    Returns:
        wifi configuration for the intel driver.
    """
    design_name = config.hw_design.name.lower()
    return config_files.wifi_sar_map.get(design_name)
f4598c8a485dd61647b7adde0374326bae92cc7c
61,739
def raw_next_line() -> str:
    """
    Gets a completely raw line from the console.

    :return: The line that was retrieved from the console
    """
    return input()
b7d940cde14272d84706b8406b9a4198aa14a76e
388,725
import re def reformat_accession(seq_record): """ Reformat accessions in Seq object to be <=20 chars long and not contain any special chars """ if len(seq_record.id) > 20: short_id = seq_record.id[:20] else: short_id = seq_record.id seq_record.id = re.sub('[|,/,\,.,:,\,),(]', '_', short_id) return seq_record
e68548b5197a0078bc180a2fc461b98a684f7901
519,735
def contains(text: str, pattern: str, start=0) -> bool: """Return a boolean indicating whether pattern occurs in text.""" assert isinstance(text, str), 'text is not a string: {}'.format(text) assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text) # Iterative # ! Runtime = O(n), n is len of text. Text must be iterated over. # case: pattern length is greater than text if len(pattern) > len(text): return False # case: pattern is empty if pattern == '' or "": return True # no need to check the rest of text if pattern length exceeds remainder for index in range(start, len(text) - len(pattern) + 1): # case: check if first letter of pattern exists in text if text[index] == pattern[0]: # A slice of text that is the length of pattern starting from text[i] to check if the remaining pattern follows after text[i] rest = text[index: index + len(pattern)] if rest == pattern: return True # case: pattern not in text return False
0239ab7755dab39128ad0e50d41d49a2fd59b840
454,289
import codecs


def is_ascii_encoding(encoding):
    """Checks if a given encoding is ASCII."""
    try:
        return codecs.lookup(encoding).name == "ascii"
    except LookupError:
        return False
34b7e9ff3bcab56607062740d2caa7cd5bbeecd3
683,339
def sub_account_spot_summary(self, **kwargs): """Query Sub-account Spot Assets Summary (For Master Account) GET /sapi/v1/sub-account/spotSummary https://binance-docs.github.io/apidocs/spot/en/#query-sub-account-assets-for-master-account Keyword Args: email (str, optional): Sub account email page (int, optional): Default: 1 size (int, optional): Default 10, max 20 recvWindow (int, optional): The value cannot be greater than 60000 """ return self.sign_request("GET", "/sapi/v1/sub-account/spotSummary", kwargs)
47cd03c85eadc217103b2887457bb122dd35f48b
547,779
def mean(num_list): """ Computes the mean of a list of numbers Parameters ---------- num_list : list List of number to calculate mean of Returns ------- mean : float Mean value of the num_list """ # Check that input is of type list if not isinstance(num_list, list): raise TypeError('Invalid input %s - must be type list' % (num_list)) list_sum = 0.0 list_len = len(num_list) # Check that the list is not empty if list_len == 0: raise ValueError("Num list is empty") for el in num_list: list_sum += el return list_sum / list_len
7932b52a44515ec118f67365609b979452aee0eb
686,749
from typing import List from typing import Dict def _maps_to_dict(maps: List[str]) -> Dict[str, str]: """ Turns a list of colon-separated maps into a config dict. :param maps: The list of maps. :return: The transformed list as a dict. """ usernames_slugs = {} for map_ in maps: user, repo = map_.split(':', 1) usernames_slugs[user] = repo return usernames_slugs
188cc34d2417d11df54404dd60a6627fa1b5eac4
427,997
import codecs def text_from(path, encoding="utf-8"): """ Return the text from a file. :param path: file path string :param encoding: file encoding """ with codecs.open(path, "r", encoding) as fp: return fp.read()
4f3e47caa85627fee61331571ee5d5e5bb0c2d05
475,473
def parse_sdf_str(sdf_str): """ Parses SDF strings to get SDF type and keyword arguments for the function call. Args: sdf_str: ``str`` -> SDF strings are formatted as follows: "SDF:[sdf_type]:[I/F]:[parameter]=[value]:..." where value will be converted to either I=int or F=float, where appropriate. Colors are the exception, where always int is presumed in form of a tuple. .. note:: SDF string Example: ``SDF:circle:I:radius=100:frame_color=(80,120,10):alpha=180`` This would produce a circle with a radius of 100 pixel filled with the specified frame_color and alpha values. Returns: ``Tuple[Dict, str]`` -> kwargs as dictionary and the SDF type as string. """ elements = sdf_str.split(':') try: isfloat = {'I': False, 'F': True}[elements[2]] except KeyError: raise ValueError(f'Expected either "I" or "F" for data type, got ' f'"{elements[2]}" instead.') converter = { 'width': (float, int), 'height': (float, int), 'radius': (float, int), 'corner_radius': (float, int), 'border_thickness': (float, int), 'frame_color': ( lambda x: tuple([int(i) for i in x.strip()[1:-1].split(',')]), ), 'border_color': ( lambda x: tuple([int(i) for i in x.strip()[1:-1].split(',')]), ), 'multi_sampling': (int, ), 'alpha': (int, ) } # Insures that kwargs = {} w_unit = 0 for i in elements[3:]: try: k, value = i.split('=') except ValueError: raise ValueError(f'Expected "parameter=value", got "{i}" instead.') if k in converter: try: kwargs[k] = converter[k][0 if isfloat else -1](value) except (TypeError, ValueError): raise ValueError(f'Unable to unpack parameter: "{i}"') elif isfloat and k == 'w': w_unit = int(value) else: raise ValueError(f'Unknown parameter: "{k}".') if isfloat: for k in kwargs: if converter[k][0] is float: kwargs[k] = int(kwargs[k] * w_unit + 0.5) return kwargs, elements[1]
db2b9e4595c24acccffdd856e0facbacc75f0a56
511,635
from pathlib import Path


def check_arg_input_dir(input_dir: str) -> bool:
    """Return True if the input_dir exists.

    :param input_dir: the input directory
    :return: True if the directory exists.
    """
    p = Path(input_dir)
    if not p.is_dir():
        raise ValueError(f"Error! Input dir could not be found at '{input_dir}'.")
    return True
652216c00846fc6b709dbe98ea79983936577a31
498,952
def token(regexp, tag, right_context='', left_context=''):
    """
    Make a token definition (regexp with optional left or right context).
    """
    return r'' + left_context + '(' + regexp + ')' + right_context + '', tag
6420f689d34520f0948c6d2be4d23f642e0fa8aa
501,725
def tuple_to_string(node):
    """Given a tuple of a node id and orientation,
    :return: the tuple elements concatenated by '|'
    """
    node_id, node_orn = node
    return node_id + "|" + node_orn
4c87c6b6c3340ea4f0f3a4786fb2094f4ed64d74
279,947
import hashlib


def md5sum_bytes(content):
    """Calculate the md5sum of a byte string."""
    assert isinstance(content, bytes), 'Invalid type %s' % type(content)
    m = hashlib.md5()
    m.update(content)
    return m.hexdigest()
6fe57fb90958aa2bb5a5b9cd123a946d888a633d
595,556
import time


def datetime_timestamp(v):
    """Get numeric timestamp.

    This will work under both Python 2 and 3.

    Args:
        v (datetime.datetime): Date/time value

    Returns:
        (float): Floating point timestamp
    """
    if hasattr(v, "timestamp"):  # Python 2/3 test
        # Python 3: datetime.timestamp() is available
        result = v.timestamp()
    else:
        # Python 2: reconstruct the timestamp manually
        result = time.mktime(v.timetuple()) + v.microsecond / 1e6
    return result
519d9a1b76a32f53bea4b09d98eee3b985694754
346,439
def make_playlist(sequence, discontinuity_sequence=0, segments=[]):
    """Create an HLS playlist response for tests to assert on."""
    response = [
        "#EXTM3U",
        "#EXT-X-VERSION:7",
        "#EXT-X-TARGETDURATION:10",
        '#EXT-X-MAP:URI="init.mp4"',
        f"#EXT-X-MEDIA-SEQUENCE:{sequence}",
        f"#EXT-X-DISCONTINUITY-SEQUENCE:{discontinuity_sequence}",
    ]
    response.extend(segments)
    response.append("")
    return "\n".join(response)
be49176c4854dcbb1886aeff2f473461c899bc5a
545,206
def fib_mem(n, computed={0: 0, 1: 1}):
    """Find the nth Fibonacci number using memoization."""
    if n not in computed:
        computed[n] = fib_mem(n - 1, computed) + fib_mem(n - 2, computed)
    return computed[n]
5d25c22ccdc5ea41fbd0faf21a8b35ac535acaef
9,661
def singleton(cls, *args, **kw): """ Create a single instance of an object, in this case a single object of RedisDB Taken from: https://stackoverflow.com/questions/42237752/single-instance-of-class-in-python :param cls: class to create only once :param args: arguments passed to class constructor :param kw: keyword passed to class constructor :return: A single instance of the cls """ instances = {} def _singleton(): if cls not in instances: instances[cls] = cls(*args, **kw) return instances[cls] return _singleton
a61a6394d0296ce357ab6828291eaabce138f8b5
342,957
def extract(filename): """ extract portion of ETL - In this case it is simply a local tab separated file - This file path is given as an input - Copy records from the file into a list and returns it """ raw_rows = [] with open(filename) as f: for line in f: raw_rows.append(line.rstrip()) return raw_rows
81cb40c9a5811c0f8175cda72a9446314a48530d
270,429
def descendants(migration, population): """ Return all descendants of ``migration`` from ``population``. :param migration: a :class:`~yoyo.migrations.Migration` object :param population: a collection of migrations """ population = set(population) descendants = {migration} while True: found = False for m in population - descendants: if set(m.depends) & descendants: descendants.add(m) found = True if not found: break descendants.remove(migration) return descendants
2714ccfad8fbcac719906da72d3d83d5e65e29ee
130,993
def _extract_common_p4c_args(ctx): """Extract common arguments for p4c build rules.""" p4file = ctx.file.src p4deps = ctx.files._p4include + ctx.files.deps args = [ p4file.path, "--std", ctx.attr.std, "--arch", ctx.attr.arch, ] include_dirs = {d.dirname: 0 for d in p4deps} # Use dict to express set. include_dirs["."] = 0 # Enable include paths relative to workspace root. args += [("-I" + dir) for dir in include_dirs.keys()] return args
16f018da5f2c35e956c58ca9e26365e49a39b366
173,450
def parse_problems(lines):
    """
    Given a list of lines, parses them and returns a list of problems.
    """
    res = [list(map(int, ln.split(" "))) for ln in lines]
    return res
4e5c62ad2028e9ed441a5aa86276d059e0d118a3
676,254
import math


def sig2(x, scale=1):
    """Calculate the sigmoid value."""
    return 1 / (1 + math.exp(-x / scale))
c98db599dadec2cf4e6300f2b1946fa87f99ac83
212,605
def find_google_fileid_tree(service, fileId):
    """Find the folder tree of a file.

    Arguments:
        service: in order to use any of this library, the user needs to first build the service
            class using google ServiceAccountCredentials.
            see https://pypi.org/project/google-api-v3-helper/ or
            https://github.com/sbi-rviot/google_api_helper for a full example.
        fileId: id of the file you wish to get the tree for.
    """
    file = service.files().get(fileId=fileId, fields='id, name, parents').execute()
    tree = []
    parent = file.get('parents')
    if parent:
        while True:
            folder = service.files().get(
                fileId=parent[0], fields='id, name, parents').execute()
            # Record the fetched folder's own id and name (the original paired the
            # grandparent's id with this folder's name, and skipped the root folder).
            tree.append({'id': folder.get('id'), 'name': folder.get('name')})
            parent = folder.get('parents')
            if parent is None:
                break
    return tree
7daaf3cda1f9677de87fa7f30d693f61806c62b7
668,195
def regex_split(original_output, regex_split_cmd):
    """
    Takes in a regex string and output, returns a list of the output split.

    :param original_output:
    :param regex_split_cmd:
    :return:
    """
    def _regex_split():
        return original_output.split(regex_split_cmd)

    return _regex_split()
81042f8ea9c4f1befe7256f52cf1ec198a9a1eba
397,137
from typing import Tuple def process_grad_date(grad_date: str) -> Tuple[str, int]: """ Infers the graduation semester and year from grad_date grad_date: str The graduation date in the format 'YYYY-MM-DD' Returns: A tuple of the semester and year. """ year = grad_date[:4] month = grad_date[5:7] if month in ['04', '05', '06']: semester = 'Spring' elif month in ['07', '08', '09']: semester = 'Summer' elif month in ['11', '12', '01']: semester = 'Fall' else: semester = "None" return semester, int(year)
db99dabf58d5ce9a3c8218bc994ff24f7957556f
141,857
def new_var(var_name, t_index, agent):
    """
    Naming of agent variables xij, where i in {1,...,T} and j in {1,...,M}.

    :param var_name: variable name x, u, d,...
    :param t_index: time index
    :param agent: agent index
    :return: string with the name of the variable
    """
    return f"{var_name}{t_index}_{agent}"
63f2055d82c53b41dc3939451048980c28c953a0
204,578
def remove_duplicates(df_or_series): """ Remove duplicate rows or values by keeping the first of each duplicate. Parameters ---------- df_or_series : :any:`pandas.DataFrame` or :any:`pandas.Series` Pandas object from which to drop duplicate index values. Returns ------- deduplicated : :any:`pandas.DataFrame` or :any:`pandas.Series` The deduplicated pandas object. """ # CalTrack 2.3.2.2 return df_or_series[~df_or_series.index.duplicated(keep="first")]
c93da45690c5db94233552b3d925beb8dd783931
526,084
import torch def stft(x, fft_size, hop_size, win_length, window): """Perform STFT and convert to magnitude spectrogram. Args: x (Tensor): Input signal tensor (B, T). fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. window (str): Window function type. Returns: Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). """ x_stft = torch.stft(x, fft_size, hop_size, win_length, window) real = x_stft[..., 0] imag = x_stft[..., 1] # NOTE(kan-bayashi): clamp is needed to avoid nan or inf return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
92db72a1d5c4fe2a0f03476fbf974f3fca24ae72
450,317
def fizz_buzz_elif(n):
    """
    Return the correct FizzBuzz value for n by testing divisibility in an if-elif.
    """
    divisible_by_3 = n % 3 == 0
    divisible_by_5 = n % 5 == 0
    if divisible_by_3 and divisible_by_5:
        return "Fizz Buzz!"
    elif divisible_by_3:
        return "Fizz!"
    elif divisible_by_5:
        return "Buzz!"
    return n
d98b01d2dc73328884a875de753338dea58f4799
631,422
def load_input(filename):
    """Load the input ciphertext."""
    with open(filename) as f:
        return [int(token) for token in f.readlines()[0].strip().split(",")]
7e9fa192ad71083a98750b285ababa9e30a1b4a9
623,250
def sort_cards(cards): """Sort shuffled list of cards, sorted by rank. sort_cards(['3', '9', 'A', '5', 'T', '8', '2', '4', 'Q', '7', 'J', '6', 'K']) ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K'] """ deck = [] alphas = [] for card in cards: if card.isalpha(): alphas.append(card) else: deck.append(card) deck = sorted(deck) for _ in range(alphas.count('A')): deck.insert(0, 'A') for _ in range(alphas.count('T')): deck.append('T') for _ in range(alphas.count('J')): deck.append('J') for _ in range(alphas.count('Q')): deck.append('Q') for _ in range(alphas.count('K')): deck.append('K') return deck
cd804d4173a9cabdecf72a154c28bba1d6307812
123,445
def computeIoH(overlap, head):
    """
    Compute the ratio of the intersection (of head and person) to the head area.

    intersection over head-box = area of overlap / area of head-box

    :param overlap: area of intersection
    :param head: area of head
    :return: IoH
    """
    return overlap / head
72605cccf62e952b0e5533f2a1b13ab9ba133a07
525,575
import re def postprocess_pages(field, pages, **options): """Convert a page range to a 2-element tuple.""" values = re.split(r'\-+', pages) if len(values) == 2: try: return (int(values[0]), int(values[1])) except (ValueError, TypeError): return pages else: return pages
71c86e3e823169abdd9d2ad0e27416dda1dc30b3
575,076
import struct import codecs def _check_key(key_rank, key_idx, received_key_hex): """Check that the received key is the same as the sent key. Args: key_rank (int): 32-bit integer key_idx (int): 32-bit integer received_key_hex (int): hex(key_rank|key_idx) Returns: bool: is the received key is the same as the sent key """ if len(received_key_hex) != 16: return False rank = struct.unpack("<I", codecs.decode(received_key_hex[:8], "hex"))[0] idx = struct.unpack("<I", codecs.decode(received_key_hex[8:], "hex"))[0] return (rank == key_rank) and (idx == key_idx)
c3b70e3efe4e5714dd5f42f290e9685ccd01ffdb
240,140
def get_dependencies(requirements_file_path):
    """Read dependencies from requirements.txt."""
    with open(requirements_file_path, 'r', encoding='utf-8') as fstream:
        dependencies = fstream.read()
    return dependencies
3abb2447c86948e68038ce9735c25b1742bbcb55
61,545
def move_down_left(rows, columns, t): """ A method that takes coordinates of the bomb, number of rows and number of columns of the matrix and returns coordinates of neighbour which is located at the left-hand side and bellow the bomb. It returns None if there isn't such a neighbour """ x, y = t if x == rows or y == 0: return None else: return (x + 1, y - 1)
3b54f796a529815611612606f7d0c90a93053b28
451,934
def toint(a):
    """
    Convert a string to an int, scaling it accordingly if it ends in
    "k", "m" or "b".
    """
    weights = {'k': 1000, 'm': 1000000, 'b': 1000000000}
    if len(a) > 1:
        if a[-1] in weights:
            return weights[a[-1]] * int(a[:-1])
    return int(a)
261503f0aa4884c529ec1ba1ae9449956c495993
97,997
def _check_mutual_preference(resident, hospital):
    """Determine whether two players each have a preference of the other."""
    return resident in hospital.prefs and hospital in resident.prefs
29856e3ef65e60f06c6b12e8b598579b89156295
631,404
import typing def chk_to_int(chk_str: bytes) -> typing.Tuple[int, int]: """ Converts a checksum string to a tuple of (fillbits, checksum). >>> chk_to_int(b"0*1B") (0, 27) """ if not len(chk_str): return 0, -1 fill_bits: int = int(chr(chk_str[0])) try: checksum = int(chk_str[2:], 16) except (IndexError, ValueError): checksum = -1 return fill_bits, checksum
c1ed10b7f3970bddd9c3b4954b677805db99b5a8
432,322
def parse_resource_type(resource_type): """Splits a resource type into it's components. :exc:`ValueError` is raised if the resource type is invalid. >>> parse_resource_type('AWS::ECS::Instance') ['AWS', 'ECS', 'Instance'] >>> parse_resource_type('AWS::ECS') Traceback (most recent call last): ... ValueError: Resource type 'AWS::ECS' is invalid >>> parse_resource_type('AWS__ECS__Instance') Traceback (most recent call last): ... ValueError: Resource type 'AWS__ECS__Instance' is invalid """ segments = resource_type.split("::") if len(segments) != 3: raise ValueError("Resource type '{}' is invalid".format(resource_type)) return segments
894a98c3048908d4459f95e3e250a92e92b13173
522,733
import hashlib


def generate_unique_key(master_key_path, url):
    """
    master_key_path: str    Path to the 32-byte Master Key (for S3 Encryption)
    url: str                S3 URL (e.g. https://s3-us-west-2.amazonaws.com/bucket/file.txt)

    Returns: 32-byte unique key generated for that URL
    """
    with open(master_key_path, 'r') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
        'Key: {}, Length: {}'.format(master_key, len(master_key))
    # hashlib requires bytes under Python 3, so encode the concatenated string.
    new_key = hashlib.sha256((master_key + url).encode()).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key
f254a57fa02eafe7363e65859cfc3710e0b97f47
670,253
def constrain(x, xmin, xmax):
    """Constrain a value x to the interval [xmin, xmax]."""
    if x < xmin:
        return xmin
    else:
        return min(x, xmax)
a97975366de8512990d64a079d05973b3e60e939
187,823
def normalize_userinfo(userinfo):
    """Normalize the userinfo part of the url.

    Params:
        userinfo : string : url userinfo, e.g., 'user@'

    Returns:
        string : normalized userinfo data.
    """
    if userinfo in ["@", ":@"]:
        return ""
    return userinfo
ff2ed6906264c388ab90e070b7f0ec86c2e39f25
154,438
def keepCells(notebook):
    """
    Finds the tag 'keep' in any cell and, if it exists, removes the cells
    that don't have it.

    Returns the dict with only the 'keep'-tagged cells.
    """
    has_keep = False
    for cell in notebook['cells']:
        try:
            if 'keep' in cell['metadata']['tags']:
                has_keep = True
                break
        except KeyError:
            pass

    if has_keep:
        clean = []
        for cell in notebook['cells']:
            try:
                if 'keep' in cell['metadata']['tags']:
                    clean.append(cell)
            except KeyError:
                pass
        notebook['cells'] = clean

    return notebook
2f485e250145605ff319673760785fc9e2622181
612,845
import hashlib


def leaf_merkle_tree_hash(b):
    """
    Calculate the Merkle Tree Leaf Hash for an object (HASH(chr(0) || b)).

    b: the input (bytes) to the leaf hash.

    Return the leaf hash.
    """
    # Prefix with a zero byte; bytes are required under Python 3
    # (the original Python 2 code used chr(0) + b).
    return hashlib.sha256(b"\x00" + b).digest()
6dbc125d0268335ffd2b292dce47ac9122522fce
474,404