Columns: content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
def get_count_of_increased_sequent(measurements) -> int:
    """
    Count how many consecutive increases occur in a list of measurements.

    Args:
        measurements (List[int]): list of measurements

    Returns:
        int: number of consecutive increases
    """
    if len(measurements) < 2:
        raise ValueError("List contains less than 2 values to compare")
    count_of_increased_sequent = 0
    for i in range(len(measurements) - 1):
        if int(measurements[i + 1]) > int(measurements[i]):
            count_of_increased_sequent += 1
    return count_of_increased_sequent
8f3cb2807a605036d1f2bf5d42693a90e1286372
590,527
import torch


def labels_and_weights(label_file_df):
    """
    Get list of unique sample labels and weights of the samples using
    the inverse of the count. Weights is a tensor to be compatible with
    CrossEntropyLoss.
    """
    labels_all = label_file_df.iloc[:, -1].astype(str).values.tolist()
    labels_unique = set(labels_all)
    labels = sorted(labels_unique)
    labels_count = [labels_all.count(label) for label in labels]
    weights = 1. / torch.tensor(labels_count, dtype=torch.float)
    return labels, weights
4406b2bb1db6fb2a0f42eeae0c4ac5a093906606
501,900
from collections import deque
from typing import Iterable
from typing import Deque


def array_shift(data: Iterable, shift: int) -> Deque:
    """
    left(-) or right(+) shift of array

    >>> arr = range(10)
    >>> array_shift(arr, -3)
    deque([3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
    >>> array_shift(arr, 3)
    deque([7, 8, 9, 0, 1, 2, 3, 4, 5, 6])
    """
    deq = deque(data)
    deq.rotate(shift)
    return deq
c14e115808592808bc9b0cf20fa8bc3d5ece7768
3,189
from typing import Tuple

import collections


def remove_common_characters(string1: str, string2: str) -> Tuple[str, str]:
    """Remove characters the two strings have in common.

    Both strings must be of the same length and only contain unique
    characters. A character is considered common only when it appears at the
    same position in both ``string1`` and ``string2``.

    Args:
        string1 (str): The string that will be checked against ``string2``.
        string2 (str): The string that will be checked against ``string1``.

    Returns:
        Tuple[str, str]: The characters unique to ``string1`` and
            ``string2``, respectively.

    Raises:
        ValueError: If the length of ``string1`` does not equal that of
            ``string2``.
        ValueError: If either ``string1`` or ``string2`` contain duplicate
            characters.
    """
    if len(string1) != len(string2):
        raise ValueError(
            "Both strings must be the same length."
            f" ``string1`` is of length {len(string1)}"
            f" and ``string2`` is of length {len(string2)}"
        )
    for i, string in enumerate((string1, string2)):
        if len(string) != len(set(string)):
            character_counts = collections.Counter(string)
            duplicates = [
                character
                for character in character_counts
                if 1 < character_counts[character]
            ]
            duplicate_preview = ", ".join(duplicate for duplicate in duplicates[:5])
            raise ValueError(
                "Each string must contain only unique characters."
                f" ``string{i + 1}`` contains duplicates of the"
                f" following characters: {duplicate_preview},..."
            )
    cleaned_string1 = ""
    cleaned_string2 = ""
    for character1, character2 in zip(string1, string2):
        if character1 != character2:
            cleaned_string1 += character1
            cleaned_string2 += character2
    return cleaned_string1, cleaned_string2
e6e442968004b26279c3963459723f1ec7c660bc
487,421
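A quick usage sketch for remove_common_characters above (hypothetical inputs, not part of the dataset row): only positions where the two strings differ survive.

# Position 2 is the only index where the strings differ.
assert remove_common_characters("abcd", "abed") == ("c", "e")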
def find_item(tgt_dict, key):
    """Recursively search dictionary for key and return value"""
    if key in tgt_dict:
        return tgt_dict[key]
    for k, v in tgt_dict.items():
        if isinstance(v, dict):
            item = find_item(v, key)
            if item is not None:
                return item
0934f1daa3f2c94275e937fec84fe0cc3ec6de1f
321,898
def normalized_current_date(datetime_col, min_date, max_date):
    """
    Temporal feature indicating the position of the date of a record in the
    entire time period under consideration, normalized to be between 0 and 1.

    Args:
        datetime_col: Datetime column.
        min_date: minimum value of date.
        max_date: maximum value of date.

    Returns:
        float: the position of the current date in the min_date:max_date range
    """
    date = datetime_col.dt.date
    current_date = (date - min_date).apply(lambda x: x.days)
    if max_date != min_date:
        current_date = current_date / (max_date - min_date).days
    else:
        current_date = 0
    return current_date
2947aa4de2aa5e98ec9c33c51fbda8716ff13e37
677,050
def compare_sub(got, expected):
    """
    Check whether ``got`` is a subdomain of ``expected``; raise an exception
    if it differs.
    """
    if not expected.is_subdomain(got):
        raise Exception("expected subdomain of '%s', got '%s'" % (expected, got))
    return True
d8a5cf41ce608fce226fef8a79d2900323ce51dd
128,506
def prefix_multiline_str(prefix: str, multiline_str: str):
    """
    :param prefix: string with which to prefix each line of multiline_str
    :param multiline_str: a possibly multiline string
    :return: prefix + (multiline_str with each \n replaced by \n + prefix)

    >>> prefix_multiline_str("abc: ", None)  # expect no output
    >>> prefix_multiline_str("abc: ", "")
    'abc: '
    >>> prefix_multiline_str("abc: ", "xyz")
    'abc: xyz'
    >>> print(prefix_multiline_str("abc: ", "x\\ny\\nz"))
    abc: x
    abc: y
    abc: z
    """
    if multiline_str is None:
        return None
    if '\n' not in multiline_str:
        return prefix + multiline_str
    return prefix + multiline_str.replace('\n', '\n' + prefix)
e65a20be9b5540c58c58eb208e09654c62475cb7
169,362
def get_max_size(size):
    """Return a get_size method for the size given"""
    def get_size():
        return size
    # Return the closure itself rather than calling it, as the docstring
    # promises a method, not the value.
    return get_size
479252a6c4f9326fc60e51158f13f3d36a9af556
242,607
def find_all(tofind, string):
    """Returns number of times a certain substring is found"""
    found = [i for i in range(len(string)) if string.startswith(tofind, i)]
    num_found = len(found)
    return num_found
a02b12e6a7f6a579624407f8678027fd6c17d458
585,967
def PyDateTime_GET_MONTH(space, w_obj):
    """Return the month, as an int from 1 through 12."""
    return space.int_w(space.getattr(w_obj, space.wrap("month")))
3b3aa912fa1bff950d9385a026bce5d5870d7f17
439,930
def get_intersection(u, v, node_presence):
    """
    Get the intersection between the presence of u and v.

    :param u: First Node
    :param v: Second Node
    :param node_presence: Node presence
    :return: Intersection
    """
    intersec = []
    for ut0, ut1 in zip(node_presence[u][::2], node_presence[u][1::2]):
        for vt0, vt1 in zip(node_presence[v][::2], node_presence[v][1::2]):
            if ut0 <= vt1 and vt0 <= ut1:
                intersec += [max(ut0, vt0), min(vt1, ut1)]
    return intersec
95d368872af2c3b3abb88a773ff448149f526fb6
640,354
def _timestamp_to_string(timestamp):
    """Extracts the date from a pandas timestamp and converts it to a string

    :returns string of the format: '20190908' (for September 8th, 2019)
    """
    date_str = str(timestamp.date()).replace('-', '')  # drop the dashes from 'YYYY-MM-DD'
    return date_str
cec97899b65d81a03af21f17ad550f8a7cb17539
364,871
def is_reproducible(item):
    """Return True if the testcase is reproducible by checking the
    one_time_crasher_flag."""
    return not item.one_time_crasher_flag
eede7695563a6d459c717298b3eace02ef163b05
566,509
def get_number_features(dict_features):
    """Count the total number of features based on input parameters of each feature

    Parameters
    ----------
    dict_features : dict
        Dictionary with features settings

    Returns
    -------
    int
        Feature vector size
    """
    number_features = 0
    for domain in dict_features:
        for feat in dict_features[domain]:
            if dict_features[domain][feat]["use"] == "no":
                continue
            n_feat = dict_features[domain][feat]["n_features"]
            if isinstance(n_feat, int):
                number_features += n_feat
            else:
                n_feat_param = dict_features[domain][feat]["parameters"][n_feat]
                if isinstance(n_feat_param, int):
                    number_features += n_feat_param
                else:
                    number_features += eval("len(" + n_feat_param + ")")
    return number_features
6f81c359cfee77896cb8e4334aa23cf977aaca5a
20,991
def flip_bit(number: int, position: int):
    """
    Flip the bit at position.

    Details: perform bitwise xor for given number and X, where X is a number
    whose bits are all zero except for a one at the given position.

    >>> flip_bit(0b101, 1)  # 0b111
    7
    >>> flip_bit(0b101, 0)  # 0b100
    4
    """
    return number ^ (1 << position)
c68b1fe8a31b55fce27052d7c8affb06330e040f
545,752
import math


def get_psnr(mse, max_value=255.0):
    """
    Calculates PSNR value from given MSE value.

    :param mse:
    :param max_value:
    :return:
    """
    if mse is None or mse == float('Inf') or mse == 0:
        psnr = 0
    else:
        psnr = 20 * math.log(max_value / math.sqrt(mse), 10)
    return psnr
5f1ff971bdf304a1b28286730c041a1ef117ac74
270,669
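A worked example for get_psnr, assuming 8-bit images (max_value 255): an MSE of 100 gives 20 * log10(255 / sqrt(100)) ≈ 28.13 dB.

import math
# Hypothetical sanity check, not part of the dataset row above.
assert math.isclose(get_psnr(100.0), 20 * math.log(255.0 / 10.0, 10))  # ~28.13 dB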
def getStringForVariables(variables, wordsize):
    """
    Takes as input a list of variable names and the wordsize, and constructs
    a string of the form:
    x00, x01, ..., x30: BITVECTOR(wordsize);
    """
    command = ""
    for var in variables:
        command += var + ","
    command = command[:-1]
    command += ": BITVECTOR({0});".format(wordsize)
    return command
1710ec9d8f71a48849df2548d4ff936ff909a43c
637,565
import ipaddress


def is_valid_ip_address(ip_address):
    """
    Check that the string specified appears to be either a valid IPv4 or
    IPv6 address.

    :param str ip_address: The ip address to validate.
    :return: Whether the ip address appears to be valid or not.
    :rtype: bool
    """
    try:
        ipaddress.ip_address(ip_address)
    except ValueError:
        return False
    return True
02bb504dd958a5c823485adb1281009db4a3f103
361,819
def find_matching_resource(preview_resource, delivery_entry, search_field):
    """Returns matching resource for a specific field.

    :param preview_resource: Entry from the Preview API to match.
    :param delivery_entry: Entry to search from, from the Delivery API.
    :param search_field: Field in which to search in the delivery entry.
    :return: Entry from the Delivery API or None.
    """
    if not delivery_entry:
        return None
    for delivery_resource in delivery_entry.fields().get(search_field, []):
        if preview_resource.id == delivery_resource.id and (
            delivery_resource.type == 'Entry' or delivery_resource.type == 'Asset'
        ):
            return delivery_resource
518297f18a2dcd37bb226f96bd51370d7ed3c7e3
20,872
def NullCondition(wf, context):  # pylint: disable=invalid-name,unused-argument
    """Null condition"""
    return True
745bd7c1718cb989ea4235c5dc076e63d4e0b24e
379,763
def get_wiki_arxiv_url(wiki_dump_url, href):
    """Return a full URL from the href of a .bz2 archive."""
    return '{}/{}'.format(wiki_dump_url, href)
88c9dc1f62eabfc676e6833640e5c6a4263c916c
115,344
def sum_multiples(num1, num2, limit):
    """take two numbers and find the sum of their multiples from 1 to some
    upper limit. this DOES NOT double-count numbers which are multiples of
    BOTH num1 and num2."""
    total = 0  # named total to avoid shadowing the built-in sum()
    if num1 > limit and num2 > limit:
        return total
    for i in range(1, limit + 1):
        if i % num1 == 0 or i % num2 == 0:
            total += i
    return total
0966730d3ac650bf46914b6a5b76df62edd93a24
63,164
import re


def parse_config(content):
    """Parse configuration options into a dict.

    Blank lines are ignored. Lines beginning with a hash mark (`#`) are
    comments, and ignored.

    Valid lines are made of an option's name (a sequence of non-blanks),
    followed by a value. The value starts with the first non-blank character
    after the option's name, and terminates at the end of the line, or at
    the last sequence of blanks before the end of the line.

    Option names are case-insensitive, and converted to lower-case.

    Arguments:
        content (str): The content of a configuration file to parse.

    Returns:
        dict: Parsed options.
    """
    return {
        match.group(1).lower(): match.group(2)
        for match in (
            re.match(r'^(\w+)\s+([^#]+\b)', line)
            for line in content.splitlines()
        )
        if match
    }
38ee22168b8b1400e6f2a33f937fa947238513c9
404,762
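A small usage sketch for parse_config; the configuration text is made up for illustration.

sample = "Port 8080\n# a comment\nHostName example.com"
assert parse_config(sample) == {"port": "8080", "hostname": "example.com"}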
def GetFirstTokenInSameLine(token):
    """Returns the first token in the same line as token.

    Args:
        token: Any token in the line.

    Returns:
        The first token in the same line as token.
    """
    while not token.IsFirstInLine():
        token = token.previous
    return token
5e25a502a4e9039b3544c29768bdf3553eb70a61
194,567
import torch


def _tblr_pred_to_delta_xywh_pred(bbox_pred: torch.Tensor,
                                  normalizer: torch.Tensor) -> torch.Tensor:
    """Transform tblr format bbox prediction to delta_xywh format for ncnn.

    An internal function for transforming tblr format bbox prediction to
    delta_xywh format. NCNN DetectionOutput layer needs delta_xywh format
    bbox_pred as input.

    Args:
        bbox_pred (Tensor): The bbox prediction of tblr format, has shape
            (N, num_det, 4).
        normalizer (Tensor): The normalizer scale of bbox horizon and
            vertical coordinates, has shape (2,).

    Returns:
        Tensor: The delta_xywh format bbox predictions.
    """
    top = bbox_pred[:, :, 0:1]
    bottom = bbox_pred[:, :, 1:2]
    left = bbox_pred[:, :, 2:3]
    right = bbox_pred[:, :, 3:4]
    h = (top + bottom) * normalizer[0]
    w = (left + right) * normalizer[1]
    _dwh = torch.cat([w, h], dim=2)
    assert torch.all(_dwh >= 0), 'wh must be positive before log.'
    dwh = torch.log(_dwh)
    return torch.cat([(right - left) / 2, (bottom - top) / 2, dwh], dim=2)
bdc924322f80b90f502f5a0aea9d1cfa057bc1bc
636,401
def update_width(vehicle, width):
    """
    Updates the width of the vehicle

    :param vehicle: vehicle
    :param width: new vehicle width
    :type vehicle: VehicleProfile
    :type width: int
    :return: Updated vehicle
    """
    return vehicle.update_width(width)
0b66b4b3cbfb6a30a22360b7f920344b57e19f49
145,161
def to_tf_matrix(expression_matrix, gene_names, tf_names):
    """
    :param expression_matrix: numpy matrix. Rows are observations and columns
        are genes.
    :param gene_names: a list of gene names. Each entry corresponds to the
        expression_matrix column with same index.
    :param tf_names: a list of transcription factor names. Should be a subset
        of gene_names.
    :return: tuple of:
        0: A numpy matrix representing the predictor matrix for the regressions.
        1: The gene names corresponding to the columns in the predictor matrix.
    """
    tuples = [(index, gene) for index, gene in enumerate(gene_names) if gene in tf_names]
    tf_indices = [t[0] for t in tuples]
    tf_matrix_names = [t[1] for t in tuples]
    return expression_matrix[:, tf_indices], tf_matrix_names
ee2214af5dd96454155e58761d69d35a4cf281b8
93,810
def ind(seq):
    """
    Returns the integer indices of a Python list where the list values
    are true.

    tindex = Ngl.ind(plist)

    plist -- A Python list, tuple, or one-dimensional NumPy array.
    """
    inds = []
    for i in range(len(seq)):
        if seq[i] != 0:
            inds.append(i)
    return inds
d8a9548c9b1a312f9789a4e9f3bdbe41507b4a69
408,562
import glob


def find_harnesses(output_path):
    """Returns a list of all harness files found in the given directory."""
    return glob.glob(output_path + "/*harness.c")
b64f1ec757b6deb07b25c529e418903e9ba499ef
265,705
from typing import Union
from typing import Mapping
from typing import Sequence


def recursive_contains(obj: Union[Mapping, Sequence], *, keys: Sequence) -> bool:
    """Checks whether the given key sequence is reachable in the ``obj``.

    Args:
        obj (Union[Mapping, Sequence]): The object to check recursively
        keys (Sequence): The sequence of keys to check for

    Returns:
        bool: Whether the key sequence is reachable
    """
    if len(keys) > 1:
        # Check and continue recursion
        if keys[0] in obj:
            return recursive_contains(obj[keys[0]], keys=keys[1:])
        # else: not available
        return False
    # else: reached the end of the recursion
    return keys[0] in obj
918ef387d0290ccba0faf846937f1689d07980ef
139,748
import importlib


def load_backend(config):
    """
    Returns the backend module from the given SocketShark configuration.
    """
    backend_name = config.get('BACKEND', 'websockets')
    backend_module = 'socketshark.backend.{}'.format(backend_name)
    return importlib.import_module(backend_module)
d4bd3a55b5e63b1b5914cf3a8be69e57a8e4e33e
286,242
import math


def _euclidean_dist(vector_a, vector_b):
    """
    :param vector_a: A list of numbers.
    :param vector_b: A list of numbers.
    :returns: The euclidean distance between the two vectors.
    """
    dist = 0
    for (x, y) in zip(vector_a, vector_b):
        dist += (x - y) * (x - y)
    return math.sqrt(dist)
4b5b5fb742c75dfeac244a463f08360cfadfb5b1
380,941
import re


def normalize_repeating_chars(text: str, *, chars: str, maxn: int = 1) -> str:
    """
    Normalize repeating characters in ``text`` by truncating their number of
    consecutive repetitions to ``maxn``.

    Args:
        text
        chars: One or more characters whose consecutive repetitions are to be
            normalized, e.g. "." or "?!".
        maxn: Maximum number of consecutive repetitions of ``chars`` to which
            longer repetitions will be truncated.

    Returns:
        str
    """
    return re.sub(r"({}){{{},}}".format(re.escape(chars), maxn + 1), chars * maxn, text)
0f7f53353feadc1efe52def4dd945f1bda86f3ae
334,802
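A usage sketch for normalize_repeating_chars with made-up input: runs longer than maxn are truncated, shorter runs are left alone.

assert normalize_repeating_chars("wait....", chars=".", maxn=3) == "wait..."
assert normalize_repeating_chars("ok.", chars=".", maxn=3) == "ok."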
def scale_columns(prefix, first, last):
    """
    Create a list of scale parameter column names with name prefixX
    with X from first to last (inclusive)
    """
    return [prefix + str(i) for i in range(first, last + 1)]
b99411ff2c66147b0608c0870671ea6af68a25e8
611,668
import csv


def read_bodyplan(file):
    """Read body plan from csv file.

    Args:
        file: path to file containing a bodyplan

    Returns:
        A list of L layers representing the model structure with layer keys:
        "layer", "n", "activation", "lreg", "regval", and "desc" indicating
        the layer index, integer number of units in layer, a string name of
        the activation function, the value (1 or 2) of the L-norm
        regularization to employ, the float regularization constant, and a
        string description of layer.
    """
    # init bodyplan as empty list
    bodyplan = []
    with open(file, 'r') as csvfile:
        reader = csv.reader(csvfile)
        keys = list(next(reader))  # get headers
        for line in reader:
            layer = {}
            layer["layer"] = int(line[keys.index("layer")])
            layer["n"] = int(line[keys.index("n")])
            layer["activation"] = str(line[keys.index("activation")])
            if "regval" in keys:
                layer["regval"] = float(line[keys.index("regval")])
            else:
                layer["regval"] = float(0)  # default regularization parameter
            if "lreg" in keys:
                layer["lreg"] = int(line[keys.index("lreg")])
            else:
                layer["lreg"] = int(1)  # default regularization is L1
            if "desc" in keys:
                layer["desc"] = str(line[keys.index("desc")])
            else:
                layer["desc"] = 'fully connected'
            bodyplan.append(layer)
    return bodyplan
8de720133fd2e5f6c3b51451c937bf3a8671b52e
199,838
def pkcs7_pad(inp, block_size):
    """
    Using the PKCS#7 padding scheme, pad <inp> to be a multiple of
    <block_size> bytes. Ruby's AES encryption pads with this scheme, but
    pycrypto doesn't support it.

    Implementation copied from pyaspora:
    https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
    """
    val = block_size - len(inp) % block_size
    if val == 0:
        return inp + (bytes([block_size]) * block_size)
    else:
        return inp + (bytes([val]) * val)
5a5aae6f588e5e67dc30c85ab6a6afcdb9c728c0
25,083
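A sanity check for pkcs7_pad, hypothetical usage with the classic cryptopals input: a 16-byte message padded to a 20-byte boundary gains four 0x04 bytes.

padded = pkcs7_pad(b"YELLOW SUBMARINE", 20)
assert padded == b"YELLOW SUBMARINE\x04\x04\x04\x04"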
def replace_nones(dict_or_list):
    """Update a dict or list in place to replace 'none' string values with
    Python None."""

    def replace_none_in_value(value):
        if isinstance(value, str) and value.lower() == "none":
            return None
        return value

    items = dict_or_list.items() if isinstance(dict_or_list, dict) else enumerate(dict_or_list)
    for accessor, value in items:
        if isinstance(value, (dict, list)):
            replace_nones(value)
        else:
            dict_or_list[accessor] = replace_none_in_value(value)
318723616703761d7a4c683ac88c5923eff04e89
350,090
def ip2uint_str(ipv4_str):
    """Convert IPv4 string to 32-bit integer value"""
    parts = ipv4_str.split('.')
    if len(parts) != 4:
        raise ValueError('Expected IPv4 address in form A.B.C.D, got {}'.format(ipv4_str))
    ip = [0] * 4
    for i, part in enumerate(parts):
        try:
            int_part = int(part)
        except ValueError:
            raise ValueError('Part {} of IPv4 address is not an integer'.format(i))
        if int_part < 0 or int_part > 255:
            raise ValueError('Part {} of IPv4 address is not in range 0-255'.format(i))
        ip[i] = int_part
    return (ip[0] << 24) + (ip[1] << 16) + (ip[2] << 8) + ip[3]
0298b557d79817260d0682f55819116638a1504d
248,354
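A worked conversion for ip2uint_str: 1.2.3.4 packs to (1<<24) + (2<<16) + (3<<8) + 4 = 16909060.

assert ip2uint_str("1.2.3.4") == 16909060
assert ip2uint_str("255.255.255.255") == 0xFFFFFFFF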
def haskeys(d, *keys):
    """Returns True if all keys are present in a nested dict `d`."""
    if len(keys) == 1:
        return keys[0] in d
    first = keys[0]
    if first in d and isinstance(d[first], dict):
        return haskeys(d[first], *keys[1:])
    return False
3ccb57778c050534528d1c136082a4a7b54dcc1f
182,923
def check_true_false(variable):
    """
    Checks whether the string variable is "true" or "false"; raises an error
    otherwise.

    Args:
        variable: (str) input variable to be checked
    """
    if variable == "true" or variable == "false":
        return True
    else:
        raise TypeError
27d933865fb9eebf2969a651faf5c2af504a8358
521,350
import hashlib


def make_password(service_provider_id, service_provider_password, timestamp):
    """
    Build a time-sensitive password for a request.
    """
    # hashlib requires bytes in Python 3, so encode the concatenated string
    return hashlib.md5(
        (service_provider_id + service_provider_password + timestamp).encode()
    ).hexdigest()
eb68df2bdc5aac51f84341df085bf9320057eebb
70,864
def dedupe_matching(matching):
    """
    Remove duplicate node pairs from the output of
    networkx.algorithms.max_weight_matching, since we don't care about order.

    Args:
        matching (dict): output from networkx.algorithms.max_weight_matching.
            key is "from" node, value is "to" node.

    Returns:
        list[2tuples]: list of node pairs from `matching` deduped
        (ignoring order).
    """
    matched_pairs_w_dupes = [tuple(sorted([k, v])) for k, v in matching.items()]
    return list(set(matched_pairs_w_dupes))
e6cb6f53e164e234bf3781ecfd81799a68324f20
429,154
def combine_predictions_list(predictions_list, index_list=None):
    """Combine predictions in predictions_list[index_list].

    By taking the mean of their get_combineable_predictions views. E.g. for
    regression it is the actual predictions, and for classification it is
    the probability array (which should be calibrated if we want the best
    performance).

    Called both for combining one submission on cv folds (a single model
    that is trained on different folds) and several models on a single fold.

    Called by
    _get_bagging_score : which combines bags of the same model, trained on
        different folds, on the heldout test set
    _get_cv_bagging_score : which combines cv-bags of the same model,
        trained on different folds, on the training set
    get_next_best_single_fold : which does one step of the greedy forward
        selection (of different models) on a single fold
    _get_combined_predictions_single_fold : which does the full loop of
        greedy forward selection (of different models), until improvement,
        on a single fold
    _get_combined_test_predictions_single_fold : which computes the
        combination (constructed on the cv valid set) on the holdout test
        set, on a single fold
    _get_combined_test_predictions : which combines the foldwise combined
        and foldwise best test predictions into a single megacombination

    Parameters
    ----------
    predictions_list : list of instances of Predictions
        Each element of the list is an instance of Predictions of a given
        model on the same data points.
    index_list : None | list of integers
        The subset of predictions to be combined. If None, the full set is
        combined.

    Returns
    -------
    combined_predictions : instance of Predictions
        A predictions instance containing the combined (averaged) predictions.
    """
    Predictions = type(predictions_list[0])
    combined_predictions = Predictions.combine(predictions_list, index_list)
    return combined_predictions
90cbaf32bc3260fed2dd3460ecd2616312de5e85
149,041
from datetime import datetime


def timestamp() -> str:
    """
    Factory function returns an ISO formatted UTC timestamp.
    """
    return datetime.utcnow().isoformat(timespec='seconds')
03033289528b0aebf022ee5ad59fa96643f04bc9
130,295
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string

    >>> sentence = "I love Python"
    >>> reverse_words(sentence) == " ".join(sentence.split()[::-1])
    True
    >>> reverse_words(sentence)
    'Python love I'
    """
    return " ".join(reversed(input_str.split(" ")))
e8e5de0a89bfce165be481654774d960ee6de23a
301,794
import struct


def ntohl(bs):
    """
    Convert the 4-byte value in ``bs`` from network-byte order to a
    host-byte-order integer.
    """
    return struct.unpack('!I', bs)[0]
308de3050ebe02589c5bfc98c3e4cc2a859a496c
192,117
def checker(tree, spec, priority=0):
    """A decorator to install a check function:

    @checker(tree, "foo/bar")
    def my_check():
        ...

    will install my_check as a check under "foo" called "bar" in tree.
    Optionally provide the priority as the third argument (default is 0).
    """
    assert "/" in spec
    (path, name) = spec.split("/")
    return lambda f: tree.add(path, priority, name, f)
3f6290419930cd35ae224a23813bcc1edebcc098
471,009
import random
import bisect


def generate_sector(size: int, object_weight: list) -> dict:
    """
    Generates a Sector with Weighted Spawns

    Args:
        size: Int Representing the Size of the Sector (Size X Size)
        object_weight: A Nested List with Object / Value Types

    Examples:
        generate_sector(6, [["*", 50], ["#", 10]]) would output a Map File
        where * is far more Common than #

    Returns:
        A Dict with Lists inside which Represent the Map Data per Row
    """
    if size == 0:  # use == (not "is") for value comparison
        raise ValueError("The Sector Size cant be 0")

    size += 1
    output = {}
    placed_player = False

    totals = []
    running_total = 0
    for w in object_weight:
        running_total += w[1]
        totals.append(running_total)

    def next_object():  # renamed to avoid shadowing the built-in next()
        """Gets a Random Object from the Object - Weight List"""
        ran = random.random() * totals[-1]
        i = bisect.bisect_right(totals, ran)
        return object_weight[i][0]

    for x in range(1, size):
        row = []
        for y in range(1, size):
            obj = next_object()  # renamed to avoid shadowing the built-in object
            if placed_player is False and obj == "@":
                row.append(obj)
                placed_player = True
                continue
            elif placed_player is True and obj == "@":
                while obj == "@":
                    obj = next_object()
            row.append(obj)
        output[x] = row
    return output
514195b66c707b2e0dd67ea47b57fe56c1d28a86
707,764
def counters(stats):
    """
    Count all_sentences, all_questions, all_questions_with_ans & all_corrects

    :param stats: list(quintet)
    :rtype: int, int, int, int
    :return: all_sentences, all_questions, all_questions_with_ans, all_corrects
    """
    # Initialization of counters.
    all_sentences = 0
    all_questions = 0
    all_questions_with_ans = 0
    all_corrects = 0
    # Parse stats and implement the addings.
    for sentences_num, questions_num, questions_with_ans, corrects, acc in stats:
        all_sentences += sentences_num
        all_questions += questions_num
        all_questions_with_ans += questions_with_ans
        all_corrects += corrects
    return all_sentences, all_questions, all_questions_with_ans, all_corrects
d6edc124e6254b11316a429c84664db1a223b352
688,946
import numpy as np


def handle_red_or_blue_occurrence(numpy_grid, x_pos, y_pos):
    """
    Function that handles the occurrence of finding a "red(2)" or "blue(1)"
    square in the input grid.

    If a Blue square is found, processes the grid and adds an orange square
    in set positions around it.
    If a red square is found, processes the grid and adds a yellow square in
    set positions around it.

    :param numpy_grid: The input grid
    :param x_pos: x position where the blue or red square was found
    :param y_pos: y position where the blue or red square was found
    :return: numpy_grid: containing correct output for single input grid

    >>> test_input_grid = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    >>> handle_red_or_blue_occurrence(test_input_grid, 1, 1)
    array([[0, 7, 0],
           [7, 1, 7],
           [0, 7, 0]])
    >>> test_input_grid = np.array([[0, 0, 0], [0, 2, 0], [0, 0, 0]])
    >>> handle_red_or_blue_occurrence(test_input_grid, 1, 1)
    array([[4, 0, 4],
           [0, 2, 0],
           [4, 0, 4]])
    """
    if numpy_grid[x_pos, y_pos] == 1:
        numpy_grid[x_pos - 1, y_pos] = 7
        numpy_grid[x_pos + 1, y_pos] = 7
        numpy_grid[x_pos, y_pos + 1] = 7
        numpy_grid[x_pos, y_pos - 1] = 7
    elif numpy_grid[x_pos, y_pos] == 2:
        numpy_grid[x_pos - 1, y_pos - 1] = 4
        numpy_grid[x_pos - 1, y_pos + 1] = 4
        numpy_grid[x_pos + 1, y_pos - 1] = 4
        numpy_grid[x_pos + 1, y_pos + 1] = 4
    return numpy_grid
fc5cbf89604afd66aa6b00c87412259fdbf2c7e4
493,961
def semver(major_component, minor_component, patch_component):
    """Construct a SemVer-format version number.

    Args:
        major_component (int): The major component of the version number.
        minor_component (int): The minor component of the version number.
        patch_component (int): The patch component of the version number.

    Returns:
        str: A SemVer-format version number with the specified Major, Minor
            and Patch Components.
    """
    return '.'.join([str(major_component), str(minor_component), str(patch_component)])
097e8a2fc721b93405d7cec35ab8ae8908d3dc8e
242,365
from typing import Dict


def parse_match_result_data(text: str) -> Dict[str, str]:
    """
    Split match result data (e.g. "3 - 1\n試合終了" or "- 試合前") into goal
    counts and a match status.

    Returns a Dict keyed by home_goal, away_goal, and match_status.
    """
    # Tolerate text without spaces, such as "3-1"
    text = text.replace('-', ' - ')
    result_list = text.split()
    if len(result_list) <= 3:
        # "- 試合前" (pre-match) style
        home_goal = ''
        away_goal = ''
        # result_list[0] is '-'
        match_status = result_list[1]
    else:
        # "3 - 1\n試合終了" (match finished) style
        home_goal = result_list[0]
        # result_list[1] is '-'
        away_goal = result_list[2]
        match_status = result_list[3]
    return {'home_goal': home_goal, 'away_goal': away_goal, 'match_status': match_status}
8493c6f1a1516192ad40fa358f997bc6c23eaf89
487,317
import pickle


def get_pickle_trajectory(path_to_file):
    """
    Reads a trajectory as a `pickle` binary file.

    :param path_to_file: str, the location for the input file
    :return: np.array An array of time points
    :return: np.array An array of (raw) location points, without noise
    :return: np.array An array of noisy locations
    :return: [dict] A list of segment dicts
    """
    if path_to_file[-4:] != '.pkl':
        path_to_file += ".pkl"
    with open(path_to_file, "rb") as f:
        trajectory = pickle.load(f)
    segments = trajectory['segments']
    t_arr = trajectory['time_arr']
    r_arr = trajectory['raw_locs_arr']
    x_arr = trajectory['nse_locs_arr']
    return t_arr, r_arr, x_arr, segments
0e3f142cf43a170821a6c75dd6fd618462b7389a
287,009
def readable(df):
    """Take a dataframe and remove all columns that end in the suffix '_id'.

    The intended purpose of this function is to display a dataframe on the
    console showing only the readable columns i.e. not the identifiers.

    Parameters
    ----------
    df : DataFrame
        A pandas dataframe.

    Returns
    -------
    out : DataFrame
        A dataframe with the same structure as the input df with any columns
        ending in the suffix '_id' removed.
    """
    readable_cols = list(filter(lambda c: not c.endswith('_id'), df.columns))
    return df[readable_cols]
a856cdca3e3940a173c70c64196553100397749d
348,462
def colnum_zb_from_alphacol(alphacol: str) -> int:
    """
    Reverses :func:`column_lettering`, generating a zero-based column index
    from an alphabetical name (A to Z, AA to AZ, etc.).
    """
    base = 26
    zero_char = ord("A")
    total = 0
    reversed_chars = alphacol[::-1]
    for pos, char in enumerate(reversed_chars):
        digit_value = ord(char) - zero_char  # e.g. 0 for A, 25 for Z
        assert 0 <= digit_value < base
        if pos > 0:
            digit_value += 1
        total += digit_value * pow(base, pos)
    return total
9b39289b0cacde8b519fa28fbd6b6ad9953af113
150,213
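A few spot checks for colnum_zb_from_alphacol: in this zero-based scheme A is 0, Z is 25, and AA wraps to 26.

assert colnum_zb_from_alphacol("A") == 0
assert colnum_zb_from_alphacol("Z") == 25
assert colnum_zb_from_alphacol("AA") == 26
assert colnum_zb_from_alphacol("AZ") == 51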
from typing import Tuple

import re
import warnings


def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
    """Parse glibc version.

    We use a regexp instead of str.split because we want to discard any
    random junk that might come after the minor version -- this might happen
    in patched/forked versions of glibc (e.g. Linaro's version of glibc uses
    version strings like "2.20-2014.11"). See gh-3588.
    """
    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if not m:
        warnings.warn(
            "Expected glibc version with 2 components major.minor,"
            " got: %s" % version_str,
            RuntimeWarning,
        )
        return -1, -1
    return int(m.group("major")), int(m.group("minor"))
547df79704865287060b07742a5a1fb0f589da3e
652,453
def create_pids2idxs(data_source):
    """Creates a mapping between pids and indexes of images for that pid.

    Returns:
        Dict mapping each pid => list of image indices.
    """
    pid2imgs = {}
    for idx, (img, target, _) in enumerate(data_source.imgs):
        if target not in pid2imgs:
            pid2imgs[target] = [idx]
        else:
            pid2imgs[target].append(idx)
    return pid2imgs
3bad529a87dfaf62e8ebcd9373c413fc0bdc8ca3
93,608
def _keys(duthost, db, key_pattern):
    """Run Redis command keys over db on duthost."""
    command = "redis-cli --raw -n {db} keys '{key_pattern}'".format(db=db, key_pattern=key_pattern)
    keys_result = duthost.shell(command)
    if not keys_result["stdout"].strip():
        raise ValueError("No keys match key pattern {}".format(key_pattern))
    return [line.strip() for line in keys_result["stdout_lines"]]
2431aed0bddc74fe2566cadf8c118d52711f540c
276,716
def df_regex_filter(df, column, regex):
    """Filter dataframe based on regex matching for specified column"""
    return df[df[column].str.contains(regex, regex=True, na=False)]
601e4238f43e9200b5c22f7d4ca150bd4676459f
548,272
def fuel_level(x, y, serial_number=0):
    """Calculate fuel level for the given coordinate(s)."""
    rack_id = x + 10
    return (((rack_id * y + serial_number) * rack_id) // 100) % 10 - 5
3b9e460df436cde9d2f5277b2b9d8f3cfea745f6
460,580
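This matches the Advent of Code 2018 day 11 power-level formula, so the puzzle's published examples make handy spot checks (hypothetical usage):

assert fuel_level(3, 5, serial_number=8) == 4
assert fuel_level(122, 79, serial_number=57) == -5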
def _is_valid_swift_module_name(string):
    """Returns True if the string is a valid Swift module name."""
    if not string:
        return False
    for char in string:
        # Check that the character is in [a-zA-Z0-9_]
        if not (char.isalnum() or char == "_"):
            return False
    return True
a188864120318c7015a40f2aa8065d851e171c3c
408,529
def edge_frequency_dictionary(edges, freq=None):
    """
    Augments counters within graph G edges based on if they're contained
    within the edges list.

    Parameters
    ----------
    edges : list
        list of edge tuples in the format (start node, end node, 0)
    freq : dictionary
        dictionary of frequencies, with edge tuples as keys in the format
        (start node, end node, 0) and frequency as integer values;
        defaults to an empty dictionary

    Returns
    -------
    frequency : dictionary
        dictionary of frequencies, with edge tuples as keys in the format
        (start node, end node, 0) and frequency as integer values
    """
    # Avoid a mutable default argument, which would be shared across calls
    if freq is None:
        freq = {}
    # iterate over edges, with each edge as a key
    for key in edges:
        if key in freq:
            freq[key] += 1
        else:
            freq[key] = 1
    return freq
f52de1afab57905e2bbdf2103f4de7c5a67dc32b
327,928
def aggregate_str_content(array_of_elements):
    """
    Takes an array of DOM elements with text and merges their text content
    """
    u = ""
    for h in array_of_elements:
        u += h.text
    return u
5eb619ab73d8d3b14614618e21766f14aed35817
203,685
def argmin(list_obj):
    """Returns the index of the min value in the list."""
    min_val = None  # named min_val to avoid shadowing the built-in min()
    best_idx = None
    for idx, val in enumerate(list_obj):
        if min_val is None or val < min_val:
            min_val = val
            best_idx = idx
    return best_idx
6b01fff02d9be5dc0bda6ad170f0b964b10373d9
625,388
def calculate_expected_duration(optimistic, nominal, pessimistic):
    """
    Calculate the expected duration of a task.
    """
    return round((optimistic + (4 * nominal) + pessimistic) / 6, 1)
31bf4aaf3afd582c9514e2b389bcbe5ccaec6959
474,499
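This is the PERT three-point estimate, (O + 4M + P) / 6. A quick worked example with made-up task durations:

# optimistic 2, nominal 4, pessimistic 12 => (2 + 16 + 12) / 6 = 5.0
assert calculate_expected_duration(2, 4, 12) == 5.0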
def lte(value, arg):
    """Returns a boolean of whether the value is less than or equal to the
    argument."""
    return value <= int(arg)
9e9848c3a1d0aae2dbe3bbfc29f1722b019e4143
568,329
def get_error(exception=None):
    """Gets errno from exception or returns one"""
    return getattr(exception, "errno", 1)
301bd618a7f9f0b4430e7d76cfe03f7ce1912f92
262,117
def get_contact_name(contact: dict) -> str:
    """Determine contact name from available first name, last name, and email."""
    contact_name = ""
    if contact["first_name"]:
        contact_name = f"{contact['first_name']} "
    if contact["last_name"]:
        contact_name += f"{contact['last_name']} "
    if contact_name:
        return f"{contact_name}({contact['email']})"
    return contact["email"]
14f4a3b25c370b327913fa7316abd1f452c380e0
131,083
def _find_neighborhoods_ids(input_list, neighborhoods_df):
    """
    Read in a list of neighborhood names and return a list of neighborhood ids.

    Args:
        input_list: a list containing the names of the neighborhoods in the
            treatment group.
        neighborhoods_df: a dataframe containing neighborhood names and
            object_id for each neighborhood.

    Returns:
        a list of neighborhood ids corresponding with the given names.
    """
    id_list = []
    for name in input_list:
        id_list.append(
            int(neighborhoods_df[neighborhoods_df['S_HOOD'] == name]['OBJECTID'])
        )
    return id_list
71ec994ce2e4fe28ec20a1e5604eea4159f8c172
628,228
def dict_lookup(d, value):
    """
    Template filter that looks up a value from a dict.
    """
    return d.get(value)
63c0b1aa4b2aef06d22613e05b07e0601407ac74
382,582
from bs4 import BeautifulSoup


def text_card_factory(title: str, content: str, image_link: str = ""):
    """
    Factory for text cards (the normal cards seen).

    Args:
        title (str): Title for the text card.
        content (str): The text content of the card, the meat of it.
            Automatically formats lines into a list.
        image_link (str, optional): The image on the slide. Defaults to "".

    Returns:
        bs4.element.Tag: the BeautifulSoup text card tag.
    """
    soup = BeautifulSoup()
    title_tag = soup.new_tag("h1")
    title_tag.string = title

    fixed_content = "<ul>"
    for line in content.splitlines():
        fixed_content += "<li>" + line + "</li>"
    fixed_content += "</ul>"
    text_tag = BeautifulSoup(fixed_content, "html.parser")

    card_tag = soup.new_tag("div")
    card_tag.append(title_tag)

    content_table_tag = soup.new_tag("table")
    table_row = soup.new_tag("tr")
    text_table_data = soup.new_tag("td")
    text_table_data.append(text_tag)
    table_row.append(text_table_data)

    if image_link:
        # so that it correctly shares space with the image
        text_table_data["width"] = "50%"
        image_tag = soup.new_tag(
            "img", **{"class": "center", "src": image_link, "width": "50%"}
        )
        img_td = soup.new_tag("td")
        img_td.append(image_tag)
        table_row.append(img_td)

    content_table_tag.append(table_row)
    card_tag.append(content_table_tag)
    return card_tag
ae45391fbd478b87adc0e905acbb3746f3e16fb1
316,852
def set_bit(v, index, x):
    """
    Set the index:th bit of v to x, and return the new value.

    Note that bit numbers (index) are from 0, with 0 being the least
    significant bit.
    """
    mask = 1 << index
    v &= ~mask
    if x:
        v |= mask
    return v
a36ffc3039d8157ad8227f3ffaa5c026345378e4
486,680
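A couple of bit-level spot checks for set_bit with hypothetical values: setting or clearing one bit leaves the others untouched.

assert set_bit(0b1000, 1, 1) == 0b1010  # turn bit 1 on
assert set_bit(0b1010, 3, 0) == 0b0010  # turn bit 3 off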
from typing import Any


def is_true(expression: Any):
    """Returns True only if the input expression is the boolean True
    (not merely truthy)."""
    return expression is True
114c369c21a9f1386c169a09333da7f6a9c065df
436,783
def name_value_str_handler(name):
    """
    Return a generic handler for plain string fields.
    """
    def handler(value, **kwargs):
        return {name: value}
    return handler
58c1805a9b081edef850661b7b30a49d044d1a2f
696,137
def to_tuple_inputs(inputs):
    """Transforms inputs list to a tuple of inputs

    Args:
        inputs (list): list of inputs passed from input_steps and input_data

    Returns:
        tuple: tuple from the list of inputs
    """
    return tuple(inputs)
5b966a8c916417f5ed34d4d37f8eea74cd8f0a94
351,500
def bprop_add(x, y, dz):
    """Backpropagator for primitive `add`."""
    return (dz, dz)
7b7ca4b03ad39b4efc066fb8de431c5c53273627
247,227
def create_edge(_source, _target, _label='', _edge_type=''):
    """Creates an edge whose id is "source_target".

    Parameters:
        _source (str): source node @id
        _target (str): target node @id
        _label (str): label shown in graph
        _edge_type (str): type of edge, influences shape on graph
    """
    return {
        'data': {
            'id': f"{_source}__{_target}",
            '_label': f"\n\u2060{_label}\n\u2060",
            'name': _label,
            'source': _source,
            'target': _target,
            '_edge_type': _edge_type
        },
        'classes': ''
    }
ac43deb5c1f09584bbd396d5aafdd7cc83e09418
358,195
def count_divisible_digits(n, m):
    """
    This function takes two integer numbers n and m as arguments and returns
    the number of digits in n that are divisible by m.
    """
    if n < 0:
        n = n * -1
    if m == 0:
        return 0
    count = 0
    while n != 0:
        digit = n % 10
        remainder = digit % m
        if remainder == 0:
            count += 1
        n = n // 10
    return count
d0b2f78f6e3f153d410eead163fffc095a3bf184
606,857
def IsKillStep(chessboard: list, mv: list) -> bool:
    """
    chessboard: current chessboard info [[x, y, class], [], []...]
    mv: AI move info, [x_src, y_src, x_dst, y_dst]

    return: BOOL: true if this step is a kill step, false if a normal step.
    """
    for pc in chessboard:
        if mv[2] == pc[0] and mv[3] == pc[1]:
            return True
    return False
fe509decb980cda84dcba7eb757002dea6c87af1
68,679
def get_moview_profit(movie_record):
    """
    Computes profit from movie, or returns None if not applicable
    """
    if movie_record.gross is None or movie_record.budget is None:
        return None
    return movie_record.gross - movie_record.budget
1cf59cf0db522a9ea622d6e77361006941a33941
292,737
def binary_search(lst, pattern, offset=0):
    """
    Binary search.

    :param lst: the sequence (slice) to search
    :param pattern: the element to look for
    :param offset: index offset of the slice (i.e. matching starts from the
        offset-th element of the original list)
    :return: index of ``pattern`` in the original list, or -1 if not found
    """
    if not lst:
        return -1
    middle = len(lst) // 2
    if pattern == lst[middle]:
        return offset + middle
    elif pattern > lst[middle]:
        # The target is greater than the middle value; recurse into the
        # right half of the sequence.
        return binary_search(lst[middle + 1:], pattern, offset=offset + middle + 1)
    else:
        return binary_search(lst[:middle], pattern, offset=offset)
7189f2b946276626571ed4844a575d70301a2c79
489,683
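A usage sketch for binary_search (the list must already be sorted); the offset parameter lets the recursive calls report indices in the original list.

data = [1, 3, 5, 7, 9]
assert binary_search(data, 7) == 3
assert binary_search(data, 4) == -1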
def get_token_list(text):
    """Returns a list of tokens.

    This function expects that the tokens in the text are separated by space
    character(s). Example: "ca n't , touch". This is the case at least for
    the public DiscoFuse and WikiSplit datasets.

    Args:
        text: String to be split into tokens.
    """
    return text.split()
01a917fae5923cdfd693548bb688695a917fab70
702,115
import inspect


def codeReturnsSomething(func):
    """
    Checks whether a function/method appears to have a return statement that
    returns data. It simply examines the code for the function/method
    looking for a return call.

    :param func: Function to check
    :type func: function/method
    :return: Whether the function/method returns or yields data
    :rtype: bool
    """
    sourceCode = inspect.getsource(func).split('\n')
    for line in sourceCode:
        tokens = line.strip().split()
        if len(tokens) >= 2 and (tokens[0] == 'return' or tokens[0] == 'yield'):
            return True
    return False
78be412b2f1214cfea8b01f4d12b99dccf5d2e7c
299,739
def read_files(country, filename):
    """read in a database of cities from a specific country and write it to
    a list of dictionaries"""
    file1 = open(filename, "r")
    # NOTE: the ``country`` parameter is immediately shadowed; the function
    # builds a fresh list of city entries from the file.
    country = []
    line = "."
    while line != "":
        line = file1.readline()
        if line == "":
            break
        line = line.strip("\ufeff")
        splitline = line.split(",")
        city = splitline[0]
        province = splitline[1]
        latitude = splitline[2]
        longitude = splitline[3]
        timezone = splitline[4].strip("\n")
        entry = {}
        entry["city"] = city
        entry["province/state"] = province
        entry["latitude"] = latitude
        entry["longitude"] = longitude
        entry["timezone"] = timezone
        country.append(entry)
    file1.close()
    return country
2a40884fcd56b252822466b5d51662a0d815b636
260,038
def infer_n_hypers(kernel):
    """Infer number of MCMC chains that should be used based on size of kernel"""
    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1
    return n_hypers
ae0e4a3cafbd8c052843585be8ea5c8209a3ee31
621,409
def Indent(spaces, line):
    """Indent a string."""
    return ' ' * spaces + line
89a97c55bcacacaef2ca2b9538546b9703972ee6
329,248
def collection_2_listed_string(elems):
    """
    Given a Container (i.e. a Set, List, etc.) of strings, returns an HTML
    string wherein each item has its own line, and all items are in
    alphabetical order.

    This will be used to help format and construct the email text body
    (as an HTML) in the build_email_msg function.
    """
    # if the set is empty, output that no appointments are available
    if not elems:
        return 'None Available' + '<br>'
    # Initialize empty string
    string = ""
    # Sorts the set (converts to a list)
    sort = sorted(elems)
    # Loop over each city in the set and append the city to the HTML string
    for city in sort:
        string += city + '<br>'
    # Returns the HTML string of a list of cities
    return string
e21297d716ce6eded7523894821bd270f227f239
478,026
def find_eyes(landmarks):
    """
    Helper method to find all key-value pairs in a dict whose keys reference
    'left_eye' and 'right_eye'.

    The dictionary should have string keys or it will return a None tuple.

    :param landmarks: Dictionary with String keys referencing left and right
        eyes
    :return: a tuple of dicts mapping landmark names to pixel locations for
        left_eye and right_eye
    """
    left_eye = {}
    right_eye = {}
    for key in landmarks.keys():
        if str(key).startswith("left_eye"):
            left_eye[key] = landmarks[key]
        elif key.startswith("right_eye"):
            right_eye[key] = landmarks[key]
    return left_eye, right_eye
d05dac4981a14ffa47c41f8b59249e777b006e6c
257,306
def is_absolute_url(uri):
    """
    We have URLs pointing to Creative Commons licenses, starting with 'cc:',
    which for Linked Data purposes are absolute URLs because they'll be
    resolved into full URLs.

    >>> is_absolute_url('http://fr.wiktionary.org/wiki/mįkká’e_uxpáðe')
    True
    >>> is_absolute_url('/c/fr/nouveau')
    False
    """
    return uri.startswith('http') or uri.startswith('cc:')
ebc70627e10e80e7b5ca105d624d30b6d050736d
243,659
def interaction_strength(idx: int, mol_map: dict, acceptor_exp: float) -> float:
    """Calculate interaction strength for atom with index `idx`."""
    acceptor_strength = mol_map[idx]["sa"]
    num_lp = mol_map[idx]["num_lp"]
    if num_lp != 0:
        return acceptor_strength * (num_lp ** acceptor_exp)
    return 0.0
382b69e56002a24691fdc9e8b6943eeeeee7293a
45,471
def normalize(tensor, eps=1e-5):
    """Normalize tensor to [0, 1]"""
    # eps=1e-5 is same as make_grid in torchvision.
    minv, maxv = tensor.min(), tensor.max()
    tensor = (tensor - minv) / (maxv - minv + eps)
    return tensor
ea9f780fc5f16840dfa25f69003c9637d2d610a8
182,027
def constant_thresholds(graph, value):
    """Sets a constant threshold for every node of the graph."""
    # Store threshold assignment in a dictionary
    thresholds = dict()
    # Add a constant attribute to each node
    for node in graph.Nodes():
        thresholds[node.GetId()] = value
    return thresholds
f8f2706b0860d5ad839d28745ba8616532b183e5
410,474
def get_q_rtd_H(q_rq_H, A_HCZ, f_cT, f_cI):
    """Rated heating capacity.

    Args:
        q_rq_H (float): required heating capacity per unit floor area
        A_HCZ (float): floor area of the heating/cooling zone
        f_cT (float): outdoor temperature correction factor
        f_cI (float): intermittent-operation capacity correction factor

    Returns:
        float: rated heating capacity
    """
    return q_rq_H * A_HCZ * f_cT * f_cI
a7d7fb0e202a9f8b82364696bdf61f708cff826a
271,921
def path_actions(node):
    """The sequence of actions to get to this node."""
    if node is None:
        return []
    if node.parent is None:
        return []
    return path_actions(node.parent) + [node.action]
e6d5a9e2dbcc34aa1aa4407f5c2fefeee0326f25
581,121
def train_network(model, data, epochs, loss_function, optimizer, display_loss=True):
    """
    Train a pytorch model.

    Inputs:
        model: torch model, e.g., MLPRegressor()
        data: dict, contains at a minimum:
            {"x_train": <torch.Tensor>, "y_train": <torch.Tensor>,
             "x_val": <torch.Tensor>, "y_val": <torch.Tensor>}
        epochs: int, number of training epochs
        loss_function: torch loss function, e.g., torch.nn.MSELoss()
        optimizer: torch optimizer, e.g., torch.optim.Adam()
        display_loss: bool; If True, display the training/validation loss at
            each epoch. Defaults to True.

    Outputs:
        model: trained torch model
    """
    for _ in range(epochs):
        y_pred = model.forward(data["x_train"])
        loss = loss_function(y_pred, data["y_train"])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # compute validation loss
        y_pred_val = model(data["x_val"])
        val_loss = loss_function(y_pred_val, data["y_val"])

        if display_loss:
            print("training/validation loss",
                  round(loss.item(), 3), "/", round(val_loss.item(), 3))
    return model
8352ea390a0ecb8fe5392a51bfe31755dc3da10a
522,361
def list_clean(string):
    """
    Transforms a comma separated string to a list, stripping whitespace.

    "HOME, WORK,pref" -> ['HOME', 'WORK', 'pref']

    string: string of comma separated elements

    returns: list()
    """
    string = string.split(',')
    rstring = list()
    for element in string:
        rstring.append(element.strip(' '))
    return rstring
49c188387e7a8d5f1f4195a81aecd5d639e7a590
204,621
def var_replace(vars, value):
    """Replace all instances of ${x} in value with the value of vars['x']."""
    if value is None:
        return value
    for var, rep in vars.items():
        if isinstance(rep, str):
            value = value.replace(f"${{{var}}}", rep)
    return value
176fefdbf92cbc28370c292bd582212adb0614ef
482,828
def _ts_data_by_index(train_ids, test_ids, data):
    """Allow to get time series data by indexes of elements"""
    features = data.features[train_ids]
    target = data.target[test_ids]
    return features, target
0379ca1f1d5f1df46771863aa241c3b57a86667f
459,016
def check_max_errors(report_json):
    """Checks whether the validation report indicates that validate hit its
    max error count and exited early."""
    max_errors = int(report_json.get('parameters').get('maxErrors'))
    errors_encountered = int(report_json.get('summary').get('totalErrors'))
    return max_errors == errors_encountered
1cc8ba1e92dd717e43860a8b0617dc4e6f75bcaf
475,614