Columns: content (string, 39 to 9.28k characters), sha1 (string, 40 characters), id (int64, 8 to 710k)
def _element_iterable(el): """ Test if an element is iterable Parameters ---------- el: object Returns ------- iterable: boolean True if el is iterable, False otherwise """ try: el[0] iterable = True except (TypeError, IndexError): iterable = False return iterable
844d5192a90a8fae5f557a099f221b446b6d8b8c
577,741
def isPower2(num): """ Check if num is power of two """ return ((num & (num - 1)) == 0) and num > 0
46160f29e78252f3e2e195d97c23d2b647450de5
683,195
import torch def random_boxes(num_boxes, max_coord=100, device="cpu"): """ Create a random Nx4 boxes tensor, with coordinates < max_coord. """ boxes = torch.rand(num_boxes, 4, device=device) * (max_coord * 0.5) boxes.clamp_(min=1.0) # tiny boxes cause numerical instability in box regression # Note: the implementation of this function in torchvision is: # boxes[:, 2:] += torch.rand(N, 2) * 100 # but it does not guarantee non-negative widths/heights constraints: # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: boxes[:, 2:] += boxes[:, :2] return boxes
bd415762f6bad17f4ea6a54c92b00844d3608869
554,858
import tempfile import glob import io def open_shards(glob_pattern, mode='rt', encoding='utf-8'): """Returns a composite file of all shards matching the given glob pattern. Args: glob_pattern (str): Pattern used to match files which should be opened. mode (str): Specify the mode in which the file should be opened. For available modes, check io.open() documentation. encoding (str): Name of the encoding used to decode or encode the file. This should only be used in text mode. Returns: A stream with the contents of the opened files. """ if 'b' in mode: encoding = None with tempfile.NamedTemporaryFile(delete=False) as out_file: for shard in glob.glob(glob_pattern): with open(shard, 'rb') as in_file: out_file.write(in_file.read()) concatenated_file_name = out_file.name return io.open(concatenated_file_name, mode, encoding=encoding)
29a588ed855fbd59b115f3ceef930096fe96ff06
404,513
def unicode_code(text): """ Finds the Unicode code points for the given characters, up to 10 characters. """ return "\n".join("{} **{}**".format(s, ord(s)) for s in text[:10])
a409cbcef852e75410c46b59618191a4c3f8192c
416,764
def learning_rate_decay(alpha, decay_rate, global_step, decay_step): """ Updates the learning rate using inverse time decay in numpy alpha is the original learning rate decay_rate is the weight used to determine the rate at which alpha will decay global_step is the number of passes of gradient descent that have elapsed decay_step is the number of passes of gradient descent that should occur before alpha is decayed further Returns: the updated value for alpha """ return alpha / (1 + decay_rate * (global_step // decay_step))
640f823ace92fdbd6fca198468864445a201b7e3
664,034
def twos_complement(hexstr,bits): """ twos_complement(hexstr,bits) Converts a hex string of a two's complement number to an integer Args: hexstr (str): The number in hex bits (int): The number of bits in hexstr Returns: int: An integer containing the number """ value = int(hexstr,16) if value & (1 << (bits-1)): value -= 1 << bits return value
9b5558ddfeaac5de4285a9c1feb3d2a3f23bb83a
543,258
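A quick sanity check of twos_complement above (example values chosen here, not part of the original snippet):

# 0xff read as an 8-bit two's-complement number is -1
assert twos_complement("ff", 8) == -1
# 0x7f stays positive because the sign bit (bit 7) is clear
assert twos_complement("7f", 8) == 127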
from typing import Dict from typing import Any def subdict(cols: Dict[str, Any], subkeys: list) -> Dict[str, Any]: """Take a dictionary and subset it based on a list of keys. Args: cols (Dict[str, Any]): The full dictionary to be subsetted subkeys (list): list of keys to be contained in the subset Returns: Dict[str, Any]: A subsetted dictionary """ return {key: cols[key] for key in subkeys}
72f5ae909909a13dd7c2ec312b3159313bf3b01b
65,617
from typing import List def intersect(nums1: List[int], nums2: List[int]) -> List[int]: """Given two arrays of integers, write a function to compute their intersection. From `leetcode <https://leetcode.com/problems/intersection-of-two-arrays-ii/>` Note: - Each element in the result should appear as many times as it shows in both arrays. - The result can be in any order. :param nums1: {List[int]} :param nums2: {List[int]} :return: {List[int]} intersection of two arrays """ output = [] for n in nums1: if n in nums2: output.append(n) nums2.remove(n) return output
e3582117673b9f5a8eddd8a1ce39399328f5b60e
216,282
def sec_url(period): """ Create url link to SEC Financial Statement Data Set """ url = "".join([ "https://www.sec.gov/files/dera/data/financial-statement-data-sets/", period, ".zip" ]) # handle weird path exception of SEC if period == "2020q1": url = "".join([ "https://www.sec.gov/files/node/add/data_distribution/", period, ".zip" ]) return url
8d38a8dca62a7fd23a04130bd37aed1ed9ae34a0
682,411
def key_released(self, keyboard, keycode): """ Stop left and right movement. """ self.speed_x = 0 return True
e62e3bd52340bedf433a3c09e54fb9548345617d
557,400
def choose_grid(n, columns=None, rows=None, max_diff=None): """ Return a square-ish grid size that contains n panels. Optionally can specify number of columns or rows. Unless max_diff is set, prefer exactly divisible rectangles over squares. Parameters ---------- n : int number of panels columns : int fixed number of columns rows : int fixed number of rows max_diff: int maximum difference between rows - columns. E.g. 24 can be exactly (6, 4), but if max_diff is 1, then (5, 5) is returned. Returns ------- rows: int cols: int """ if rows and columns: return rows, columns if rows: fixed = rows max_diff = None elif columns: fixed = columns max_diff = None else: # this is columns, make it such that the rectangle is more narrow than tall fixed = int(n ** 0.5) floated = n // fixed if floated * fixed < n: floated += 1 if max_diff and floated - fixed > max_diff: floated -= 1 fixed += 1 return (fixed, floated) if rows else (floated, fixed)
e9227a263a60898c62040fe42fa688cc0e94018a
453,299
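A short usage sketch for choose_grid, mirroring the cases described in its docstring:

# 24 panels divide exactly into a 6-row by 4-column grid
assert choose_grid(24) == (6, 4)
# with max_diff=1 the exact rectangle is rejected in favour of a square
assert choose_grid(24, max_diff=1) == (5, 5)
# 10 panels get a 4 x 3 grid with one empty slot
assert choose_grid(10) == (4, 3)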
def compare_graphs(before, after): """ Compare two (sub)graphs. Note: a == b != b == a! :param before: A networkx (sub)graph. :param after: A networkx (sub)graph. :returns: A dict with changes. """ res = {'added': [], 'removed': [], 'added_edge': [], 'removed_edge': [], 'chg_attr': []} for node in after.nodes(): if node not in before.nodes(): # add missing nodes if node not in res['added']: res['added'].append(node) for link in after.out_edges([node]): if link[1] not in before.nodes() \ and link[1] not in res['added']: res['added'].append(link[1]) res['added_edge'].append(link) else: # already there... if before.node[node]['attributes'] != \ after.node[node]['attributes']: res['chg_attr'].append((node, before.node[node]['attributes'], after.node[node]['attributes'])) for node in before.nodes(): if node not in after.nodes(): res['removed'].append(node) for link in before.out_edges([node]): res['removed_edge'].append(link) else: # node exists lets check the edges. for link in after.out_edges([node]): if link not in before.out_edges([node]): res['added_edge'].append(link) for link in before.out_edges([node]): if link not in after.out_edges([node]): res['removed_edge'].append(link) return res
ee89a35da1bb74a42c93787ec138ee416a04c831
651,777
def snake_to_pascal(string: str) -> str: """ Converts snake-case to pascal-case. >>> snake_to_pascal(string="hello_and_good_morning") # Returns "HelloAndGoodMorning" """ words = string.split('_') words_capitalized = list(map(str.capitalize, words)) return "".join(words_capitalized)
9f5262978ea1c087bcaa90ead3e330fde6f0e2b1
611,373
def _GenerateEstimatorConstructor(estimator_class_name, variable_types, variable_names, extension_class_name): """ Generates the constructor for the estimator class. """ code = ["\n\npublic {0}(IHostEnvironment env".format(estimator_class_name)] # Generate the Constructor parameters for variable_type, variable_name in zip(variable_types, variable_names): code.append(", {0}.TransformParameter<{1}> {2}".format(extension_class_name, variable_type, variable_name)) code.extend( [ ", string outputColumn", ")\n{" ] ) # Generate assigning the values in the constructor for variable_name in variable_names: code.append("\n_{0} = {0};".format(variable_name)) # Add assignments that are always required code.extend( [ "\n_outputColumn = outputColumn;", "\n_host = env.Register(nameof({0}));".format(estimator_class_name), "\n}" ] ) return "".join(code)
19366e1e25befa2e0723604d31f0f59b602b9b51
15,829
def rgb_to_dec(value): """ Converts rgb to decimal colours (i.e. divides each value by 256) value: list (length 3) of RGB values Returns: list (length 3) of decimal values""" return [v / 256 for v in value]
f01229a6ed0dd8ea5f160dc779258ebcde2b1a3a
402,568
def get_accuracy(y_pred, y_test): """ Get the prediction accuracy, which is number of correct predictions / number of all predictions. :param y_pred: :param y_test: :return: prediction accuracy """ good = 0 for i, pred in enumerate(y_pred): if pred == y_test[i]: good += 1 return (good + 0.0) / len(y_pred)
7783a2bcbf0d5e48a830da52d9bff1c5f461eba1
512,345
def _get_amazon_review_text(row): """Gets the Amazon review text given row data. # Arguments row: pandas row data from Amazon review dataset. # Returns: string, text corresponding to the row. """ title = '' if type(row[1]) == str: title = row[1].replace('\\n', '\n').replace('\\"', '"') body = '' if type(row[2]) == str: body = row[2].replace('\\n', '\n').replace('\\"', '"') return title + ', ' + body
e4573c530fd00bc411a5fbc96e59629c1fd64f0f
171,356
def block_comment(s: str, documentation: bool = True) -> str: """Block comment, optionally documentation format.""" start = '{-|' if documentation else '{-' s_ = s[:-1] if s == '\n' else s return f'{start} {s_}\n\n-}}\n'
52a5ee74d93aeaae30f646a4bda37051db0f7ddb
448,976
def biased_rolls(prob_list, s, n): """ Simulate n rolls of a biased m-sided die and return a list containing the results. Arguments: prob_list: a list of the probabilities of rolling the number on each side of the m-sided die. The list will always have the length m (m >= 2), where m is the number of sides numbered 1 to m. Therefore, for example, the probability stored at index 0 in the list is the probability of rolling a 1 on the m-sided die. s: the seed to use when initializing the PRNG n: the number of rolls to return Return: rolls: a list (of length n) containing each of the n rolls of the biased die, in the order they were generated. """ # replace this line and solve the problem! rolls = None # return the resulting rolls return rolls
ed3e55086c6590c7eab7b797d788197016f37cca
362,423
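The body of biased_rolls above is left as an exercise stub. One possible completion, sketched here under the assumption that Python's random module is an acceptable PRNG (this is not the original author's solution):

import random

def biased_rolls_sketch(prob_list, s, n):
    """Sketch: n weighted rolls of a len(prob_list)-sided die, sides numbered 1..m."""
    random.seed(s)
    sides = range(1, len(prob_list) + 1)
    return random.choices(sides, weights=prob_list, k=n)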
def constant(x, amp): """ Constant model :param x: Dispersion :type x: np.ndarray :param amp: Amplitude of the constant model :type amp: float :return: Constant model :rtype: np.ndarray """ return amp + 0 * x
ff305df4d575774fc5c259b749d8577389c2da0f
438,489
def get_overall_misclassifications(H, training_points, classifier_to_misclassified): """Given an overall classifier H, a list of all training points, and a dictionary mapping classifiers to the training points they misclassify, returns a set containing the training points that H misclassifies. H is represented as a list of (classifier, voting_power) tuples.""" misclassified_points = [] for point in training_points: output = 0 for classifier in H: if point in classifier_to_misclassified[classifier[0]]: output += classifier[1]*-1 else: output += classifier[1]*1 if output <= 0: misclassified_points.append(point) return set(misclassified_points)
9439ed620f982f68da115ce82056c059052364c7
558,616
def get_storage_conn_string(hostname, account_name, account_key): """Returns the connection string of a local storage account containing only blob endpoint and HTTP protocol""" blob_endpoint = "%s/%s" % (hostname, account_name) conn_string = "DefaultEndpointsProtocol=http;BlobEndpoint=%s;AccountName=%s;AccountKey=%s;" % (blob_endpoint, account_name, account_key) return conn_string
55ad94d498e215c478f76b6a1e4902bf0287ab21
292,468
def plot3dOnFigure(ax, pixels, colors_rgb,axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))): """Plot pixels in 3D.""" # Set axis limits ax.set_xlim(*axis_limits[0]) ax.set_ylim(*axis_limits[1]) ax.set_zlim(*axis_limits[2]) # Set axis labels and sizes ax.tick_params(axis='both', which='major', labelsize=14, pad=8) ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16) ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16) ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16) # Plot pixel values with colors given in colors_rgb ax.scatter( pixels[:, :, 0].ravel(), pixels[:, :, 1].ravel(), pixels[:, :, 2].ravel(), c=colors_rgb.reshape((-1, 3)), edgecolors='none') return ax
067219abba7f77f7c4fbb4404ff16a3f5192f7cd
708,745
from functools import reduce from operator import mul def doubleFactorial(n): """ Returns double factorial of an integer. """ return reduce(mul, range(n, 0, -2))
43e79b8f43bd4e63806f24c3847fd447e86b3bf9
690,242
import logging def log_level_to_constant(loglevel): """Convert human readable log level to logging constant""" return getattr(logging, loglevel)
68bd2365b404979d18261b290c18349050392f53
68,660
def right_digit(x): """Returns the right most digit of x""" return int(x%10)
3f52393e9241714839e97a41f858753485cc5c89
27,983
def get_clk_hrow_and_rebuf_tiles_sorted(cur): """ Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_R and REBUF tiles. Returns them in a list sorted according to their Y coordinates. """ cur.execute( """ SELECT name FROM phy_tile WHERE name LIKE "CLK_HROW_BOT_R_%" OR name LIKE "CLK_HROW_TOP_R_%" OR name LIKE "CLK_BUFG_REBUF_%" ORDER BY grid_y DESC; """ ) return [t[0] for t in cur.fetchall()]
7cfd6005dc8b8d7830386cd7c6d4dab600a8a0d2
614,771
def parse_args(parser_): """ Parse commandline arguments. """ parser_.add_argument('--images_txt_path', type=str, default="../data/image/images.txt", help='image text') parser_.add_argument('--labels_txt_path', type=str, default="../data/image/labels.txt", help='label') return parser_
8f0d86b02a4d2d1a77a36f5936d3946f4f3262f0
451,587
def subclasses(cls, abstract=False, private=False): """Return the subclasses of class `cls` as a dict. If abstract, include classes with abstract methods. If private, include private classes. """ return { sc.__name__: sc for sc in cls.__subclasses__() if (abstract or not sc.__abstractmethods__) and ( private or sc.__name__[0] != '_') }
a31e9736840976267cadaae3924902edf292b963
261,506
from textwrap import dedent def dedent_strip_nodetext_formatter(nodetext, has_options, caller=None): """ Simple dedent formatter that also strips text """ return dedent(nodetext).strip()
36db3cf7bb286eb3b45dcc5c402fdf1dac4d82dc
124,111
import re def match_numbered_leaf(leaf_to_match, search_string): """ Find a numbered leaf matching a tf.Operation and return it if found. Example: 'Conv2D' will match '.../Conv2D_5' and return 'Conv2D_5' """ expr = '/({0})|/({0}_[0-9]+)'.format(leaf_to_match) match = re.search(expr, search_string) if match is not None: return match.group(1)
f7b2307cf45f1be8e80c7fc272ed1bb449781faf
427,098
def restricted_utc_time_from_datetime(date): """Convert given ``datetime.datetime`` object `date` to an restricted ASN.1 UTC time string. """ if date.tzinfo is not None: date -= date.utcoffset() return date.strftime('%y%m%d%H%M%S') + 'Z'
418da1cc4d5eea8955a4a8263f803a4c1610c635
571,670
def _get_nested(dict_, keys): """ Nested get method for dictionaries (and lists, tuples). """ try: for key in keys: dict_ = dict_[key] except (KeyError, IndexError, TypeError): return None return dict_
58e7487b189c477fdbb4a59f00e2ac6f6665a1b9
428,554
def no_op(ctx, node, name, args): """Skip node.""" return None
1fede015a843657f3959bb8da4c2216a8674e60c
9,253
def get_mentions(status_dict, exclude=[]): """ Given a status dictionary, return all people mentioned in the toot, excluding those in the list passed in exclude. """ # Canonicalise the exclusion list by lowercasing all names and # removing leading @'s for i, user in enumerate(exclude): user = user.casefold() if user[0] == "@": user = user[1:] exclude[i] = user users = [user["username"] for user in status_dict["mentions"] if user["username"].casefold() not in exclude] return users
9253cc5eb65e2d42ffdce3b5c01251d76fd94a40
315,044
import ast def nodeLines(node): """ Get the line number range of a node Args: node: an ast node Returns: (start line #, end line # + 1) """ min_lineno = node.lineno max_lineno = node.lineno for node in ast.walk(node): if hasattr(node, "lineno"): min_lineno = min(min_lineno, node.lineno) max_lineno = max(max_lineno, node.lineno) return [min_lineno, max_lineno + 1]
c36047f026f82ae6d124c16fef1c9e3d78549515
300,139
def rotToQuat(obj): """ return the rotation of the object as a quaternion""" if obj.rotation_mode == 'QUATERNION' or obj.rotation_mode == 'AXIS_ANGLE': return obj.rotation_quaternion else: # euler return obj.rotation_euler.to_quaternion()
d869fede5bf3fa34282a94e3b22939ec08488fd0
117,052
import shutil def copy(source, destination): """ Copy file to destination, returns true/false. :param str source: File to copy. :param str destination: Destination file :returns: True if succeeded :rtype: bool example:: success = xbmcvfs.copy(source, destination) """ try: shutil.copyfile(source, destination) except shutil.Error: return False else: return True
8712889ca7a86e16f8f85cfdf5cd62df5c93531a
182,206
def extend_dict(dict_1: dict, dict_2: dict) -> dict: """Assumes that dict_1 and dict_2 are both dictionaries. Returns the merged/combined dictionary of the two dictionaries.""" return {**dict_1, **dict_2}
88e2f1c613181046b388092677b8ee627c432d4d
672,211
import time def date_dff(date_a, date_b): """ :params: :param date_a: time.struct_time :param date_b: time.struct_time :return: the difference in minutes between two dates :rtype: float """ return abs((time.mktime(date_a) - time.mktime(date_b)) / 60)
716ed9b273d3eec1a8efe21615fb389b4fb08651
373,201
def get_num_articles_words(issue): """ Given an issue, returns a tuple with the number of articles and total number of words in the articles. :param issue: issue :type issue: defoe.papers.issue.Issue :return: (1, num_articles, num_words) :rtype: tuple(int, int, int) """ num_words_per_article = [len(article.words) for article in issue.articles] num_words = sum(num_words_per_article) return (1, len(issue.articles), num_words)
088ae2eebb9877c6963db7f4f51677c02726ff0b
463,697
def tagseq_to_entityseq(tags: list) -> list: """ Convert tags format: [ "B-LOC", "I-LOC", "O", "B-PER"] -> [(0, 2, "LOC"), (3, 4, "PER")] """ entity_seq = [] tag_name = "" start, end = 0, 0 for index, tag in enumerate(tags): if tag.startswith("B-"): if tag_name != "": end = index entity_seq.append((start, end, tag_name)) tag_name = tag[2:] start = index elif tag.startswith("I-"): if tag_name == "" or tag_name == tag[2:]: continue else: end = index entity_seq.append((start, end, tag_name)) tag_name = "" else: # "O" if tag_name == "": continue else: end = index entity_seq.append((start, end, tag_name)) tag_name = "" if tag_name != "": entity_seq.append((start, len(tags), tag_name)) return entity_seq
1fb1ce7ff7266a961ba84188bcbf9df2c92ba650
81,158
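A concrete call matching the docstring example of tagseq_to_entityseq (the final PER span runs to the end of the sequence, hence end index 4):

tags = ["B-LOC", "I-LOC", "O", "B-PER"]
assert tagseq_to_entityseq(tags) == [(0, 2, "LOC"), (3, 4, "PER")]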
def SplitFieldName(qualified_field_name): """Splits the field name on '/' and returns the parts separately. Args: qualified_field_name: the possibly fully qualified field name Returns: tuple with namespace and raw field name. Defaults to global for no namespace. """ field_only = qualified_field_name namespace = '' split = qualified_field_name.split('/') if len(split) == 2: namespace = split[0] field_only = split[1] return namespace, field_only
4bf4f5778cc519e6c29f17a92eceb1bc0db99720
591,245
def generate_ngram(text, n=3): """ Generate n-gram frequency table for given text. """ occurrences = dict() ngram = dict() for i in range(len(text) - n + 1): cur = text[i:i+n] if cur in occurrences: occurrences[cur] += 1 else: occurrences[cur] = 1 for (key,value) in occurrences.items(): ngram[key] = float(value) / (len(text) - n + 1) return ngram
c165d9cf6d0711467e146ab38f98b8481c466f2d
464,436
def root_(radicand, index, printed=False): """Get the nth root of a number.""" # Example: The square root of a number can be gotten by raising that # number to the power of 1/2. if radicand < 0: root = f'{(radicand*-1)**(1/index)}i' else: root = radicand**(1/index) if printed: print(root) else: return root
c4c01e534b232296a600ce6283dacfdafd033df6
144,917
def get_cluster_name(cid: int): """ Return cluster name from cluster ID Parameters ---------- cid : int Cluster ID Returns ------- string The cluster name """ if cid == 0: return 'cluster' else: return 'cluster_%d' % (cid)
c22088baec06ff9d311716d9d41d6983cddd3aeb
261,268
def RetrieveValue(Dictionary, IndexPath): """ WARNING: This function is for internal use. Enter dictionary recursively using IndexPath and return leaf value. """ if IndexPath == []: return Dictionary else: return RetrieveValue(Dictionary[IndexPath[0]], IndexPath[1:])
93b97de799b6262e0a78f79292cb56ddfb785013
400,687
import codecs def escaped_str_to_bytes(data): """ Take an escaped string and return the unescaped bytes equivalent. Raises: ValueError, if the escape sequence is invalid. """ if not isinstance(data, str): raise ValueError("data must be str, but is {}".format(data.__class__.__name__)) # This one is difficult - we use an undocumented Python API here # as per http://stackoverflow.com/a/23151714/934719 return codecs.escape_decode(data)[0]
4399346e84a4feafdeddce2e25959c05f99e2fc1
317,926
def exclude_bck(df): """ Exclude background/target sentences that don't have any domain labels. """ print(f"Before excl: {len(df)=}") crit1 = "(background_sent or target_sent)" crit2 = "labels.astype('str') == '[0, 0, 0, 0, 0, 0, 0, 0, 0]'" to_excl = df.query(f"{crit1} and {crit2}") df = df.loc[~df.index.isin(to_excl.index)] print(f"After excl: {len(df)=}") return df
126d7df7e5a95eff7d98772d2a938a06582ce845
303,149
def _get_range_clause(column, value, bucket_interval): """Returns an SQL clause specifying that column is in the range specified by value. Uses bucket_interval to avoid potentially ambiguous ranges such as 1.0B-1.9B, which really means [1B, 2B). """ if value[0] == '-': # avoid minus sign with split arr = value[1:].split('-', 1) arr[0] = '-' + arr[0] else: arr = value.split('-', 1) if len(arr) > 1: low = arr[0] high = arr[1] else: return column + " = " + value if low.endswith('M'): low = int(round(float(low[:-1]) * 1000000)) high = low + bucket_interval elif low.endswith('B'): low = int(round(float(low[:-1]) * 1000000000)) high = low + bucket_interval elif '.' not in low: low = int(low) high = low + bucket_interval # low is inclusive, high is exclusive # See https://github.com/elastic/elasticsearch-dsl-py/blob/master/elasticsearch_dsl/faceted_search.py#L125 return column + " >= " + str(low) + " AND " + column + " < " + str(high)
7b0e9da8fa1ac9365e93ccd1137d519f08dadbed
28,000
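An illustrative call for _get_range_clause; the column name and bucket size below are made up for the example:

# "1M-2M" with a 1,000,000 bucket becomes a half-open numeric range
clause = _get_range_clause("revenue", "1M-2M", 1000000)
# clause == "revenue >= 1000000 AND revenue < 2000000"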
def remove_final_whitespace(string): """ Return a copy of *string* with final whitespace removed from each line. """ return '\n'.join(x.rstrip() for x in string.split('\n'))
136692e32aa14ec607c908e7cd455e7885d6d463
98,912
def removeEverythingEqualTo(array, value): """ Remove everything in an array equal to a value """ while (True): try: array.remove(value) except ValueError: return array
8040a8b16fbf272a0593713114ca813343df903c
244,218
def remove(text, removeValue): """ Return a string where all remove value are removed from the text. """ return text.replace(removeValue, '')
cfc7f5f5ab212bea6e02423fd0fb1fc2947be7a2
696,434
def good_str(x): """Returns a safe (escaped) version of the string. str -- The string""" return repr(str(x))[1:-1]
ff67037b8cbf177218e0cd98fd7353c2e108f2a3
280,401
def build_signature(function_name, function_parameters): """ Given the function name and function parameters, returns the signature line :param function_name: the name of the function :param function_parameters: the parameters of the function (whatever would be within the brackets) """ return "def {}({}):".format(function_name, function_parameters)
921fa5260a7b8ffc887c711219052e0a4bb4a7de
613,860
def binary_search(array, val): """Binary search.""" sorted_array = sorted(array) i = 0 j = len(array) - 1 while i <= j: mid = (i + j) // 2 if sorted_array[mid] == val: return mid if sorted_array[mid] < val: i = mid + 1 else: j = mid - 1
e0dc1386f9b5c2f8896df0c92419b125ff566add
650,994
def _get_nodes_without_in_edges(graph): """Get all nodes in directed graph *graph* that don't have incoming edges. The graph is represented by a dict mapping nodes to incoming edges. Example: >>> graph = {'a': [], 'b': ['a'], 'c': ['a'], 'd': ['b']} >>> _get_nodes_without_in_edges(graph) ({'a'}, {'b': set(), 'c': set(), 'd': {'b'}}) :param graph: A dict mapping nodes to incoming edges. :return: The set of nodes without incoming edges and the graph with these nodes removed. """ nextlevel = set() for node, deps in graph.items(): if not deps or deps == {node}: nextlevel.add(node) filtered_graph = {} for node, deps in graph.items(): if node in nextlevel: continue filtered_graph[node] = \ {dep for dep in deps if dep not in nextlevel} return nextlevel, filtered_graph
367a7c136e795f768a96bb6b013a09f0fb6ed967
684,614
import io def spit(path, txt, encoding='UTF-8', append=False): """ Write a unicode string `txt` to file `path`. By default encoded as UTF-8 and truncates the file prior to writing Parameters ---------- path : str File path to file on disk txt : unicode Text content to write to file encoding : str, default `UTF-8`, optional Encoding of the file append : Boolean, default False Append to file instead of truncating before writing Returns ------- The txt written to the file as a unicode string """ mode = 'a' if append else 'w' with io.open(path, mode, encoding=encoding) as f: f.write(txt) return txt
75b406f81068006aba61f34395590b8a1948a469
656,229
def plot_points(plot_dict): """ dict -> list, list Takes a dictionary and returns the key-value pairs as x and y lists to be plotted. """ x = [] y = [] for key in plot_dict: x.append(key) y.append(plot_dict[key]) return x, y
0c164eba54aa3e6dc551dda8b2af662aa11b0231
543,004
import numbers import warnings def check_threshold(threshold, data, percentile_func, name='threshold'): """ Checks if the given threshold is in correct format and within the limit. If necessary, this function also returns score of the data calculated based upon the given specific percentile function. Note: This is only for threshold as string. Parameters ---------- threshold: float or str If threshold is a float value, it should be within the range of the maximum intensity value of the data. If threshold is a percentage expressed in a string it must finish with a percent sign like "99.7%". data: ndarray an array of the input masked data. percentile_func: function {scoreatpercentile, fastabspercentile} Percentile function for example scipy.stats.scoreatpercentile to calculate the score on the data. name: str, optional A string just used for representing the name of the threshold for a precise error message. Returns ------- threshold: number returns the score of the percentile on the data or returns threshold as it is if given threshold is not a string percentile. """ if isinstance(threshold, str): message = ('If "{0}" is given as string it ' 'should be a number followed by the percent ' 'sign, e.g. "25.3%"').format(name) if not threshold.endswith('%'): raise ValueError(message) try: percentile = float(threshold[:-1]) except ValueError as exc: exc.args += (message, ) raise threshold = percentile_func(data, percentile) elif isinstance(threshold, numbers.Real): # checks whether given float value exceeds the maximum # value of the image data value_check = abs(data).max() if abs(threshold) > value_check: warnings.warn("The given float value must not exceed {0}. " "But, you have given threshold={1} ".format(value_check, threshold)) else: raise TypeError('%s should be either a number ' 'or a string finishing with a percent sign' % (name, )) return threshold
0233ceb1988ff8a523739752bb846a395ff3182a
495,914
def clean_mongo_key(key): """mongodb dict keys cannot contain periods or start with a $""" key = key.replace(".", "_") if key.startswith("$"): key = key.replace("$", "_", 1) return key
47382763f4cde9edc7e520f2541014712d34e115
160,166
from typing import Callable from typing import List def get_random_weights(items, key: Callable[[int], float]) -> List[float]: """Returns the random weights of the items for the random function Parameters ---------- items : List[Item] list of items to find weights of key : function : int -> float function that takes in rarity and returns the drop rate of the item Returns ------- List[float] the list of weights of the items """ weights = [] for i in range(len(items)): weights.append(key(items[i].rarity)) return weights
923e8a45e8f4eff15f222bfd101d5ed4b6d49a7d
387,210
def permute_observation(obs, perm): """Given a permutation, shuffle pixels of the observation.""" return obs.flatten()[perm].reshape(obs.shape)
ac18bce7d344b89cbcba8ea22ebcb92ff3f9c0e9
35,812
def get_achievement(dist): """Return a congratulation message for the distance covered.""" # In the «Строки» ("Strings") lesson you described the logic # for printing an achievement message depending on the distance covered. # Move that code here and replace print() with return. if dist >= 6.5: return 'Отличный результат! Цель достигнута.' elif dist >= 3.9: return 'Неплохо! День был продуктивным.' elif dist >= 2: return 'Маловато, но завтра наверстаем!' else: return 'Лежать тоже полезно. Главное — участие, а не победа!'
c71a804806322e24dd0c4ac4ab6524ac15dd9552
676,129
from typing import Union def _task_id(job: str) -> Union[int, str]: """Tries to extract an integer task ID from a job name. For example, for `job` = '/.../tpu_worker/0:port_name', return 0. Args: job: A job name to extract task ID from. Returns: The task ID on success, or the original job name on failure. """ maybe_task_id = job.rsplit("/")[-1].rsplit(":")[0] try: return int(maybe_task_id) except ValueError: return job
8abc0f87654d3ac6bfa7359cfa58fc0fc6b4b1b6
175,734
def problem_2_5(node): """ Given a circular linked list, implement an algorithm which returns the node at the beginning of the loop. DEFINITION Circular linked list: A (corrupt) linked list in which a node’s next pointer points to an earlier node, so as to make a loop in the linked list. EXAMPLE input: A -> B -> C -> D -> E -> C [the same C as earlier] output: C SOLUTION: - start two node traversals, one moving one node at a time, the other two nodes at a time. - if they meet, then the list has a loop, otherwise no loop. - the place where they meet is k nodes away from the start of the loop, where k is the number of nodes from the beginning of the list to the start of the loop. - move one traverser to the beginning of the list - move both traversers one node at a time until they meet, this is the start of the loop. """ n1 = node # moves one node at a time. n2 = node # moves two nodes at a time. # Advance to the meeting point or reach the end of the list. while n1 != None: n1 = n1.next # This works all the time, we are given that the list has a loop. n2 = n2.next.next if n1 == n2: break # If either n1 or n2 are None, then we found no loop. if n1 == None or n2 == None: return None # Find the starting point of the loop. n1 = node # n2 is still at the point where they met. while n1 != n2: n1 = n1.next n2 = n2.next return n1
90028102aaeb71607cfccf5583218a8c735dc09a
115,841
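A minimal driver for problem_2_5; the Node class below is illustrative and not part of the original snippet:

class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

# Build A -> B -> C -> D -> E -> C (loop back to C), as in the docstring example
nodes = {c: Node(c) for c in "ABCDE"}
for a, b in [("A", "B"), ("B", "C"), ("C", "D"), ("D", "E"), ("E", "C")]:
    nodes[a].next = nodes[b]

assert problem_2_5(nodes["A"]) is nodes["C"]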
def _crc_update(crc, data, mask, const): """ CRC8/16 update function taken from _crc_ibutton_update() function found in "Atmel Toolchain/AVR8 GCC/Native/3.4.1061/ avr8-gnu-toolchain/avr/include/util/crc16.h" documentation. @param[in] crc current CRC value @param[in] data next byte of data @param[in] mask CRC mask, must be 0xff for CRC8 and 0xffff for CRC16 @param[in] const CRC polynomial constant @returns Next CRC value either 8 or 16 bit depending on mask """ crc = (crc & mask) ^ (data & 0xff) for _ in range(8): if crc & 1: crc = (crc >> 1) ^ const else: crc >>= 1 return crc & mask
6538b1720faca541b3ab741451e0dc9dcd20c77c
634,678
from datetime import datetime def sample_seconds_to_time(seconds): """Convert the seconds float to a time string.""" return datetime.utcfromtimestamp(seconds).strftime('%H:%M:%S.%f')
96ca11c4d03fe22e1160dfd787b99ee35ed9ec69
351,169
import builtins def _parse_warning(warn_str): """Reverse-engineer a warning string Parameters ---------- warn_str : string Returns ------- (str, Warning, str, int) message, category, filename, lineno """ tokens = warn_str.rstrip('\n').split(" ") # The first token is # "[filename]:[lineno]:" filename, lineno, _ = tokens[0].split(':') # The second token is # "[category name]:" category = getattr(builtins, tokens[1][:-1], RuntimeWarning) message = " ".join(tokens[2:]) return message, category, filename, int(lineno)
803826059f3bd2cfdd84d2ec7bdf0ef9c97b5eaa
646,879
import unicodedata def char_category(char): """Return the character's unicode category""" return unicodedata.category(char)[0]
5bd0b92bc025d50ae0a9ea193dd08e076e17ec28
312,480
def _encoded_str_len(l): """ Compute how long a byte string of length *l* becomes if encoded to hex. """ return (l << 2) / 3 + 2
c4656413acae81d21246aecd6a6ad624483839f4
105,745
import math def angle_energy(theta, fc, theta0): """ Calculate the angle energy using the harmonic potential. Args: theta (float): angle between atoms [degrees] fc (float): force constant [kcal/mol] theta0 (float): equilibrium angle [degrees] Returns: e_angle (float): energy of angle [kcal/mol] """ theta = math.pi/180.0 * theta # degrees to radians theta0 = math.pi/180.0 * theta0 # degrees to radians return 0.5 * fc * (theta - theta0)**2
d39f35fd56d45b12a625d7856d0bac9e2fe762d3
550,449
def enumeration(*args): """ Return a value check function which raises a value error if the value is not in a pre-defined enumeration of values. If you pass in a list, tuple or set as the single argument, it is assumed that the list/tuple/set defines the membership of the enumeration. If you pass in more than one argument, it is assumed the arguments themselves define the enumeration. """ assert len(args) > 0, 'at least one argument is required' if len(args) == 1: # assume the first argument defines the membership members = args[0] else: # assume the arguments are the members members = args def checker(value): if value not in members: raise ValueError(value) return checker
a16d8a6bd7b8beff7cb3db54e1dce0885a5fdb1f
243,355
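Usage sketch for enumeration:

check_colour = enumeration("red", "green", "blue")
check_colour("red")       # accepted, no exception
# check_colour("purple")  # would raise ValueError("purple")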
def get_rnn_hidden_state(h): """Returns h_t transparently regardless of RNN type.""" return h if not isinstance(h, tuple) else h[0]
e860cae5f12cb1cde7e3fe127105ebda50c81d05
288,256
from typing import Dict def get_test_mode_enabled(env: Dict[str, str]) -> bool: """Get the test mode, which is used only for unit testing. Note that the env value has a '.' in it, which should be only settable through explicit os.environ setting.""" return env.get('TEST.MODE', 'x') == 'unit-test'
e7ce438806723601f212f3d1ee3332fb4fd9d2c8
494,137
def ping(event): """Responds 'pong' to your 'ping'.""" return 'pong'
48d07ca1e513b28212c56758c41e1757b5f75468
697,340
def make_template(channel, width): """ Returns a template of the specified width, with its 25% position being at where the lower-level driver triggered. """ pos = int(round(channel.shape[0] * .35/(.35+.25) - width*.25)) return channel[pos:pos + width], pos
92a9b4c7e3396b8a30f017f4ff5ee223ba588d51
135,866
def drop_series_w_gaps(df, series_id, date_col, target, max_gap=1, output_dropped_series=False): """ Removes series with missing rows df: pandas df series_id: str Column name with series identifier date_col: str Column name of datetime column target: str Column name of target column max_gap: int number of allowed missing timestep output_dropped_series: bool (optional) allows return of pandas df of series that do not satisfy max_gap criteria Returns: -------- pandas df(s) """ if not isinstance(max_gap, int): raise TypeError('max gap must be an int') df.sort_values(by=[date_col, series_id], ascending=True, inplace=True) series_max_gap = df.groupby([series_id]).apply(lambda x: x[date_col].diff().max()) median_timestep = df.groupby([series_id])[date_col].diff().median() series_to_keep = series_max_gap[(series_max_gap / median_timestep) <= max_gap].index.values sampled_df = df.loc[df[series_id].isin(series_to_keep), :] dropped_df = df.loc[~df[series_id].isin(series_to_keep), :] if output_dropped_series: return sampled_df, dropped_df else: return sampled_df
e50a615c5459089aea33cdfa644d0aa1f97ec7ae
457,767
import pickle def readArrayPkl(fn): """Read a pickle file, expected format is a NxM numpy array """ fh=open(fn,'rb') im=pickle.load(fh) fh.close() return im
40b412f2c7af4335de1e6665a6060f02bd7b66a1
488,553
def data2_eq(data2): """ Returns a function that matches to the specified data2 value. """ return lambda m: m.data2 == data2
4d71f6f88f6ea264dbf1058a200f9700b8ce3574
51,131
def get_fraction_of_tweets_in_language(tweets): """Returns fraction of languages in a tweet dataframe as a dictionary Args: tweets (pandas.DataFrame): Tweet DataFrame as returned by `get_latest_tweets` Returns: language_fractions (dict): {languagecode (str): fraction (float)} """ language_fractions = tweets['lang'].value_counts(normalize=True) language_fractions = language_fractions.to_dict() return language_fractions
f6725f5ef4dc448eb82e0bf958eb10ea4c0bd0dd
121,230
import json def open_config(config_name): """opens the given config file (path as string) and returns the data (dictionary) stored in it""" with open(config_name, 'r') as configfile: data = json.load(configfile) configfile.close() return data
6e6b6c9f4fcd81f3b8480deb088475e5dd0aaa70
405,997
import math def natural_log(x): """Finds the natural log of some number""" return math.log(x)
94c2b1fb72b7adc420261c7ce9113900dc4df055
398,466
from typing import MutableMapping def test_mock_nested_dict() -> MutableMapping: """Mock nested dictionary.""" return {"a": 1, "c": {"a": 2, "b": {"x": 5, "y": 10}}, "d": [1, 2, 3]}
29b12b28f02238ef91767af235d865ec04a19568
634,138
def prepend_all(stack, list): """ Prepend all of the items in the given stack onto the head of the given list. (Naturally tail-recursive.) For example: prepend_all([3, 2, 1], [4, 5]) -> [1, 2, 3, 4, 5] """ if stack == (): return list else: head, tail = stack return prepend_all(tail, (head, list))
a68ca7ae428ddee25331e1bfaa3abfb134c8303b
579,772
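Note that prepend_all works on cons-cell style nested tuples rather than Python lists, so the docstring's list notation is shorthand. In tuple form:

# (3, (2, (1, ()))) is the stack 3 -> 2 -> 1; () is the empty list
result = prepend_all((3, (2, (1, ()))), (4, (5, ())))
# result == (1, (2, (3, (4, (5, ())))))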
import operator def _normalize_slice_or_index(index, length): """ Normalize a slice or index for use with __delitem__ or __setitem__. For slices with positive step, returns a slice that's equivalent for the purposes of __delitem__ and __setitem__ operations. For slices with negative step, a normalized slice representing the reverse of the given slice is returned: note that in this case, the matching *added* and *removed* lists will need to be reversed. Slices with a step of 1 or -1 are normalized to a single integer index, referring to the position of the first element referenced by the slice. Similarly, slices that refer to only a single element of the corresponding list (for example, a slice of `[1::10]` applied to a list of length 5) are normalized to the index that refers to that same element. Empty slices are also normalized to a single index. Note that in the case of an empty slice, the corresponding __delitem__ or __setitem__ operation does not cause any list change, so does not issue a notification. So the normalized index in this case is unused in current code. A normalized slice will have 0 <= start < stop <= length and a step >= 2. It should further satisfy start + step < stop. The stop will always be one larger than the last element referenced by the slice. For a plain integer index, it's assumed -length <= index < length on input (but this is not explicitly checked). A normalized output index will satisfy 0 <= index <= length. Parameters ---------- index : slice or integer The slice to normalize length : int The length of the list to which the slice will be applied. Returns ------- reversed : bool True if the returned slice is in the opposite direction to the original, else False. normalized_index : slice or integer An equivalent (or reversed equivalent) normalized slice or index. """ if not isinstance(index, slice): index = operator.index(index) return False, index + length if index < 0 else index start, stop, step = index.indices(length) reversed = step < 0 if reversed: start, stop, step = ( min(stop - step + (start - stop) % step, length), start + 1, -step, ) # Reduce stop so that equivalent slices give identical normalized # slices (e.g., del x[3:7:2] is equivalent to del x[3:6:2]). stop -= (stop - start - 1) % step # For a step of 1, a single item, or an empty slice, return a simple index. if step == 1 or stop - start <= step: return reversed, start else: return reversed, slice(start, stop, step)
dfcd9d27db3e53faa2304ce6a4109221f138287d
293,556
def automatic_nch(f_min, f_max, spacing): """How many channels are available in the spectrum :param f_min Lowest frequency [Hz] :param f_max Highest frequency [Hz] :param spacing Channel width [Hz] :return Number of uniform channels >>> automatic_nch(191.325e12, 196.125e12, 50e9) 96 >>> automatic_nch(193.475e12, 193.525e12, 50e9) 1 """ return int((f_max - f_min) // spacing)
6973ccfeaeba578bb9f6492ed06117161da81ef5
562,247
def container_instance_to_string(id): """Create an id string from a ContainerInstance""" return '%s_%d' % (id.service_name, id.instance)
b24d4fec4de29aca79a8c0df19e32c6f1c4d5e24
388,404
import math def euclidean_distance(p, q): """ Computes the distance between two points p and q. """ return math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)
6e460743b8171e1ca8ca780e26c3b5e440dd4b79
670,561
def l2_norm(lst): """ Calculates the squared l2 norm (sum of squares) of a list of numbers """ return sum([x*x for x in lst])
4324b34ecdf255909d0949cd2adb6a9ecd184c75
337,608
def usage(err=''): """ Prints the Usage() statement for the program """ m = '%s\n' %err m += ' Default usage is to test the basic API calls.\n' m += ' ' m += ' sfEntity -cq -eCase CaseNumber like 25455 \n' m += ' or\n' m += ' sfEntity -cq -eTask BranchLabel__c like %_michel% \n' m += ' or\n' m += ' sfEntity -ce all (to return list of valid entities)\n' return m
2a80c3b1e08df3d53f0bf6340265087acf808e62
186,005
def beale(x): """ 2D function, global minimum at (3,.5) """ x,y = x.ravel() return (1.5-x+x*y)**2+(2.25-x+x*y**2)**2+(2.625-x+x*y**3)**2
cae937f3a207d97c987935c64d272f8441d31826
167,465
def str2cat(category): """Return a category name starting with Category.""" prefix = "Category:" if not category.startswith(prefix): category = "%s%s" % (prefix, category) return category.replace(' ', '_')
bc269ef77296acc716514dc3840609a253f450f0
296,549
import torch def _batched_dotprod(x: torch.Tensor, y: torch.Tensor): """ Takes two tensors of shape (N,3) and returns their batched dot product along the last dimension as a tensor of shape (N,). """ return torch.einsum("ij,ij->i", x, y)
96c9a402e9684d850add04aa24621e1ce7cc5626
126,928
def copy_meta(df_source, df_target): """Internal metadata copy tool Args: df_source (DataFrame): Original dataframe df_target (DataFrame): Target dataframe; receives metadata Returns: DataFrame: df_target with copied metadata """ df_target._grouped_by = getattr(df_source, "_grouped_by", None) df_target._plot_info = getattr(df_source, "_plot_info", None) df_target._meta = getattr(df_source, "_meta", None) return df_target
330e62f6aa7ef79fc6e4366dac6334a835b0cebd
407,685
def parseHostPort(host, defaultport, missingportok=False): """Parse host[:port] string and tuples Specify 'host[:port]' or a (host, port) tuple for the mandatory argument. If the port specification is missing, the value of the defaultport is used. On wrong input, this function will raise a ValueError. """ if isinstance(host, (tuple, list)): host, port = host elif ':' in host: host, port = host.rsplit(':', 1) else: port = defaultport if not missingportok and port is None: raise ValueError('a valid port is required') if port is not None: try: port = int(port) except ValueError: raise ValueError('invalid port number: ' + port) from None if not 0 < port < 65536: raise ValueError('port number out of range') if ':' in host: raise ValueError('host name must not contain ":"') return host, port
12baf8bc4947947451a5cc47a7a9fbf4aa4f3b2d
469,207
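Example calls for parseHostPort (hostnames here are illustrative):

parseHostPort("example.com:8080", 1234)     # -> ("example.com", 8080)
parseHostPort("example.com", 1234)          # -> ("example.com", 1234)
parseHostPort(("example.com", 8080), 1234)  # -> ("example.com", 8080)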
def calculate_macd_and_spreadvssignal(my_verbose, this_data, cols_to_calculate): """ Gets full dataset, and calculates MACD and spread between MACD and signal Important to use full dataset to ensure columns are same between training and test data. Parameters ---------- my_verbose : string parameter to control printing of steps and plotting of outlier graphs this_data : dataframe The training + validation + test dataset cols_to_calculate : list All columns aside from target variable. Target var is highly correlated with one of Gold futures, thus not necessary to include Returns ------- this_data A dataframe containing additional MACD and MACD-Signal spread per feature """ for this_col in cols_to_calculate: exp1 = this_data[this_col].ewm(span=12, adjust=False).mean() exp2 = this_data[this_col].ewm(span=26, adjust=False).mean() macd = exp1-exp2 signal = macd.ewm(span=9, adjust=False).mean() macd_signal_spread = macd - signal this_data["{}_macd".format(this_col)] = macd.values #.tolist() this_data["{}_macd_signal_spread".format(this_col)] = macd_signal_spread.values #.tolist() if my_verbose==True: print("\nMACD and spread computed") #### transformed_data.to_csv(r'MACD.csv') return this_data
3b58dfcef6f597dab410db03ac91d36b13aad5d9
347,983
import re def normalize_en(s): """ Processes an English string by removing non-alphabetical characters (besides .!?). """ s = s.lower().strip() s = re.sub(r"([.!?])", r" \1", s) s = re.sub(r"[^\w.!?]+", r" ", s, flags=re.UNICODE) return s
41fb32f8967a550b750c4ff6ca54cc59381529be
367,433
def get_list_depth(list_) -> int: """ Get the number of nesting of list """ if isinstance(list_, list) and len(list_) >= 1: return 1 + max(get_list_depth(item) for item in list_) else: return 0
4d48c8316c63befa5a0ee29535ddc38de1ecfac8
494,611
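Quick examples of get_list_depth:

get_list_depth(5)              # 0 (not a list)
get_list_depth([1, 2, 3])      # 1
get_list_depth([1, [2, [3]]])  # 3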