content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def sort_lists(sorted_indices, list_to_sort):
    """Reorder a list according to a list of indices.

    :param sorted_indices: indices in the desired order, e.g. [0, 4, 2, 3]
    :param list_to_sort: the list to reorder
    :return: new list with elements taken in the given index order
    """
    reordered = []
    for position in sorted_indices:
        reordered.append(list_to_sort[position])
    return reordered
215bc8f29125dcd327b608c0b9d12f7614bd3403
696,134
def sum_of_coordinates(point):
    """Given a 2D point (a Point object), return the sum of its X- and
    Y-coordinates."""
    x_coord = point.getX()
    y_coord = point.getY()
    return x_coord + y_coord
afe177220460366aae35e9012119281d887c247e
696,136
def name_value_str_handler(name):
    """Return a generic handler for plain string fields.

    The handler wraps its value in a one-key dict keyed by `name`;
    extra keyword arguments are accepted but ignored.
    """
    def handler(value, **kwargs):
        return {name: value}

    return handler
58c1805a9b081edef850661b7b30a49d044d1a2f
696,137
from typing import AnyStr
import json


def load_json_dict(text: AnyStr) -> dict:
    """Parse JSON text and verify the top-level value is a dict.

    Raises
    ------
    ValueError
        if `text` is not valid JSON or is valid JSON but not a dict
    """
    parsed = json.loads(text)
    if isinstance(parsed, dict):
        return parsed
    raise ValueError('not a dict')
3f35c6eed694f8b8087a7ee1252ef9fa99864280
696,138
import re


def remove_superscript_numbers_in_passage(text):
    """Remove superscript digits (each run with one optional trailing space)
    from a string. Mainly used to hide passage numbers in a block of text.

    :param text: String to process
    :type text: str
    :return: String with the superscript numbers removed
    :rtype: str

    >>> remove_superscript_numbers_in_passage('⁰ ¹ ² ³ ⁴ ⁵ ⁶ ⁷ ⁸ ⁹ ')
    ''
    >>> remove_superscript_numbers_in_passage('E=mc²')
    'E=mc'
    """
    superscript_run = r'[⁰¹²³⁴⁵⁶⁷⁸⁹]+\s?'
    return re.sub(superscript_run, '', text)
767630743a8a55e1bcb139dc303ffbd778734690
696,139
def default(input_str, name):
    """Return `name` when `input_str` is falsy, otherwise the stripped
    `input_str`."""
    return input_str.strip() if input_str else name
1ba52bbe9118513a0f77abffdbcf3835c5335b35
696,140
def trapz(x, y):
    """Trapezoidal integration as a plain loop (numba-friendly form).

    Parameters
    ----------
    x : array_like
        Sample points corresponding to the `y` values.
    y : array_like
        Input array to integrate.

    Returns
    -------
    total : float
        Definite integral as approximated by the trapezoidal rule.
    """
    num_points = x.shape[0]
    total = 0
    for idx in range(num_points - 1):
        width = x[idx + 1] - x[idx]
        total += 0.5 * (y[idx] + y[idx + 1]) * width
    return total
316d97cc8703dc28c8821c68e21e25bf144a8a54
696,142
def find_sum_subseq(nums, target):
    """Find a contiguous subsequence of length >= 2 in `nums` summing to `target`.

    `nums` must be a list of positive integers and `target` a positive integer.
    Returns `(low, high)` slice indices of such a subsequence if one exists,
    else `False`.

    We slide a variable sized window across the `nums` array and track the
    cumulative sum of array vals, ensuring the window always has length at
    least 2.  As `nums` contains only positive integers, adding an element to
    the end of the window always increases the array sum, whilst removing an
    element from the start of the window always decreases the array sum.
    """
    low, high = 0, 2
    cum_sum = nums[0] + nums[1]
    while high < len(nums):
        # Check if the current subsequence (of length at least 2) sums to `target`
        if cum_sum == target:
            return low, high
        # If the cumulative sum is too low or our subsequence has length 2, add another element
        elif cum_sum < target or high - low == 2:
            cum_sum += nums[high]
            high += 1
        # Otherwise the cumulative sum exceeds the target and we can remove an element
        else:
            cum_sum -= nums[low]
            low += 1
    # Check if we found a suitable subsequence on the last iteration
    return (low, high) if cum_sum == target else False
eb141ec0892d1d2baf256910e0368704ccda8299
696,143
def key_gen(params):
    """Generate a fresh (public, private) key pair.

    `params` unpacks as (_, generator, order); the private key is a random
    element drawn from the order and the public key is priv * generator.
    """
    _, generator, order = params
    priv = order.random()
    pub = priv * generator
    return (pub, priv)
c554fdcda209d591ac952ea43a69163f0448dd28
696,144
def fill_na(symbols_map, symbols_list):
    """Return a copy of `symbols_map` with 'N/A' filled in for any symbol
    from `symbols_list` that is not already mapped."""
    filled = dict(symbols_map)
    for symbol in symbols_list:
        filled.setdefault(symbol, 'N/A')
    return filled
2aafe8e083e23938b002b4dc4d0ad39401cd66fb
696,145
def is_dummy_vector(vector, vector_length=None):
    """Return True if `vector` equals the default vector (all entries 1e-7),
    False otherwise. `vector_length` defaults to len(vector)."""
    length = len(vector) if vector_length is None else vector_length
    return vector == [1e-7] * length
0d53a6e9a6fdc7d382ad57630fdbf5e73a32507a
696,147
def key_matches_x509_crt(key, crt):
    """
    Verify that the public key derived from the given private key matches the
    public key in the given X.509 certificate.

    :param object key: A private key object created using load_privkey()
    :param object crt: An X.509 certificate object created using load_x509()
    :rtype bool: True, iff the key matches the certificate
    """
    crt_numbers = crt.public_key().public_numbers()
    key_numbers = key.public_key().public_numbers()
    return crt_numbers == key_numbers
f3a9cd3cbfc9df9d0095c0562c3251174a98c141
696,155
def _clean_header(text: str, is_unit: bool = False) -> str:
    """
    Extract header text from each raw trajectory summary csv file header.

    :param text: Raw trajectory summary csv column header text.
    :param is_unit: If True, return text with brackets for units.
    :returns: Formatted text.
    """
    # Headerless columns come through containing "Unnamed"; treat as empty.
    if "Unnamed" in text:
        return ""
    # Drop hashtags and collapse whitespace runs to single spaces.
    cleaned = " ".join(text.replace("#", "").split())
    return f" ({cleaned})" if is_unit else cleaned
000ab01267e78d621fd8a8e6844523e7fa909ba4
696,156
def get_conv_type(data):
    """Return the single convolution type used throughout `data`, asserting
    that every row of `data.conv_type` holds the same value."""
    first = data.conv_type.iloc[0]
    assert (data.conv_type == first).all()
    return first
5b66291790b921a917643cf3a3b2a96fc3c58243
696,160
def bedToMultizInput(bedInterval):
    """
    Generate the proper input for fetching multiz alignments.

    input: pybedtools Interval class
    output: (chr, start, stop, strand) with strand encoded as +1/-1
    """
    # Multiz chromosome names carry no "chr" prefix.
    chromosome = bedInterval.chrom.replace("chr", "")
    strand = 1 if bedInterval.strand == "+" else -1
    return (chromosome, bedInterval.start, bedInterval.stop, strand)
1f8492872144c301314bcc217948e0449b242340
696,165
def find_with(f, iter, default=None):
    """Return the first value in `iter` satisfying predicate `f`, or
    `default` when nothing matches."""
    matches = (candidate for candidate in iter if f(candidate))
    return next(matches, default)
8fac8902b8baaf1a28a83227bef3933a7b8cb293
696,168
def athinput(filename): """Read athinput file and returns a dictionary of dictionaries.""" # Read data with open(filename, 'r') as athinput: # remove comments, extra whitespace, and empty lines lines = filter(None, [i.split('#')[0].strip() for i in athinput.readlines()]) data = {} # split into blocks, first element will be empty blocks = ('\n'.join(lines)).split('<')[1:] # Function for interpreting strings numerically def typecast(x): if '_' in x: return x try: return int(x) except ValueError: pass try: return float(x) except ValueError: pass try: return complex(x) except ValueError: pass return x # Function for parsing assignment based on first '=' def parse_line(line): out = [i.strip() for i in line.split('=')] out[1] = '='.join(out[1:]) out[1] = typecast(out[1]) return out[:2] # Assign values into dictionaries for block in blocks: info = list(filter(None, block.split('\n'))) key = info.pop(0)[:-1] # last character is '>' data[key] = dict(map(parse_line, info)) return data
2fb9f499ff75fc3b61afd655baea7b92132fabcb
696,169
def vals_sortby_key(dict_to_sort):
    """Sort a dict alphanumerically by key, then return its values in that
    order. Keys are expected to look like "feat_00, feat_01" or
    "stage_00, stage_01" etc."""
    sorted_items = sorted(dict_to_sort.items())
    return [value for _, value in sorted_items]
4c5537dc555d92b1f4084821aa56cfb118d2a871
696,170
def Delta(a, b, gapopen=-0.5, gapext=-0.7):
    """Helper for swalignimpconstrained implementing affine gap penalties.

    Returns 0 when b > 0, the gap-open penalty when b == 0 and a > 0,
    and the gap-extension penalty otherwise.
    """
    if b > 0:
        return 0
    opening_gap = (b == 0 and a > 0)
    return gapopen if opening_gap else gapext
303ee6d4436f3338ea130416459c22d40aff0272
696,172
def _get_byte_size_factor(byte_suffix: str) -> int:
    """Return the multiplier for a byte-size suffix (case-insensitive).

    Supports b, k/kb, m/mb, g/gb and t/tb; raises ValueError otherwise.
    """
    factors = {
        "b": 1,
        "k": 1024, "kb": 1024,
        "m": 1024 ** 2, "mb": 1024 ** 2,
        "g": 1024 ** 3, "gb": 1024 ** 3,
        "t": 1024 ** 4, "tb": 1024 ** 4,
    }
    factor = factors.get(byte_suffix.lower())
    if factor is None:
        raise ValueError("Unsupported byte suffix")
    return factor
eb7b0aaf03c6b231306980568fc93a45303d022b
696,178
import math


def constrained_factorial(x):
    """Same as `math.factorial`, but raises `ValueError` if x is under 0,
    over 32,767, or not (approximately) a whole number.

    The original let non-integral floats fall through to math.factorial,
    whose failure mode differs across Python versions (ValueError vs
    TypeError); we now raise a consistent ValueError ourselves.
    """
    if not (0 <= x < 32768):
        raise ValueError(f"{x!r} not in working 0-32,767 range")
    # Accept values that are integral up to float rounding noise.
    if math.isclose(x, int(x), abs_tol=1e-12):
        x = int(round(x))
    else:
        raise ValueError(f"{x!r} is not a whole number")
    return math.factorial(x)
5bb853a479279aa124a271d9ff719e060bd18608
696,179
from typing import Any


def default_function(n: int, value: Any = None):
    """Create a dummy function to use as a default where a callable is
    expected.

    `n` is the number of parameters the callable must accept and `value` is
    the value it returns; unsupported arities raise.
    """
    if n == 0:
        return lambda: value
    if n == 1:
        return lambda _: value
    if n == 2:
        return lambda _, __: value
    raise Exception('Default function with {} parameters is not supported.'.format(n))
d8245fed39e423392acfbffd775379a2e15a8848
696,184
def byte_literal(b):
    """
    Return `b` as a byte literal.

    Integers are converted to a single-byte `bytes`; anything else is assumed
    to already be a byte literal and returned unchanged.  Exists for
    compatibility with Python 2.6 and 3.x.
    """
    return bytes([b]) if isinstance(b, int) else b
88756b37de6884b3e68373756af76e849815786f
696,187
def _get_extent(gt, cols, rows):
    """
    Return the corner coordinates from a geotransform.

    :param gt: geotransform
    :type gt: (float, float, float, float, float, float)
    :param cols: number of columns in the dataset
    :type cols: int
    :param rows: number of rows in the dataset
    :type rows: int
    :rtype: list of (list of float)
    :return: List of four corner coords: ul, ll, lr, ur

    >>> gt = (144.0, 0.00025, 0.0, -36.0, 0.0, -0.00025)
    >>> _get_extent(gt, 4000, 4000)
    [[144.0, -36.0], [144.0, -37.0], [145.0, -37.0], [145.0, -36.0]]
    """
    corners = []
    x_edges = [0, cols]
    y_edges = [0, rows]
    for px in x_edges:
        for py in y_edges:
            x = gt[0] + (px * gt[1]) + (py * gt[2])
            y = gt[3] + (px * gt[4]) + (py * gt[5])
            corners.append([x, y])
        # Reverse so corners come out in ul, ll, lr, ur order.
        y_edges.reverse()
    return corners
95dfa01251925522b282450219d1d040f928f405
696,188
def switch(*args):
    """:yaql:switch

    Returns the value of the first argument for which the key evaluates to
    true, null if there is no such arg.

    :signature: switch([args])
    :arg [args]: mappings with keys to check for true and appropriate values
    :argType [args]: chain of mapping
    :returnType: any (types of values of args)

    .. code::

        yaql> switch("ab" > "abc" => 1, "ab" >= "abc" => 2, "ab" < "abc" => 3)
        3
    """
    for candidate in args:
        if candidate.source():
            return candidate.destination()
    # Falls through to an implicit None when no key evaluates to true.
c0d152b4004866826c4892a1340865b79feefa2c
696,194
def extract_segment_types(urml_document_element, namespace):
    """Return a map from segment node IDs to their segment type
    ('nucleus', 'satellite' or 'isolated')."""
    segment_types = {}
    for seg in urml_document_element.iter('nucleus', 'satellite'):
        segment_types[namespace + ':' + seg.attrib['id']] = seg.tag
    # Any 'segment' node not already typed above is isolated.
    for seg in urml_document_element.iter('segment'):
        seg_id = namespace + ':' + seg.attrib['id']
        segment_types.setdefault(seg_id, 'isolated')
    return segment_types
30d2050055a9c2e66da66e3663df27a9cc6852e1
696,196
def convert2voxels(x_um_rw, imExtends, voxelSize):
    """
    Convert real-world um coordinates to 0-origin voxel coordinates.

    :param x_um_rw: coordinates in real world frame, dimensions in um
    :param imExtends (list of lists): the first list are the initial extends
        of the image, and the second list the final ones; they localize the
        image in the real world frame (only the initial extends are used)
    :param voxelSize: voxel size
    :return: coordinates in 0 centered frame, dimensions in voxels
    """
    # Shift so the image origin lands at 0, then rescale um -> voxels.
    shifted = x_um_rw - imExtends[0]
    return shifted / voxelSize
123414615e40bb41802b8f5f072bb994f859f3d7
696,197
def fill_big_gaps(array, gap_size):
    """
    Insert values into the given sorted list wherever consecutive entries
    differ by more than ``gap_size``.

    All values in the given array are preserved, even if they are within
    ``gap_size`` of one another.

    >>> fill_big_gaps([1, 2, 4], gap_size=0.75)
    [1, 1.75, 2, 2.75, 3.5, 4]
    """
    if len(array) == 0:
        raise ValueError("Input array must be len > 0")
    result = []
    previous = array[0]
    for value in array:
        # Step forward in gap_size increments until within tolerance of
        # `value`; the small epsilon absorbs float rounding error.
        while value - previous > gap_size + 1e-15:
            previous = previous + gap_size
            result.append(previous)
        result.append(value)
        previous = value
    return result
11ecb164b9e54c75db249ca27cbbdd582ed47945
696,201
from pathlib import Path


def get_package_root() -> Path:
    """Returns package root folder.

    Raises ValueError when the expected "mapservices" directory is missing,
    which signals that the project structure changed.
    """
    conf_folder = Path(__file__).parent.parent
    subdir_names = [entry.name for entry in conf_folder.iterdir() if entry.is_dir()]
    if "mapservices" not in subdir_names:
        msg = (
            f"Not the right root directory. ({conf_folder.absolute()}) "
            "Did you change the project structure?"
        )
        raise ValueError(msg)
    return conf_folder
7ff402b510528f7256ee6033ecbe6d5054bf487d
696,202
def thrift_attrs(obj_or_cls):
    """Obtain Thrift data type attribute names for an instance or class.

    thrift_spec maps field ids to tuples whose second entry is the name.
    """
    return [spec[1] for spec in obj_or_cls.thrift_spec.values()]
7c4f75f0e00ca08ca8d889d537ceee355c4c6552
696,207
def find_one_or_more(element, tag):
    """Return subelements with `tag`, checking that there is at least one."""
    found = element.findall(tag)
    assert len(found) >= 1, 'expected at least one <%s>, got %d' % (tag, len(found))
    return found
6faee2e8cad1a943da6499cb0ab987cafa79104a
696,209
def find_max_simultaneous_events(events):
    """
    Question 14.5: Given a list of intervals representing start and end times
    of events, find the maximum number of simultaneous events that we can
    schedule.

    Intervals are treated as inclusive: an event starting exactly when
    another ends counts as simultaneous with it.  The original sorted
    transitions by time only, so the start/end order at ties (and hence the
    answer) depended on input order; the tie-break below makes it
    deterministic.
    """
    transitions = []
    for start, end in events:
        transitions.append((start, True))
        transitions.append((end, False))

    # Sort by time; at equal times process starts (True) before ends so that
    # touching intervals are counted as overlapping.
    transitions.sort(key=lambda t: (t[0], not t[1]))

    simultaneous = 0
    max_simultaneous = 0
    for _, is_start in transitions:
        simultaneous += 1 if is_start else -1
        max_simultaneous = max(max_simultaneous, simultaneous)
    return max_simultaneous
ffacabf17aa89dc61903a1aab44bace923f224a4
696,210
import math


def distance(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx ** 2 + dy ** 2)
b7b4662a88c9afd4b63d6ab04fc51749916749f1
696,214
def repeated_definitions_of_repo_in_config(config, repo):
    """Check if there are multiple definitions of the same repository in a
    pre-commit configuration object.

    Parameters
    ----------
    config : dict
        Pre-commit configuration dictionary.
    repo : str
        Repository to check for multiple definitions.

    Returns
    -------
    bool
        ``True`` if the repository is defined more than once in the
        configuration dictionary, ``False`` otherwise.
    """
    occurrences = sum(1 for entry in config["repos"] if entry["repo"] == repo)
    return occurrences > 1
10f44cd6d6d1ef2313a2b8b6ab20b81df8294565
696,217
def get_links_from_wiki(soup, n=5, prefix="https://en.wikipedia.org"):
    """
    Extracts the first `n` internal links from a Wikipedia article and adds
    `prefix` to them.

    Parameters
    ----------
    soup : BeautifulSoup
        Wikipedia page
    n : int
        Number of links to return
    prefix : str, default="https://en.wikipedia.org"
        Site prefix

    Returns
    -------
    list
        List of links
    """
    links = []
    # Get div with article contents
    content_div = soup.find("div", class_="mw-parser-output")
    for element in content_div.find_all("p") + content_div.find_all("ul"):
        # In each paragraph find all <a href="/wiki/article_name"></a> and
        # extract "/wiki/article_name"
        for anchor in element.find_all("a", href=True):
            if len(links) >= n:
                # BUG FIX: the original `break` only exited the inner loop,
                # so the outer loop kept scanning after n links were found.
                return links
            href = anchor["href"]
            if (
                href.startswith("/wiki/")
                and len(href.split("/")) == 3
                and "." not in href
                and "(" not in href
            ):
                links.append(prefix + href)
    return links
5d0b77bf82cc5e09cc3db3fe9e0bd0b58bc81f55
696,218
import bisect


def find_ge(array, x):
    """Find the leftmost item greater than or equal to x, i.e. the smallest
    value that is >= x.  Raises ValueError when every item is smaller.

    Example::

        >>> find_ge([0, 1, 2, 3], 1.0)
        1
    """
    position = bisect.bisect_left(array, x)
    if position == len(array):
        raise ValueError
    return array[position]
6f1aaa40da6d00acd15ee86d1db161f714c6d5d3
696,219
def reverse_list(list):
    """
    :param: list
    :return: list

    Return a new list whose elements are in reversed order,
    e.g. reverse_list([30,40,50]) returns [50,40,30].

    The original indexed `list[0]` unconditionally and crashed with an
    IndexError on an empty input; an empty list now returns [].
    """
    reversed = []
    # Prepending each element in turn reverses the order.
    for item in list:
        reversed.insert(0, item)
    return reversed
a3370aa505e19a4e4bca76d765c8f3859ac106d2
696,221
def inputInt(prompt, min=0, max=100):
    """Prompt the user until they enter a valid integer within [min, max].

    Invalid entries print an error and re-prompt; Ctrl+C exits the program
    cleanly with status 0.
    """
    while True:
        try:
            i = int(input(prompt))
        except KeyboardInterrupt:
            print()
            exit(0)
        except Exception:
            # Non-numeric input: report and re-prompt.
            print(f"Valeur invalide")
            continue
        if min <= i <= max:
            return i
        # BUG FIX: the original f-string used "${min}"/"${max}"
        # (JS template-literal syntax), printing a literal "$".
        print(f"La valeur doit être entre {min} et {max}")
fa1a9ca1bcbdbf9dd46c37ecb242bb012c14d9e9
696,222
from pathlib import Path


def is_single_repository(repo_path: str) -> bool:
    """
    Return True if repo_path points to a single repository (regular or bare)
    rather than a folder containing multiple repositories.
    """
    # Regular repositories contain a .git entry.
    if Path("{}/.git".format(repo_path)).exists():
        return True
    # Bare repositories expose hooks/ and refs/ at the top level.
    has_hooks = Path("{}/hooks".format(repo_path)).exists()
    has_refs = Path("{}/refs".format(repo_path)).exists()
    return has_hooks and has_refs
c9b2c709984b79a36c36d898d0e337d7a9c3f725
696,230
def requires_to_requires_dist(requirement):
    """Compose the version predicates for requirement in PEP 345 fashion."""
    predicates = [op + ver for op, ver in requirement.specs]
    if not predicates:
        return ''
    return " (%s)" % ','.join(sorted(predicates))
1a394de51d18b0a3cc4cb922364d352a29bccb09
696,231
def load_cows(filename):
    """
    Read the contents of the given file, which contains comma-separated
    "cow name,weight" lines, and return a dictionary of name -> weight.

    Parameters:
    filename - the name of the data file as a string

    Returns:
    a dictionary of cow name (string), weight (int) pairs
    """
    cows = {}
    with open(filename, 'r') as f:
        for line in f:
            # Each line is "name,weight"; int() tolerates the trailing
            # newline left on the weight field.
            name, weight = line.split(',')
            cows[name] = int(weight)
    return cows
6245d9f20791316e5e7f370a8d53d8d590ff87c7
696,232
import math


def create_space(lat, lon, s=10):
    """Create an s km x s km square centered on (lat, lon).

    Returns (lat - v, lon - v, lat + v, lon + v) where v is the half-side in
    degrees.
    """
    # Half-side in degrees: 500 m per unit of s over Earth's radius,
    # converted from radians; roughly 0.045 for s=10.
    half_side = (180 / math.pi) * (500 / 6378137) * s
    return lat - half_side, lon - half_side, lat + half_side, lon + half_side
7f39942cdb65a274ebf77257941211fa59f7cf89
696,233
import random


def uniqueof20(k, rep=10000):
    """Sample k letters (with replacement) from a 20-letter amino-acid
    alphabet, `rep` times; return the mean number of distinct letters."""
    alphabet = 'ACDEFGHIKLMNPQRSTVWY'
    trial_counts = []
    for _ in range(rep):
        drawn = {random.choice(alphabet) for _ in range(k)}
        trial_counts.append(len(drawn))
    return sum(trial_counts) / len(trial_counts)
349f1bd964419585df46e13d3cde64d8f5a42c86
696,238
def policy_name_as_regex(policy_name):
    """Get the correct policy name as a regex.

    (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as
    OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml, so for now we query it
    as OOF_HAS_vCPE..*aicAttributePolicy.*)

    :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
    :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
    """
    prefix, dot, suffix = policy_name.partition('.')
    return "{}{}.*{}.*".format(prefix, dot, suffix)
5b60a6f35a30af5f3514a43c24c7bee25505adfb
696,240
def get_primary_key_params(obj):
    """
    Build a dict of primary-key column name -> value from a mapped object,
    suitable for formatting a primary key logline.
    """
    return {
        key: getattr(obj, key)
        for key in obj.__table__.primary_key.columns.keys()
    }
aad247b31925389bca21ef35fc6416c286587eee
696,243
def mean(numbers):
    """Return the arithmetic mean of a list of numbers."""
    total = float(sum(numbers))
    return total / float(len(numbers))
26601c23b8b6af48895a43f6e596e25eb626e7d6
696,245
def margined(arr, prop):
    """Return (min(arr) - epsilon, max(arr) + epsilon), where
    epsilon = (max(arr) - min(arr)) * prop.

    This gives the range of values within arr along with some margin on the
    ends.

    ARR: a NumPy array
    PROP: a float
    """
    low = arr.min()
    high = arr.max()
    epsilon = (high - low) * prop
    return (low - epsilon, high + epsilon)
65ea4e99453ae300b08094f1891a31cca3d302bd
696,248
def faceAreaE3(face):
    """Computes the area of a triangular DCEL face with vertex coordinates
    given as PointE3 objects.

    Args:
        face: A triangular face of a DCEL with vertex .data given by PointE3
            objects.

    Returns:
        The area of the triangular face.
    """
    origin, corner_a, corner_b = [v.data for v in face.vertices()]
    edge_a = corner_a - origin
    edge_b = corner_b - origin
    # Half the cross-product magnitude is the triangle area.
    return edge_a.cross(edge_b).norm() * 0.5
412031297213a702f0579f8c86c23c93baaff8c8
696,250
import asyncio


def get_active_loop() -> asyncio.AbstractEventLoop:
    """Return the currently running asyncio loop, or create a new one.

    Uses the public asyncio API instead of the private
    ``asyncio.events._get_running_loop`` helper the original relied on.

    Returns:
        asyncio.AbstractEventLoop
    """
    try:
        return asyncio.get_running_loop()
    except RuntimeError:
        # No loop is running in this thread; hand back a fresh one.
        return asyncio.new_event_loop()
42c15961326d2a5a372237a8455fbe4f292dff80
696,251
def gen_datavalue_list(soup):
    """Create a list of all the datavalues on the BLS EAG webpage."""
    spans = soup.findAll('span', 'datavalue')
    return [span.text for span in spans]
8901511f0f65945c200b2ccc77d4cb011da41453
696,252
def bet_size_sigmoid(w_param, price_div):
    """
    Part of SNIPPET 10.4
    Calculates the bet size from the price divergence and a regulating
    coefficient, based on a sigmoid function for a bet size algorithm.

    :param w_param: (float) Coefficient regulating the width of the bet size function.
    :param price_div: (float) Price divergence, forecast price - market price.
    :return: (float) The bet size.
    """
    scale = (w_param + price_div ** 2) ** (-0.5)
    return price_div * scale
cbc6c8d70f6f000e701f140ccbae34b55d7a46df
696,253
import re


def slugify(text):
    """
    Returns a slug of given text, normalizing unicode data for file-safe
    strings. Used for deciding where to write images to disk.

    Parameters
    ----------
    text : string
        The string to slugify

    Returns
    -------
    slug : string
        A normalized slug representation of the text

    .. seealso:: https://bit.ly/2NW7s1j
    """
    # Replace runs of non-word characters with spaces, then hyphenate.
    spaced = re.sub(r"[^\w]+", " ", text)
    return "-".join(spaced.lower().strip().split())
8ac550ed32627a6c8a145b9442960e064ebd44e2
696,254
def flight_time_movies_1_brute_force(movie_lengths, flight_length):
    """
    Solution: Brute force iterative solution compares each movie length with
    all subsequent movie lengths.
    Complexity: Time: O(n^2)

    Raises ValueError when fewer than two movie lengths are given.
    """
    if len(movie_lengths) < 2:
        raise ValueError('movie length list must be at least 2 items long')
    for index, first_length in enumerate(movie_lengths):
        # All other movie lengths (skipping the one at `index`).
        others = movie_lengths[:index] + movie_lengths[index + 1:]
        for second_length in others:
            if first_length + second_length == flight_length:
                return True
    return False
612825507cf1aea086bcfa37b702e6a778d85b7c
696,256
from datetime import datetime


def datetime_type(string):
    """
    Validates UTC datetime. Examples of accepted forms:
    2017-12-31T01:11:59Z, 2017-12-31T01:11Z, 2017-12-31T01Z or 2017-12-31
    """
    accepted_date_formats = ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%MZ',
                             '%Y-%m-%dT%HZ', '%Y-%m-%d']
    for fmt in accepted_date_formats:
        try:
            return datetime.strptime(string, fmt)
        except ValueError:
            # Not this format; try the next one.
            continue
    raise ValueError("Input '{}' not valid. Valid example: 2017-02-11T23:59:59Z".format(string))
36acb1d83b38310c463b2376fa2284fb6e9ad73e
696,257
import re


def _remove_line_end_ellipsis_or_pass_keyword(line: str) -> str:
    """
    Remove an ellipsis or pass keyword from the end of a line
    (e.g., `def sample_func(): ...` or `def sample_func(): pass`).

    Parameters
    ----------
    line : str
        Target line string.

    Returns
    -------
    result_line : str
        Line string with a trailing ellipsis or pass keyword removed.

    Notes
    -----
    The original used re.sub with the pattern `' ...$'`, whose unescaped
    dots match any three characters; plain slicing is exact and simpler.
    """
    for suffix in (' ...', ' pass'):
        if line.endswith(suffix):
            return line[:-len(suffix)]
    return line
5ff12264670184737b2d9cc69f9fb2d8eca66cd9
696,258
def i_to_r(i, L, dx):
    """Return coordinates of lattice indices in continuous space.

    Parameters
    ----------
    i: integer array, shape (a1, a2, ..., d)
        Integer indices, with last axis indexing the dimension.
        It's assumed that all components of the vector lie within
        plus or minus (L / dx) / 2.
    L: float
        Length of the lattice, assumed to be centred on the origin.
    dx: float
        Spatial lattice spacing.
        This means the number of lattice points is (L / dx).

    Returns
    -------
    r: float array, shape of i
        Coordinate vectors of the lattice points specified by the indices.
    """
    # Cell centres sit half a spacing in from the lattice's left edge.
    left_edge = -L / 2.0
    return left_edge + (i + 0.5) * dx
3c9f6ecc87a5220d487b434873f63c04f9933720
696,262
def _process_for_token(request):
    """
    Checks for tokens in formdata without prior knowledge of request method.

    Returns a dict with the formdata variables under 'vars' plus a
    'has_userid_and_token' flag indicating whether both fields are present.
    """
    # GET requests carry data in the query string, others in the form body.
    formdata_vars = request.GET if request.method == 'GET' else request.form
    has_both = 'userid' in formdata_vars and 'token' in formdata_vars
    return {
        'vars': formdata_vars,
        'has_userid_and_token': has_both,
    }
14088de395c977ce6a59da3384fa8d49b213e791
696,263
def make_response(error, message=None, image_base64=None):
    """
    Generates the ObjectCut JSON response.

    :param error: True if the response has to be flagged as error, False otherwise.
    :param message: Message to return if it is a error response.
    :param image_base64: Image result encoded in base64 if it is a success response.
    :return: ObjectCut JSON response.
    """
    response = {'error': error}
    if error:
        response['message'] = message
    else:
        response['response'] = {'image_base64': image_base64}
    return response
3be80141811fa493441a1ab964b4b6014a183dd1
696,265
def _cut(match):
    """
    Cut the matched characters out of the searched string, joining the
    remaining pieces with a space (no space when the match touches an end).
    """
    string = match.string
    start, end = match.span()
    if start == 0:
        return string[end:]
    if end == len(string):
        return string[:start]
    return ' '.join((string[:start], string[end:]))
5a9b1ac7a4030b972d14b04c81acb343066b3f2b
696,266
def parse_metadata_words(language='english', quality='low'):
    """
    Identifies words corresponding to different metadata in the language.

    Parameters:
    -----------------------------------
    language : str
        Name of the language whose testing data to fetch
    quality : str
        size of the dataset to consider

    Returns:
    -----------------------------------
    metadata_words : dict
        A dictionary with all the words grouped by metadata
    """
    metadata_words = {}
    filepath = "psynlp/data/{}-train-{}".format(language, quality)
    # Context manager ensures the file handle is closed (original leaked it).
    with open(filepath, 'r') as file:
        for line in file:
            source, dest, metadata = line.split("\t")
            # Skip wildcard entries.
            if "*" in source or "*" in dest:
                continue
            metadata = metadata.strip()
            # BUG FIX: the original assigned an empty list for a new metadata
            # key without appending, silently dropping the first pair seen.
            metadata_words.setdefault(metadata, []).append((source, dest))
    return metadata_words
7b8430c3e9c553167e5d710ef5e1d0ddaaeded00
696,267
from typing import List


def split_list(lst: List[str], wanted_parts: int = 1) -> List[List[str]]:
    """
    Splits a list into `wanted_parts` roughly equal contiguous sublists.

    Args:
        lst: List to be split into smaller parts
        wanted_parts: Number of sublists to produce

    Returns:
        A list of `wanted_parts` sublists
    """
    length = len(lst)
    parts = []
    for part in range(wanted_parts):
        lower = part * length // wanted_parts
        upper = (part + 1) * length // wanted_parts
        parts.append(lst[lower:upper])
    return parts
204711aa6b8c14c54673182e6d89addd8ccdc857
696,271
def gamma_to_tau_hard_threshold(gamma):
    """Convert gamma to tau for hard thresholding: tau = gamma**2 / 2."""
    squared = gamma ** 2
    return 0.5 * squared
4034918244477182cd3d7a14b97710c0731e03e3
696,274
def build_sample_map(flowcell):
    """Build sample map ``dict`` for the given flowcell.

    Assigns "S1", "S2", ... to library names in (lane, name) sorted order.
    """
    lane_name_pairs = {
        (lane, lib["name"])
        for lib in flowcell["libraries"]
        for lane in lib["lanes"]
    }
    result = {}
    counter = 1
    for _, name in sorted(lane_name_pairs):
        if name not in result:
            result[name] = "S{}".format(counter)
            counter += 1
    return result
faf43ca65146093462ae26a9c18ebb238e23a7ff
696,276
def indent(
        text,      # Text to indent
        char=' ',  # Character to use in indenting
        indent=2   # Repeats of char
):
    """Indent single- or multi-lined text."""
    prefix = char * indent
    indented_lines = [prefix + line for line in text.split("\n")]
    return "\n".join(indented_lines)
f170745f99a2bb151e79c2f468cf23880d60b3e5
696,281
def first_element_or_none(element_list):
    """
    Return the first element or None from an lxml selector result.

    :param element_list: lxml selector result
    :return: first element, or None when the result is empty
    """
    return element_list[0] if element_list else None
df9c3437f38a50db96f0f4f946ede41916e5e2cf
696,282
def is_public(data, action):
    """Check if the record is fully public.

    In practice this means that the record doesn't have the ``_access`` key,
    or the action is missing/empty inside ``_access``.
    """
    if "_access" not in data:
        return True
    return not data.get("_access", {}).get(action)
2c5c80f8e16014f08df2cc34696cea8249e633b1
696,283
def __slice_scov__(cov, dep, given):
    """
    Slices a covariance matrix keeping only the covariances between the
    variables indicated by the array of indices of the independent variables.

    :param cov: Covariance matrix.
    :param dep: Index of dependent variable.
    :param given: Array of indices of independent variables.
    :return: A |given| x |given| matrix of covariance.
    """
    keep_rows = [idx for idx in range(cov.shape[0]) if idx in given and idx != dep]
    keep_cols = [idx for idx in range(cov.shape[1]) if idx in given and idx != dep]
    # Two-step fancy indexing selects the kept rows, then the kept columns.
    return cov[keep_rows, :][:, keep_cols]
a5742c8dc4db245521477b0bf8c6ad8f3b463a2b
696,286
import random
import string


def rnd_string(n=10):
    """Generate a random string of `n` uppercase letters and digits, using a
    cryptographically seeded source."""
    pool = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(pool) for _ in range(n))
1869afa5e950c24d75a446c54b9f53abd942c007
696,291
from typing import Any
from typing import Iterable
from typing import Optional


def find_in_list(element: Any, container: Iterable[Any]) -> Optional[Any]:
    """
    If the element is in the container, return it. Otherwise, return None.

    :param element: to find
    :param container: container with element
    :return: element or None
    """
    for candidate in container:
        if candidate == element:
            return candidate
    return None
4fd775e93472f466b90eb0c2ee4fda6aa6ead69e
696,298
import collections


def _get_tasks_by_domain(tasks):
    """Return a dict mapping each domain name to a tuple of its task names.

    (The previous docstring stated the inverse mapping; the code groups
    (domain_name, task_name) pairs by domain.)
    """
    result = collections.defaultdict(list)
    for domain_name, task_name in tasks:
        result[domain_name].append(task_name)
    # Freeze each per-domain list into a tuple.
    return {k: tuple(v) for k, v in result.items()}
53483e8ddf28490b6c00a4d456b77b48de1aeb7d
696,299
def splitFrom(df, attr, val):
    """
    Split a DataFrame in two subsets based on the given attribute.

    :param df: DataFrame to split
    :param attr: attribute on which to split the data
    :param val: value of the attribute where the split happens
    :return: (rows with attr < val, rows with attr >= val)
    """
    if attr not in df.columns:
        raise ValueError("******* "+attr+" not in DataFrame *******")
    below = df.loc[df[attr] < val]
    at_or_above = df.loc[df[attr] >= val]
    return below, at_or_above
e2c4f0c03d4b15ec2915e22bb905d4202cdf66cb
696,300
def subtract_vect(a, b):
    """
    Subtract 3-vector b from 3-vector a, componentwise.

    Deprecated, use mpmath instead!!!

    :param a: [float, float, float]
    :param b: [float, float, float]

    >>> subtract_vect([1, 2, 3], [3, 2, 2])
    (-2, 0, 1)
    """
    differences = (a[axis] - b[axis] for axis in range(3))
    return tuple(differences)
3465a670158a0ae34879a7d21599a9b098733f4d
696,304
import re


def generate_root_filename(rawname, add="_mass"):
    """Generate the appropriate root filename based on a file's LJH name.

    Takes /path/to/data_chan33.ljh --> /path/to/data_mass.root

    :param rawname: LJH file path containing a ``_chanNN`` segment
    :param add: suffix inserted before the ``.root`` extension
    :return: the derived .root path
    """
    # Everything before the "_chan<number>" marker is the shared prefix.
    prefix = re.split(r"_chan\d+", rawname)[0]
    return prefix + add + ".root"
412056f37ffea1835b2f63b346ff4168d8391d2f
696,306
def get_top10(recommendations_list):
    """Return the first 10 elements of a list (fewer if the list is shorter)."""
    head = slice(0, 10)
    return recommendations_list[head]
32be743fda0a8eb3932416ef487993ec686d7bc8
696,310
def roi_to_matlab(rois):
    """Shift every ROI index from 0-based (Python) to 1-based (MATLAB).

    Mutates the ROI objects in place and returns the same sequence.

    :param rois: All ROIs in experiment
    :return: The same ROIs, with each ``index`` incremented by one
    """
    for region in rois:
        region.index = region.index + 1
    return rois
5a6fe428eaea89498f62e46469c925c51e56e2b3
696,312
def read_xsc_step_number(xsc_filename):
    """
    Read a NAMD .xsc file to extract the latest step to use as input for
    restarts and such.

    :param xsc_filename: path to the .xsc file
    :return: step number from the last data line, or 0 if none found
    """
    last_step = 0
    with open(xsc_filename, "r") as f:
        for line in f:
            stripped = line.strip()
            # Skip comments and blank lines; the original crashed with
            # int('') on an empty line and missed indented '#' comments.
            if not stripped or stripped.startswith("#"):
                continue
            last_step = int(stripped.split(" ")[0])
    return last_step
115db280e4b7cf43e9b781b4f8855e0af8792b06
696,313
def read_file(filename):
    """Read a text file and return its lines with newlines removed.

    Parameters:
        filename - a text file to read

    Returns:
        list: lines
    """
    with open(filename) as handle:
        return handle.read().splitlines()
f16fa3e1923a2687f72438fa4223da0c82a0043e
696,315
def _mock_authenticate_user(_, client=None):
    """Mock Pycognito's authenticate-user method.

    This code is from Pycognito's test suite; it returns a canned
    authentication payload with dummy tokens.
    """
    tokens = {name: "dummy_token" for name in ("IdToken", "AccessToken", "RefreshToken")}
    tokens["TokenType"] = "admin"
    return {"AuthenticationResult": tokens}
dccbdf5138eea63c543a824de3c003efb5af6210
696,317
def iter_first(sequence):
    """Get the first element from an iterable or raise a ValueError if
    the iterator generates no values.
    """
    it = iter(sequence)
    try:
        return next(it)
    except StopIteration:
        # 'from None' suppresses the implicit StopIteration context, which
        # otherwise clutters the traceback with an irrelevant chained error.
        raise ValueError("iterable produced no values") from None
007648dcbc903572ca33221c5884febc7e78d956
696,318
def compareRule(origFileName):
    """
    Function that applies a rule to a file name to make it comparable to
    other file names. It strips the ``.tif`` extension plus the final
    distinguishing character, so tif files that only differ in one character
    at the end, like 038_FJB_1904-001a.tif and 038_FJB_1904-001b.tif,
    compare as equal.

    :param origFileName: file name as UTF-8 encoded bytes
    :return: comparison key as str
    """
    name = origFileName.decode('utf-8')
    # BUG FIX: str.rstrip('.tif') strips any trailing run of the characters
    # '.', 't', 'i', 'f' (e.g. 'profit.tif' -> 'pro'), not the literal
    # suffix.  Remove the extension explicitly instead.
    suffix = '.tif'
    if name.endswith(suffix):
        name = name[:-len(suffix)]
    return name[:-1]
5e7bdad032d5475eb988637210f951bb97bdc0f7
696,319
def plural(word, items):
    """Returns "N words" or "1 word".

    ``items`` may be a count or a container whose length is the count.
    """
    if isinstance(items, (dict, list, set, tuple)):
        count = len(items)
    else:
        count = items
    suffix = "" if count == 1 else "s"
    return "%s %s%s" % (count, word, suffix)
a40493ff2cf09dc5e033962037b544f02d9f4666
696,321
import functools


def nillable_string(func):
    """Decorator that returns None if the string input is None.

    Otherwise delegates to the wrapped ``func(cls, string)``.
    """
    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original decorator lost.
    @functools.wraps(func)
    def wrapper(cls, string):
        if string is None:
            return None
        return func(cls, string)
    return wrapper
e4dc2fda61334e6ed1368dfca431bdc5b8479e6c
696,322
import re


def get_master_names(desired_master_state, name_regex):
    """Returns masters found in <desired_master_state> that match <name_regex>.

    Args:
        desired_master_state: A "desired_master_state" object, e.g. as
            returned by desired_state_parser
        name_regex: regex string matched against master names

    Returns:
        [str1, str2, ...] All matching masters found in <desired_master_state>
    """
    # Allow an optional "master." prefix before the caller's pattern.
    matcher = re.compile(r'(master\.)?' + name_regex)
    names = []
    for name in desired_master_state["master_states"].keys():
        if matcher.match(name):
            names.append(name)
    return names
9343964103d1e93ff0d6de7d019c1fd206e84d3b
696,323
def merge_list_entries(list_to_merge):
    """Merge overlapping tuples in a list.

    Takes a list of two-float tuples (smaller number first) denoting regions
    on the number line, sorts them by lower bound, and returns the union of
    any regions that overlap.

    Parameters
    ----------
    list_to_merge : list
        A list of tuples of floats, denoting regions on the number line.

    Returns
    -------
    list
        A list containing all the overlapping regions found in the input
        list, merged where they touch or overlap.
    """
    merged = []
    for low, high in sorted(list_to_merge, key=lambda pair: pair[0]):
        if merged and low <= merged[-1][1]:
            # Overlaps (or touches) the previous region: extend it.
            prev_low, prev_high = merged[-1]
            merged[-1] = (prev_low, max(prev_high, high))
        else:
            merged.append((low, high))
    return merged
e507a855e7b6dc0330ac21dfe159a793d6e5cd8c
696,324
def get_organic_aerosols_keys(chem_opt):
    """Return the anthropogenic and biogenic organic-aerosol variable keys.

    :param chem_opt: WRF-Chem chemistry option number (106, 108 or 100
        are supported)
    :return: (asoa_keys, bsoa_keys) tuples/lists of variable names, or
        (None, None) for unsupported options
    """
    asoa_keys = None
    bsoa_keys = None
    if chem_opt == 106:
        asoa_keys = ('orgaro1i', 'orgaro1j', 'orgaro2i', 'orgaro2j',
                     'orgalk1i', 'orgalk1j', 'orgole1i', 'orgole1j')  # SOA Anth
        bsoa_keys = ('orgba4i', 'orgba4j', 'orgba3i', 'orgba3j',
                     'orgba2i', 'orgba2j', 'orgba1i', 'orgba1j')  # SOA Biog
    elif chem_opt == 108 or chem_opt == 100:
        asoa_keys = 'asoa1j,asoa1i,asoa2j,asoa2i,asoa3j,asoa3i,asoa4j,asoa4i'.split(',')  # SOA Anth
        bsoa_keys = 'bsoa1j,bsoa1i,bsoa2j,bsoa2i,bsoa3j,bsoa3i,bsoa4j,bsoa4i'.split(',')  # SOA Biog
    else:
        # BUG FIX: the original message contained a '{}' placeholder but
        # never called .format(), so the option number was never shown.
        print('PP: this chem_opt {} is not implemented, dont know how to combine organics'.format(chem_opt))
    return asoa_keys, bsoa_keys
68b342adde5c0dd1de9e81de12de99c0cab40d0b
696,325
def mean_wikipedia_frequency(frequency_cache, lemmatizer, tokens):
    """
    Retrieves frequency for a list of tokens and returns mean frequency.

    :param frequency_cache: a frequency lookup table (lemma -> count)
    :param lemmatizer: a lemmatizer providing ``lemmatize(token)``
    :param tokens: a non-empty sequence of tokens (strings)
    :return: mean frequency over all tokens (unknown lemmas count as 1)
    """
    freq_sum = 0
    for token in tokens:
        lemma = lemmatizer.lemmatize(token)
        # BUG FIX: the original assigned (=) instead of accumulating (+=),
        # so it returned only the last token's frequency divided by len().
        freq_sum += frequency_cache.get(lemma, 1)
    return freq_sum / len(tokens)
d92334cd99127ee60a323db39b71970ad4b1c1f2
696,326
def benefits(income, n_children, params):
    """Calculate benefits according to income, number of children and params.

    Args:
        income (pd.Series)
        n_children (pd.Series): Same length as income.
        params (pd.Series): Must contain "benefit_per_child" and
            "benefit_cutoff"

    Returns:
        pd.Series: The benefits (zero above the income cutoff).
    """
    per_child_total = params.benefit_per_child * n_children
    eligible = income <= params.benefit_cutoff
    # .where keeps the benefit for eligible rows and zeroes the rest.
    return per_child_total.where(eligible, 0)
beb6f3f3a695ee4ae2b76ce7058906ca14ccebeb
696,327
def collect_first_sep(_, nodes):
    """
    Used for: Elements = Elements "," Element;

    Appends the new element (if any) to a copy of the collected list;
    returns the original list untouched when there is no new element.
    """
    elements, _sep, element = nodes
    if element is None:
        return elements
    extended = list(elements)
    extended.append(element)
    return extended
378dc75f20d0e5a03c2c34c1fd02feea651e5fb7
696,328
def camelcase_to_underscores(argument: str) -> str:
    """Converts a camelcase param like theNewAttribute to the equivalent
    python underscore variable like the_new_attribute.

    Underscores are inserted at case transitions (upper->lower starts a new
    word, as does lower->upper); whitespace is dropped and everything is
    lowercased.
    """
    result = ""
    # The (virtual) character before position 0 counts as titlecase, so no
    # underscore is ever emitted before the first character.
    prev_char_title = True
    if not argument:
        return argument
    for index, char in enumerate(argument):
        try:
            next_char_title = argument[index + 1].istitle()
        except IndexError:
            # Past the end: treat as titlecase so a trailing capital does
            # not trigger an upper->lower transition.
            next_char_title = True
        # End of an acronym run (e.g. the 'I' in "HTTPInput"):
        upper_to_lower = char.istitle() and not next_char_title
        # Start of a new word (e.g. the 'N' in "theNew"):
        lower_to_upper = char.istitle() and not prev_char_title
        if index and (upper_to_lower or lower_to_upper):
            # Only add underscore if char is capital, not first letter, and next
            # char is not capital
            result += "_"
        prev_char_title = char.istitle()
        if not char.isspace():
            # Only add non-whitespace
            result += char.lower()
    return result
d50d77cf0952c06f1d2ea003d4e6b2e534ef84f7
696,329
def pfreduce(func, iterable, initial=None):
    """A pointfree reduce / left fold function: Applies a function of two
    arguments cumulatively to the items supplied by the given iterable, so
    as to reduce the iterable to a single value.  If an initial value is
    supplied, it is placed before the items from the iterable in the
    calculation, and serves as the default when the iterable is empty.

    :param func: A function of two arguments
    :param iterable: An iterable yielding input for the function
    :param initial: An optional initial input for the function
    :rtype: Single value

    Example::

        >>> from operator import add
        >>> pfreduce(add, [3, 4, 5, 6], initial=0)
        18
    """
    iterator = iter(iterable)
    try:
        first_item = next(iterator)
    except StopIteration:
        return initial
    # BUG FIX: test the sentinel with 'is not None', not truthiness --
    # a falsy initial value such as 0 or "" must still seed the fold
    # (e.g. pfreduce(sub, [1, 2], initial=0) is -3, not -1).
    if initial is not None:
        value = func(initial, first_item)
    else:
        value = first_item
    for item in iterator:
        value = func(value, item)
    return value
621b48d894c2c510a713f6948e623c791cd429f5
696,331
from collections import Counter


def get_most_common(exercises, n=3):
    """Get the names of the n most common sports.

    :param exercises: iterable of objects with a ``sport`` attribute
    :param n: number of sports to return (default 3)
    :return: list of sport names, most frequent first
    """
    # Counter(iterable) tallies in one pass; this also replaces the
    # runtime use of the deprecated typing.Counter alias with the real
    # collections.Counter.
    counts = Counter(exercise.sport for exercise in exercises)
    return [sport for sport, _count in counts.most_common(n)]
204684d2d284cc902b5e64b55757d486af71a8ad
696,335
def available_colors(G, vertex, number_of_colors):
    """Returns all the available colors for vertex

    Parameters:
        G: a networkx graph with Graph Nodes
        vertex: the vertex number (int)
        number_of_colors: the number of colors (int)

    Returns:
        colors: list of available colors (list)
    """
    colors = list(range(number_of_colors))
    for neighbor in G.neighbors(vertex):
        # A neighbor may be uncolored: the 'node' data can be missing
        # (KeyError / AttributeError) or its color absent from the palette
        # (ValueError).  Catch only those instead of a bare Exception,
        # which also hid genuine bugs.
        try:
            colors.remove(G.nodes[neighbor]['node'].color)
        except (KeyError, ValueError, AttributeError):
            pass
    return colors
b19dfe9516eb7a74d259a3d69b868e78fe56d3e9
696,341
def parseTextFile(file_name, delimiter=",", header=0):
    """Parse a delimited text file into a list of lists.

    The file contents are delimited and may have a header to skip; each
    entry is stripped of surrounding whitespace.

    :param file_name: The path to the file
    :type file_name: str
    :param delimiter: The delimiter to use to parse the file
    :type delimiter: str
    :param header: The number of lines at the top of the file to ignore
    :type header: int

    :return: Text file parsed into a list
    :rtype: list
    """
    data = []
    with open(file_name) as source:
        for line_number, raw_line in enumerate(source):
            # Skip the header lines.
            if line_number < header:
                continue
            # Drop newline characters, split on the delimiter, strip entries.
            cleaned = raw_line.replace("\n", "").replace("\r", "")
            data.append([entry.strip() for entry in cleaned.split(delimiter)])
    return data
fe711396e13f2dd6a7bb688b570f59d3a23a850a
696,342
def is_m_to_n_pandigital(num, bound_m, bound_n):
    """
    Determine if a number is m-to-n pandigital, i.e. it uses each digit
    from bound_m through bound_n exactly once and no others.
    """
    digits = str(num)
    # Any repeated digit disqualifies the number immediately.
    if len(set(digits)) != len(digits):
        return False
    target = {str(d) for d in range(bound_m, bound_n + 1)}
    return set(digits) == target
ab0fb7b1e8369ea7118408dac108c87d17b07eef
696,345
def hours_mins_2_mins(time):
    """
    Converts a time consisting of hours & minutes to minutes

    Parameters
    ------------
    time : str
        Time in "HHMM" form, to be converted to minutes

    Returns
    ------------
    mins_tot : int
        Time converted from hours:minutes to minutes, or -1 (after
        printing an error) when the input is not a string
    """
    if type(time) is not str:
        print("Error: Time must be of type str")
        return -1
    hours = int(time[:2])
    minutes = int(time[2:])
    return hours * 60 + minutes
acbbbdea7617f2db5390e01436127bb8c423c634
696,348
def next_multiple(x: int, k: int = 512) -> int:
    """Calculate x's closest higher multiple of base k.

    Returns x unchanged when it is already a multiple of k.
    """
    remainder = x % k
    return x if remainder == 0 else x + (k - remainder)
fbf8cf548851d0c57867292f9ddcfc33de9b03c0
696,350
def convert_to_DNA(sequence):
    """Convert an RNA sequence to its DNA equivalent.

    Uppercases the input and replaces every U with T.
    """
    return str(sequence).upper().replace('U', 'T')
2a69a3102df8f5a16b2b049fb1d80dae720b10e3
696,351
def fit(model, data_bunch, **kwargs):
    """
    Fits an H2O Model

    :param model: An H2O Model
    :param data_bunch: A DataBunch with "train" and "valid" datasets that
        consist of H2ODataWrapper or H2OSparklingDataWrapper
    :param kwargs: Keyword arguments to be passed to model.train
    :return: A (fitted) H2O Model
    """
    features = data_bunch.train.features.field_names
    # NOTE(review): the target name is read from the *valid* split while the
    # features come from *train* -- presumably both splits share field names;
    # confirm against DataBunch construction.
    target = data_bunch.valid.targets.field_names[0]
    model.train(
        x=features,
        y=target,
        training_frame=data_bunch.train.full_data.underlying,
        validation_frame=data_bunch.valid.full_data.underlying,
        **kwargs,
    )
    return model
f9d0dd6835f145d00b7da6a99acca9ccc90653a8
696,352
import yaml


def pretty_yaml(value, file_=None):
    """
    Print an object to a YAML string

    :param value: object to dump
    :param file_: Open, writable file object
    :return: str (YAML); note yaml.dump returns None when a stream is given
    """
    options = {
        "indent": 2,
        "allow_unicode": True,
        "default_flow_style": False,
    }
    return yaml.dump(value, stream=file_, **options)
6c59ac3b34a0e4fdd8878074298787d30a8404ff
696,354
import functools
import operator


def get_dict_element(data, path, delimiter='.'):
    """
    Traverse a nested dict using a 'delimiter' on a target string.

    Each segment of ``path`` indexes one level deeper into ``data``.
    """
    element = data
    for key in path.split(delimiter):
        element = operator.getitem(element, key)
    return element
2699c6c514f894a9d38e92de982efb9c27ddfa46
696,355