Dataset columns: content (string, lengths 39 to 9.28k), sha1 (string, length 40), id (int64, values 8 to 710k).
def _get_absolute_paths(ctx, file_labels):
    """Return the absolute paths for the files."""
    return [ctx.path(file_label) for file_label in file_labels]
e77e53f7c7cfe9b2a3669e428eb1e1ee43914520
219,265
def pmap(iterable, fn):
    """
    Pipeable version of `map`, i.e., the arguments are swapped.

    Because `map` expects a function as first argument it is not really
    suited for `pipe`. `pmap` swaps the arguments.

    Args:
        iterable: some iterable
        fn (callable):

    Returns:
        result of map(fn, iterable)

    Examples:
        >>> pipe(
        ...     range(5),
        ...     (pmap, lambda x: x*x),
        ...     list)
        [0, 1, 4, 9, 16]
    """
    return map(fn, iterable)
22402c643437ab739adb26daf2dc1eb9c9a0fc46
342,718
def window_width(g_core: float, t_in: float, t_out: float, g: float,
                 t_r: float, g_r: float) -> float:
    """
    Calculated winding window width, where

        g_core - distance between the core and the inner winding in [mm]
        t_in   - thickness of the inner winding in [mm]
        t_out  - thickness of the outer winding in [mm]
        g      - main gap in [mm]
        t_r    - width of the regulating winding in [mm]
        g_r    - distance between the outer winding and the regulating winding in [mm]

    g is also counted once more as a phase distance at the end of the windings.
    """
    return round(g_core + t_in + t_out + g + t_r + g_r + g, 1)
fd86eeb816c75b8e7d940d5321259c62abc0ec50
34,556
def get_bondethernets(yaml):
    """Return a list of all bondethernets."""
    ret = []
    if "bondethernets" in yaml:
        for ifname, _iface in yaml["bondethernets"].items():
            ret.append(ifname)
    return ret
c1ddc43bfa79282f1fbd8ce47677f2c1f05c6f3a
552,489
import math


def gowa(w, wm, l=1.0):
    """
    Generalized Ordered Weighted Averaging Operator

    More info can be found here:
    https://pdfs.semanticscholar.org/2810/c971af0d01d085c799fb2295dc5668d055c8.pdf

    l = -1 = Ordered Weighted Harmonic Averaging Operator
    l = -.000000000001 = Ordered Weighted Geometric Averaging Operator
    l = 1 = Ordered Weighted Arithmetic Averaging Operator
    l = 2 = Ordered Weighted Quadratic Averaging Operator

    w = list of weights
    wm = list of importance weighted membership values
    l = lambda real number specifying type of owa to use

    returns ordered weighted average
    """
    if len(w) != len(wm):
        raise ValueError("Weights and membership value lists must be of equal length.")
    if l == 0:
        raise ZeroDivisionError("Param l cannot be 0. Use -.000000000001 for owg.")
    wm.sort(reverse=True)
    s = 0
    for i in range(len(w)):
        s += w[i] * math.pow(wm[i], l)
    return math.pow(s, 1/l)
152cda6133e4b3ff87769364a60f13128c154b59
194,027
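A quick usage sketch for the gowa snippet above (illustrative only; the weights and membership values below are made up, not part of the dataset):

w = [0.5, 0.3, 0.2]    # ordered weights, summing to 1
wm = [0.7, 0.9, 0.6]   # importance-weighted membership values
gowa(w, wm, l=1.0)     # arithmetic OWA: 0.5*0.9 + 0.3*0.7 + 0.2*0.6 ≈ 0.78

Note that gowa() sorts wm in place, so the caller's list is reordered as a side effect.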
def get_workflows_without_active_cycles(conn):
    """Get workflows with no active cycles"""
    res = conn.execute("""
        SELECT id
        FROM workflows
        WHERE status='Active'
          AND NOT EXISTS (
            SELECT 1
            FROM cycles
            WHERE cycles.workflow_id = workflows.id)
    """)
    ids = [workflow[0] for workflow in res.fetchall()]
    return ids
94d59a50e741beb2a0f781b096a575c723e7b87f
333,813
import torch


def quat_to_rotMat(q):
    """Convert quaternions to rotation matrices.

    Using equation provided in XSens MVN Manual:
    https://www.xsens.com/hubfs/Downloads/usermanual/MVN_User_Manual.pdf

    Args:
        q (torch.Tensor): quaternion(s) to convert to rotation matrix format

    Returns:
        torch.Tensor: rotation matrix converted from quaternion format
    """
    if len(q.shape) != 2:
        q = q.unsqueeze(0)
    assert q.shape[1] == 4
    r0c0 = q[:, 0]**2 + q[:, 1]**2 - q[:, 2]**2 - q[:, 3]**2
    r0c1 = 2*q[:, 1]*q[:, 2] - 2*q[:, 0]*q[:, 3]
    r0c2 = 2*q[:, 1]*q[:, 3] + 2*q[:, 0]*q[:, 2]
    r1c0 = 2*q[:, 1]*q[:, 2] + 2*q[:, 0]*q[:, 3]
    r1c1 = q[:, 0]**2 - q[:, 1]**2 + q[:, 2]**2 - q[:, 3]**2
    r1c2 = 2*q[:, 2]*q[:, 3] - 2*q[:, 0]*q[:, 1]
    r2c0 = 2*q[:, 1]*q[:, 3] - 2*q[:, 0]*q[:, 2]
    r2c1 = 2*q[:, 2]*q[:, 3] + 2*q[:, 0]*q[:, 1]
    r2c2 = q[:, 0]**2 - q[:, 1]**2 - q[:, 2]**2 + q[:, 3]**2
    r0 = torch.stack([r0c0, r0c1, r0c2], dim=1)
    r1 = torch.stack([r1c0, r1c1, r1c2], dim=1)
    r2 = torch.stack([r2c0, r2c1, r2c2], dim=1)
    R = torch.stack([r0, r1, r2], dim=2)
    return R.permute(0, 2, 1)
f61f0c1044f6cf3e3dbc4815b410b9943823f35f
163,823
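A small sanity check for the quat_to_rotMat snippet above (assumed usage, not part of the dataset): the identity quaternion in the scalar-first (w, x, y, z) convention should map to the 3x3 identity matrix.

q = torch.tensor([[1.0, 0.0, 0.0, 0.0]])   # identity quaternion, scalar-first
R = quat_to_rotMat(q)                       # shape (1, 3, 3)
assert torch.allclose(R[0], torch.eye(3))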
def was_active(reference_date_value, asset):
    """
    Whether or not `asset` was active at the time corresponding to
    `reference_date_value`.

    Parameters
    ----------
    reference_date_value : int
        Date, represented as nanoseconds since EPOCH, for which we want to know
        if `asset` was alive. This is generally the result of accessing the
        `value` attribute of a pandas Timestamp.
    asset : Asset
        The asset object to check.

    Returns
    -------
    was_active : bool
        Whether or not the `asset` existed at the specified time.
    """
    return (
        asset.start_date.value
        <= reference_date_value
        <= asset.end_date.value
    )
a99a49acd3f1eb278739d99660705abaa62533dd
601,303
def urljoin(*pieces):
    """Join a URL

    Joins the pieces together to an URL. Cannot be replaced by
    urllib.parse.urljoin, because that doesn't join parts of a path and
    doesn't allow for multiple pieces to be joined.

    Arguments:
        *pieces {str} -- pieces of URL
    """
    return '/'.join(s.strip('/') for s in pieces)
88c96d43b2025e3db000d589c68cd081e8bcf1f2
341,965
def struct_from_json(spark, json_format_schema): """Returns a schema as a pyspark.sql.types.StructType from Avro schema""" df = spark \ .read \ .format("avro") \ .option("avroSchema", json_format_schema) \ .load() df.printSchema() return df.schema
ce6c37fe1806e4ec9f5c9ec74dced8a2e6c5bcd2
209,966
def get_task_error_message(task_result):
    """ Parse error message from task result. """
    try:
        res = task_result['result']
    except Exception:
        res = task_result
    for key in ('detail', 'message'):
        try:
            return res[key]
        except Exception:
            continue
    return str(res)
e761e98c96446c15d8c2bf465f32c663febcfd8b
120,741
def remove_colour(string: str) -> str:
    """ Removes colour from string """
    string = string.replace('(colour_clear)', '')
    string = string.replace('(colour_cmd)', '')
    string = string.replace('(colour_prompt)', '')
    string = string.replace('(colour_rocky)', '')
    string = string.replace('(colour_success)', '')
    string = string.replace('(colour_warning)', '')
    return string
dfcd3150e507e835eae55b7c17744de5d406b120
299,081
def treversed(*args, **kwargs):
    """Like reversed, but returns a tuple."""
    return tuple(reversed(*args, **kwargs))
1b26313e9af2b8e515e81ca932adadf85925351e
102,562
from typing import List


def read_file_list_lines(file_path: str) -> List[str]:
    """Read file list lines from file list txt file

    Arguments:
        file_path {str} -- path of file list txt

    Returns:
        List[str] -- file list lines
    """
    with open(file_path, 'r') as f:
        lines = [l.strip() for l in f]
    return lines
c33e3e29cf00b740e3a62fd9936a9d31ab5b5b7c
404,863
def filtered(json, filter_keys):
    """
    Returns a dict of key-value pairs filtering out the specified keys.

    >>> filtered({'c': 'C', 'xox': '!!', 'a': 'A'}, ['xox'])
    {'a': 'A', 'c': 'C'}
    """
    return {k: v for k, v in json.iteritems() if k not in filter_keys}
f148c181826afaf69737d66d46affdee6e58d0dd
248,007
def solution(a: int, b: int, k: int) -> int:
    """
    :return: The number of integers within the range [a..b] that are divisible by k.

    >>> solution(6, 11, 2)
    3
    >>> solution(3, 14, 7)
    2
    """
    # Multiples of k up to b, minus multiples of k strictly below a.
    return b // k - (a - 1) // k
cd88085a63fcc4d33a06109ca272c1cb3480975b
627,245
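An illustrative sanity check for the divisibility-count snippet above (not part of the dataset): the closed-form count should agree with a brute-force count over small inputs.

for a in range(1, 20):
    for b in range(a, 40):
        for k in range(1, 10):
            assert solution(a, b, k) == sum(1 for x in range(a, b + 1) if x % k == 0)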
def read_data_from(file_: str) -> dict: """Get ingredients and allergens from file.""" foods = {} fio = open(file_, "r") for idx, food in enumerate(fio): foods[idx] = { "ingredients": food.split(" (")[0].split(" "), "allergens": food[:-2].split("(contains ")[1].split(", ") } return foods
a83b5f63bf987f5ff523821a33c9c397e93e7f9b
578,073
from typing import Dict
from typing import Any


def marker_smoothing_exceptions(all_exceptions: Dict[str, Any], trial_name: str,
                                marker_name: str) -> Dict[str, Any]:
    """Given all exceptions (all_exceptions) return just the ones for the specified
    marker (marker_name) and trial (trial_name)."""
    trial_exceptions = all_exceptions.get(trial_name, {})
    return trial_exceptions.get(marker_name, {})
d6f33bd4f038f88de1e91e0446f4ee206549fbda
677,446
def convert_byte_to_str(term):
    """Convert a term from bytes to str

    Args:
        term (bytes): The term in bytes

    Returns:
        str: The term in str
    """
    return term.decode("utf-8")
7a914a23383c14c57d8866d22d0bf8a13356c16a
270,749
def get_bond_from_num(n: int) -> str:
    """Returns the SMILES symbol representing a bond with multiplicity ``n``.
    More specifically, ``'' = 1`` and ``'=' = 2`` and ``'#' = 3``.

    :param n: either 1, 2, 3.
    :return: the SMILES symbol representing a bond with multiplicity ``n``.
    """
    return ('', '=', '#')[n - 1]
ea60ea266d7500d0ac2e83c2ce591cce10d17b19
315,498
def hex_to_rgb(hexcode):
    """ Convert Hex code to RGB tuple """
    return (int(hexcode[-6:-4], 16), int(hexcode[-4:-2], 16), int(hexcode[-2:], 16))
0bf09edac600dcf1e6e0bd527d3e48d702a98add
19,090
def computeFraction(poi_messages, all_messages):
    """ compute the fraction of messages to/from a person that are from/to a POI """
    fraction = 0
    if poi_messages == 'NaN' or all_messages == 'NaN':
        fraction = 'NaN'
    else:
        fraction = float(poi_messages)/all_messages
    return fraction
12acfb8bfdbd82637c16dbca78976516da510bf6
638,378
def frac(x, d): """ Utility func -- Works like fractional div, but returns ceiling rather than floor """ return (x + (d-1)) // d
e6cf5f90270a5c7cbad193318d32aa7fbaaf2fba
558,098
import inspect


def input_list(f):
    """Return list of function inputs"""
    return inspect.getfullargspec(f).args
2c6ac4f86b0d9eeb05b503c7ad061b2d129c7b55
60,437
import logging


def _setup_logger() -> logging.Logger:
    """Setup logging."""
    log_format = '[%(asctime)s][%(levelname)s] %(message)s'
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(log_format))
    stream_handler.setLevel(logging.INFO)
    custom_logger = logging.getLogger(__name__)
    custom_logger.addHandler(stream_handler)
    custom_logger.setLevel(logging.INFO)
    custom_logger.propagate = False
    return custom_logger
276c5c301cfe0a3a58f8ba3a0ef38221a624bd9f
98,854
def add_default_value(arg_name, value, **kwargs): """ Add argument if it is not in the kwargs already """ if arg_name not in kwargs.keys(): kwargs[arg_name] = value return kwargs
4c1c319d8f8651a7001a8f585bc285d98be688cc
258,192
import base64


def urlsafe_b64encode(data):
    """urlsafe_b64encode without padding"""
    return base64.urlsafe_b64encode(data).rstrip(b'=')
de3a73fbfdd524004667d04ab8025364767a988f
382,253
def set_iscsi_target_node_auth( client, name, chap_group=None, disable_chap=None, require_chap=None, mutual_chap=None): """Set CHAP authentication for the target node. Args: name: Target node name (ASCII) chap_group: Authentication group ID for this target node disable_chap: CHAP authentication should be disabled for this target node require_chap: CHAP authentication should be required for this target node mutual_chap: CHAP authentication should be mutual/bidirectional Returns: True or False """ params = { 'name': name, } if chap_group: params['chap_group'] = chap_group if disable_chap: params['disable_chap'] = disable_chap if require_chap: params['require_chap'] = require_chap if mutual_chap: params['mutual_chap'] = mutual_chap return client.call('set_iscsi_target_node_auth', params)
ed7cbc22327c56fca7554fbdd330c2a92d3ea282
474,567
import pickle def load_pickle(fpath): """ Loads python object from a pickle file :param fpath: full path to file :return: parsed object """ with open(fpath, 'rb') as f: return pickle.load(f)
3c8147437e98b3b529834aaa427538f6690f0363
195,459
def mapChars(text, m):
    """ For all characters in text, replace if found in map m or keep as-is """
    return ''.join(map(lambda x: m.get(x, x), text))
c7a111343b2adc3ea23b5c3befe206f56fc54c83
489,448
from typing import Counter


def generate_samples(n_samples, func, *args, **kwargs):
    """Call a function a bunch of times and count the results.

    Args:
        n_samples: Number of times to call the function.
        func: The function results are counted from.
        *args, **kwargs: The arguments to pass to func.

    Returns:
        Counter containing results.
    """
    samples = Counter()
    for _ in range(n_samples):
        res = func(*args, **kwargs)
        samples[res] += 1
    return samples
625c2bf6713420e26704d2c2842504343be09434
5,400
def string_from_ids(ids): """ Concatenates the ids with ',' to do only one request for all ids @:return A concatenated string """ return ','.join(ids)
4838aa012d27da6c986dba7ca5a9720f773d181b
326,674
def validate_sequences(seed_sequence, extracted_full_seq, rnac=True):
    """
    Validates whether the SEED sequence matches the sequence extracted
    at specific coordinates

    seed_sequence: A DNA/RNA sequence extracted from the SEED alignment
    extracted_full_seq: A DNA/RNA subsequence extracted at specific locations

    return: True if the sequences match, False otherwise.
    Returns False by default
    """
    new_seed_sequence = seed_sequence
    if rnac is False:
        new_seed_sequence = seed_sequence.replace('U', 'T')
    if extracted_full_seq.find(new_seed_sequence) != -1:
        return True
    return False
e5e3244e76016de7344931c139d1cc92f4f34022
442,958
def dup_lshift(f, n, K):
    """
    Efficiently multiply ``f`` by ``x**n`` in ``K[x]``.

    **Examples**

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densearith import dup_lshift

    >>> f = ZZ.map([1, 0, 1])

    >>> dup_lshift(f, 2, ZZ)
    [1, 0, 1, 0, 0]
    """
    if not f:
        return f
    else:
        return f + [K.zero]*n
948b664ae8e5c283c5a932b39e93c99863eee602
159,533
def generate_table_configs(rng): """Generate random configurations for WiredTiger tables.""" internal_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024 leaf_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024 leaf_value_max = rng.choice([1, 32, 128, 256]) * 1024 * 1024 memory_page_max_lower_bound = leaf_page_max # Assume WT cache size of 1GB as most MDB tests specify this as the cache size. memory_page_max_upper_bound = round( (rng.randint(256, 1024) * 1024 * 1024) / 10) # cache_size / 10 memory_page_max = rng.randint(memory_page_max_lower_bound, memory_page_max_upper_bound) split_pct = rng.choice([50, 60, 75, 100]) prefix_compression = rng.choice(["true", "false"]) block_compressor = rng.choice(["none", "snappy", "zlib", "zstd"]) return "block_compressor={0},internal_page_max={1},leaf_page_max={2},leaf_value_max={3},"\ "memory_page_max={4},prefix_compression={5},split_pct={6}".format(block_compressor, internal_page_max, leaf_page_max, leaf_value_max, memory_page_max, prefix_compression, split_pct)
c23bcbba79bf491b7cd8938d8dfb771912ff85c3
144,188
def strp_brackets(text): """ Strip brackets surrounding a string. """ return text.strip().strip('(').strip(')')
5202268668d55816d7ae13fda95b2d57c7b68df5
109,849
def byte_to_int(b):
    """
    Convert Unsigned byte to int

    :param b: byte value
    :return: int value
    """
    return b & 0xFF
f4f8d6ba84195550d0b7efb91537893825c36b0d
478,921
def hyperlink_title(body, docpath, docname): """ Hyperlink titles by embedding appropriate a tag inside h1 tags (which should only be post titles). """ body = body.replace("<h1>", '<h1><a href="%s.html">' % (docpath + docname), 1) body = body.replace("</h1>", "</a></h1>", 1) return body
ae316226ef64a45c97cd6d094617edc2624d1cc8
695,810
def timedelta_filter(dates, timedelta): """ Make sure there is a minimum time delta between each date in the given list """ filtered_dates = [dates[0]] for date in sorted(dates[1:]): if date - filtered_dates[-1] > timedelta: filtered_dates.append(date) return filtered_dates
6c6ba9e7b9ae68e130d4272702b575a160abd895
52,665
import math def degrees_to_cardinal(degrees): """ Convert degrees to cardinal directions """ # Tuple of cardinal directions clockwise for 360 degrees cardinal_directions = ("N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE", "S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW") # Divide 360 degrees into 16 segments 0-15 # 22.5 degrees per segment # Shift incoming degrees by 11.25 to match Cardinal to Degree # Round down to the nearest integer cardinal_index = math.floor((degrees+11.25) / 22.5) # Take care of 348 to 360 returning 16, set to 0 cardinal_index = cardinal_index % 16 # Return the cardinal direction based on the tuple index return cardinal_directions[cardinal_index]
46f25bae0ed4ee293a11564ac52ec8240fe530af
215,536
def splitFrom(df, attr, val): """ Split DataFrame in two subset based on year attribute :param df: DataFrame to split :param attr: attribute on which split data :param val: value of attribute where do split :return: two subset """ if attr not in df.columns: raise ValueError("******* "+attr+" not in DataFrame *******") subfd1 = df.loc[df[attr] < val] subfd2 = df.loc[df[attr] >= val] return subfd1, subfd2
e2c4f0c03d4b15ec2915e22bb905d4202cdf66cb
696,300
import math


def num_permutations(n):
    """
    Get the number of length n permutations, n! (number of permutations of n
    elements = number of elements in :math:`S_n`).

    :param n: Length of permutations.
    :return: Number of length n permutations.
    :rtype: int
    """
    return math.factorial(n)
515ff3a84aeacf790800dbf65f35cba6b0a5751c
141,688
def get_obj_path_name(object): """ Get the full correct name of the provided object. :param object: UObject :return: String of the Path Name """ if object: return object.PathName(object) else: return "None"
15f5682561e65ddb36d8c0a684b411e7da36a620
133,833
def get_otpauth_url(user, domain, secret_key): """Generate otpauth url from secret key. Arguments: .. csv-table:: :header: "argument", "type", "value" :widths: 7, 7, 40 "*user*", "string", "User." "*domain*", "string", "Domain." "*secret_key*", "string", "Base 32 secret key." Returns: Otpauth url. Usage:: import googauth secret_key = googauth.generate_secret_key() print googauth.get_otpauth_url('user', 'domain.com', secret_key) """ return ('otpauth://totp/' + user + '@' + domain + '?secret=' + secret_key)
0a44bd2f9ca57fe4ebf4f10c4e3a1862bbc16744
158,786
def vec_2_str(vec): """ Convert vector of integers to string. :param vec: [int, int, ...] :return: string """ char_vec = [chr(i) for i in vec] return ''.join(char_vec)
a1c89348dc7bed7e8a20620038aa01d3803884b8
489,858
import re def parse_show_spanning_tree_mst_config(raw_result): """ Parse the 'show spanning-tree mst-config' command raw output. :param str raw_result: vtysh raw result string. :rtype: dict :return: The parsed result of the show spanning-tree \ mst-config command in a dictionary of the form: :: { 'mst_config_id': '70:72:cf:d9:2c:f6', 'mst_config_revision': '8' 'no_instances': '2', 'instance_vlan': {'1': ['1','2'], '2': ['3','4']} } """ mst_conf_re = ( r'\s*MST\s*config\s*ID\s*:\s*(?P<mst_config_id>[^ ]+)\s*\n' r'\s*MST\s*config\s*revision\s*:' r'\s*(?P<mst_config_revision>[0-9]+)\s*\n' r'\s*MST\s*config\s*digest\s*:\s*(?P<mst_digest>[^ ]+)\s*\n' r'\s*Number\s*of\s*instances\s*:\s*(?P<no_instances>[0-9]+)\s*\n' ) instance_re = ( r'(?P<instance>^[0-9]+)\s*(?P<vlan>.+)\s*' ) error = [ r'No\s*record\s*found\.', r'\s*Spanning-tree\s*is\s*disabled' ] instance = {} result = {} for error_str in error: re_result = re.search(error_str, raw_result) if (re_result): result['error'] = str(raw_result) return result re_result = re.search(mst_conf_re, raw_result) assert re_result result = re_result.groupdict() for line in raw_result.splitlines(): re_result = re.search(instance_re, line) if re_result: partial = re_result.groupdict() instance[partial['instance']] = partial['vlan'].split(',') result['instance_vlan'] = instance return result
2f07e13ff7f79e4c15e0fe5111cbc18fb638dae8
699,538
def get_pokemon_title(driver): """ Gets the pokemon title including the name and the number of the pokemon. """ return driver.find_element_by_class_name("pokedex-pokemon-pagination-title").text
4ca126cea899a3c0ab54c7a89ccd759fdb8d3aa6
290,282
def sqrt(number): """ Calculate the floored square root of a number Args: number(int): Number to find the floored squared root Returns: int: Floored Square Root """ if type(number) != int: print("Please input an integer") return -1 if number < 0: return None mid_number = number // 2 min_number = 0 max_number = number while min_number <= max_number: mid_number = min_number + (max_number - min_number) // 2 target_number = mid_number * mid_number if target_number > number: if (mid_number - 1) * (mid_number - 1) < number: return mid_number - 1 max_number = mid_number elif target_number < number: if (mid_number + 1) * (mid_number + 1) > number: return mid_number elif (mid_number + 1) * (mid_number + 1) == number: return mid_number + 1 min_number = mid_number else: return mid_number
a4446983dfad0248d93f0c17ce3b7f1a467e2f10
306,072
def normalize_labels(labels):
    """ Normalize labels to probabilities"""
    s = sum(labels.values())
    normalized = {}
    for k, v in labels.iteritems():
        if s > 0:
            normalized[k] = v / s
        else:
            normalized[k] = v
    return normalized
99ea3918566ca2343c4cbf87e5455e2ec207e21c
58,131
def equalscontent(string1, string2):
    """Tests if two strings are equal. None is treated like an empty string.
    Trailing and leading whitespace is ignored."""
    if not string1:
        string1 = ""
    if not string2:
        string2 = ""
    return string1.strip() == string2.strip()
ff68a0eca8cd2532347152f1d94064b3c6a739ef
110,336
import fnmatch def filter_modules(modpaths, patterns): """filter away modules that match a specific name pattern. Note: since modpaths are full filepaths, the name pattern MUST be specified for the path itself, as opposed to only the filename. Args: modpaths (List[str]): list of absolute modules filepath to filter patterns (List[str]): list of strings with Unix shell-style wildcards. Returns: List[str]: list of absolute filepaths that didnt match any pattern. """ for pattern in patterns: modpaths = [ filepath for filepath in modpaths if not fnmatch.fnmatch(filepath, pattern) ] return modpaths
b17c3b806a992e45dadd045197b11b1d09e646e6
155,180
def check_container(container, _type) -> tuple: """ Given a container object, check and see if the contents match the specified type If the test fails, return the offending type, false flag and the offending index """ if not(hasattr(container, '__iter__')): raise ValueError( f'expecting container object, received: {type(container).__name__}') for i, element in enumerate(container): if not(isinstance(element, _type)): return (type(element), False, i) return (_type, True, -1)
bdea48d769082195e62a349e10a06de3e5733355
228,479
def _IsCryptohomeMounted(cri): """Returns True if a cryptohome vault is mounted at /home/chronos/user.""" return cri.FilesystemMountedAt('/home/chronos/user').startswith( '/home/.shadow/')
741cbcbc8821f5e6bd73f88078fb8380e561c4d2
370,315
def BooleanFromString(s):
    """Interpret 's' as a boolean and return its value. Raise ValueError if
    it's not something we can interpret as true or false."""
    s = s.lower()
    if s in ("true", "t", "1", "on", "yes", "y"):
        return True
    if s in ("false", "f", "0", "off", "no", "n"):
        return False
    raise ValueError("'%s' not a valid boolean" % (s,))
0b1bf77f113da032ea6677f2f56e610b2384cf8a
104,211
def get_format(fmt, string): """Return a dictionary containing a format for each byte order.""" return {"big": fmt(">" + string), "little": fmt("<" + string)}
7326e25454200945bb46db6275dd17518f88e8a4
303,374
from typing import List from typing import Dict def get_all_evidence_utterance_from_conversation( emotion: str, conversation_history: List[str] ) -> Dict[str, List[str]]: """Iterate through a conversation history to let each utterance be the evidence utterance. The last utterance is treated as the target utterance. Ouput dictionary is in a format which can be used with RecconSpanExtractionPreprocessor Args: emotion (str): Emotion of the target utterance conversation_history (List[str]): List of utterance in a conversation. The last utterance is used as the target utterance. Returns: Dict[str, List[str]]: Dictionary in a format that can be used with RecconSpanExtractionPreprocessor The dictionary looks like this: {'emotion': ['happiness'], 'target_utterance': ['......'], 'evidence_utterance': ['......'], 'conversation_history': ['......']} """ conversation_history_text = " ".join(conversation_history) target_utterance = conversation_history[-1] output = { "emotion": [], "target_utterance": [], "evidence_utterance": [], "conversation_history": [], } for evidence_utterance in conversation_history: output["emotion"].append(emotion) output["target_utterance"].append(target_utterance) output["evidence_utterance"].append(evidence_utterance) output["conversation_history"].append(conversation_history_text) return output
93cde9244c88250808fe214f23378bdd04ecb58c
469,041
import ujson def get_json_data(fname): """获取 JSON 数据 Args: fname (str): 存储 JSON 数据的文件路径和文件名 """ return ujson.load(open(fname, 'r'))
e289e927e702dbfc3b01741a15cc9afaf40b4d71
169,848
def convert_units(input_unit): """ Convert units into CloudWatch understandable units. """ input_unit = input_unit.lower() if input_unit == "s/op": return "Seconds" elif input_unit == "bytes" or input_unit == "byte": return "Bytes" elif input_unit == "op/s": return "Count/Second" elif input_unit == "ms": return "Milliseconds" else: print("Unknown unit type", input_unit)
1b253b33616b8e7b580f8e9fa630371ea413a52f
387,776
import json def write_json(d, fjson): """ Args: d (dict) - dictionary to write fjson (str) - file name of json to write Returns: written dictionary """ with open(fjson, 'w') as f: json.dump(d, f) return d
f7f9f58e4879d01e18497721779371d951c7881d
226,712
def find_chan_corr(chan, corr, shape, chan_idx, corr_idx): """ 1. Get channel and correlation from shape if not set and the shape is valid 2. Check they agree if they already agree Parameters ---------- chan : int Existing channel size corr : int Existing correlation size shape : tuple Array shape tuple chan_idx : int Index of channel dimension in ``shape``. corr_idx : int Index of correlation dimension in ``shape``. Returns ------- int Modified channel size int Modified correlation size """ if chan_idx != -1: array_chan = shape[chan_idx] # Corresponds to a None array, ignore if array_chan == -1: pass # chan is not yet set, assign elif chan == 0: chan = array_chan # Check consistency elif chan != array_chan: raise ValueError("Inconsistent Channel Dimension " "in Input Arrays") if corr_idx != -1: array_corr = shape[corr_idx] # Corresponds to a None array, ignore if array_corr == -1: pass # corr is not yet set, assign elif corr == 0: corr = array_corr # Check consistency elif corr != array_corr: raise ValueError("Inconsistent Correlation Dimension " "in Input Arrays") return chan, corr
42d6e1ae917555816e5776e2cf85b2239e26faa3
252,796
def str2bool(value):
    """
    Args:
        value - text to be converted to boolean
        True values: y, yes, true, t, on, 1
        False values: n, no, false, off, 0
    """
    return value in ('y', 'yes', 'true', 't', 'on', '1')
876a58c86b449ba3fac668a4ef2124ea31fda350
4,394
def get_board_stamp(board: list) -> str: """Get Board Stamp This function is used to obtain a string representation of a board state. Args: board (list): The board normal representation. Returns: str: The board string representation. """ board_stamp = "" for row in board: for element in row: board_stamp += str(element) return board_stamp
fab1a2cfd1a03efb195089e7ee00c714cd4767ef
312,883
def PrettifyFrameInfo(frame_indices, functions):
    """Return a string to represent the frames with functions."""
    frames = []
    for frame_index, function in zip(frame_indices, functions):
        frames.append('frame #%s, "%s"' % (frame_index, function.split('(')[0]))
    return '; '.join(frames)
714d6c7c8295fd927888bd898b73c665087be864
96,609
from typing import Dict def rb2coco_bbox( rb_label: Dict, label_id: int, image_id: int, category_id: int, width: int, height: int, ) -> Dict: """Convert rb bbox to coco bbox.""" assert rb_label["bbox2d"] xnorm = rb_label["bbox2d"]["xnorm"] ynorm = rb_label["bbox2d"]["ynorm"] wnorm = rb_label["bbox2d"]["wnorm"] hnorm = rb_label["bbox2d"]["hnorm"] return { "id": label_id, "image_id": image_id, "category_id": category_id, "bbox": [ int(xnorm * width), int(ynorm * height), int(wnorm * width), int(hnorm * height), ], "iscrowd": 0, "area": int(wnorm * width) * int(hnorm * height), "segmentation": [], }
4aa6d3b2c4c21d07e5b60df272f27de290169035
645,427
def layer_counts(layer): """counts the digits in a layer args: layer (a tuple of digits 0-9) returns: dictionary with keys 0-9, values are the frequency of the key """ counts = {i:layer.count(i) for i in range(10)} return counts
bf6096b4fb4e335b0b43aea566117f51d4fd2e60
76,827
def parse_string_range(s):
    """Parses an address range (e.g. 0x400000-0x401000)"""
    addrs = s.split("-")
    return map(lambda x: int(x, 16), addrs)
7a88cb13a70a1450485c9429915446d866023dfd
468,811
import re def striptags(html): """ Returns the string `html` with all html tags stripped. """ return re.sub(r'<[^>]*>', '', html)
8ea4d2288fd91d620db18b1e66b6803e1d9cd11c
236,375
def get_route_by_cidr(route_table, cidr): """Method to check if given CIDR already attached to route table. Args: RouteTable (obj): Route Table object. cidr (str): CIDR string to check in route table. Returns: Route: the route for this CIDR or None if not found """ return dict((r.destination_cidr_block, r) for r in route_table.routes).get(cidr)
878972bcd5b8ad0a216d3be77383c861027f4bb0
174,749
def file_lines_list(file): """ Reads a plaintext file, retuns a list of all non-blank whitespace-trimmed lines""" lines = [] with open(file) as file_object: for i, line in enumerate(file_object): the_line = line.strip() if the_line: lines.append(the_line) return lines
59d5c46afb165cfb797f0ce64238f60789b30d09
319,141
def hv(h, v, comm): """ Multiplication of `m = h v` where `h` is a horizontal distributed matrix (vector) on communicator, `v` is a vertical distributed matrix (vector) on communicator, and `m` is the result of multiplication Parameters ---------- h : (M, K) array_like Distributed matrix (vector). The matrix (vector) is distributed on given communicator `comm` horizontally. In each node of `comm`, `h` is with the shape (M, K). v : (K, N) array_like Distributed matrix (vector). The matrix (vector) is distributed on given communicator `comm` vertically. In each node of `comm`, `v` is with the shape (K, N). comm : MPI_comm MPI communicator. Returns ------- m : (M, N) array_like The result of multiplication. This matrix (vector) is distributed on given communicator `comm`. In each node of `comm`, `m` describe the same value. """ return comm.allreduce(h @ v)
48bfcb45a8e790e8087522aac1bf9c7e319b4629
297,597
def mean(xs):
    """Computes and returns the arithmetic mean of the numbers in xs."""
    sum_xs = 0.0
    for n in xs:
        sum_xs += n
    return sum_xs/len(xs)
23dd369bf521fd672a48f4ba730a6aa125966cde
262,384
def sorted_dict(a_dict): """Simple helper for sorting dictionaries.""" return dict( sorted(a_dict.items()), )
7e26d9c5189d32277a57956d847d7ae3acc8ab5d
345,899
def human_format(num):
    """Make a nice human readable format for numbers."""
    magnitude = 0
    while abs(num) >= 1000:
        magnitude += 1
        num /= 1000.0
    # add more suffixes if you need them
    return "%.2f%s" % (num, ["", "K", "M", "B", "T", "P"][magnitude])
02139008826774257a1acbb1f56a7883de866f49
619,518
import re def remove_languages(title, REGEX_LANGUAGES): """ Removes languages from the input title """ no_languages = re.search(REGEX_LANGUAGES, title) if no_languages != None: return title.replace(no_languages[0], '') else: return title
81de584de8864b63f1533c4752fdee1b4858756b
496,489
def get_pipeline_lines(input_pipeline): """Returns a list with the lines in the .cppipe file""" with open(input_pipeline) as f: lines = f.readlines() return lines
403e7531b1cadfe25f519d2b176b97ac344cde6b
704,243
def mat311mod(a, b): """ Compute moduli of a 3x1 matrix. Parameters ---------- a : tuple of float 3x1 matrix b : float modulus Returns ------- res : tuple of float 3x1 matrix """ res = [0, 0, 0] r3 = range(3) for i in r3: res[i] = int(a[i] - int(a[i]/b)*b) return res
275a509a4f53785c4702151811c63e5009d999c4
72,087
def format_mac(str_hex):
    """Accept a string of hexadecimal digits and return a string of MAC address format.

    Arguments:
        str_hex: a string of hexadecimal digits
    """
    str_mac = ':'.join([str_hex[i:i+2] for i in range(0, 12, 2)])
    return str_mac
9b009984bd09d7ac80db51ae01176bcdcb0014b2
49,909
import re def input_must_match_regex(regex, error_message): """ The user must type an input that matches a regex. Return his input :param regex regex: Regular expression object :param str error_message: The error message that must be displayed :return: The user's valid answer :rtype: str """ answer = None while answer is None: answer = input() if re.search(regex, answer) is None: answer = None print("Please try again:") return answer
2b5dc53910b70e3ab16e3e4062ef22b1c291606a
389,570
def student_ranking(student_scores, student_names): """ :param student_scores: list of scores in descending order. :param student_names: list of names in descending order by exam score. :return: list of strings in format ["<rank>. <student name>: <score>"]. """ student_ranking = [] for index, score in enumerate(student_scores): result = f"{index + 1}. {student_names[index]}: {score}" student_ranking.append(result) return student_ranking
6cecbad9e41f8e50322fe372fcf0cf3bb5870f9c
438,147
def get_id(psco): """ Retrieve the persistent object identifier. :param psco: Persistent object. :return: Persistent object identifier. """ return psco.getID()
7a67b9e2d4dccdf633af466152c19897b2d626b3
283,981
def get_lines(text_string, sub_string): """Get individual lines in a text file Arguments: text_string {string} -- The text string to test sub_string {string} -- The conditional string to perform splitting on Returns: {list} -- A list of split strings """ lines = [line for line in text_string.split("\n") if sub_string in line] return lines
69c7f05167b2a4423011a44ec5b7e40045463d4e
673,551
import collections def longest_path(current_state): """Find longest possible path from the current state to the final state Args: current_state: StateForGraphs The state at the beginning of the search; the root of the tree. Returns: The maximum number of steps that can be used to get from current_state to a final state, using state.possible_next_states to find states reachable in one step from the current state See Also: StateForGraphs to understand the required methods for the states used in the graph. The states must implement __hash__, __eq__, possible_next_states, and is_final """ queue = collections.deque() discovered = {current_state: 0} queue.append(current_state) lengths = set() while queue: state = queue.popleft() num_steps = discovered[state] new_states = state.possible_next_states() for new_state in new_states: if new_state.is_final(): lengths.add(num_steps + 1) elif new_state not in discovered: queue.append(new_state) discovered[new_state] = num_steps + 1 return max(lengths)
8acca90179eaff3f8d06972aec0a63a8050fbcf9
689,086
def append_a(string): """Append "_a" to string and return the modified string""" string = '{}{}'.format(string, '_a') return string
7249063bbb9331de5163ef343399d51884312cd0
676,956
import json def read_json_submission(filepath): """Reads JSON submission""" try: with open(filepath, "r") as sub_f: submission_input = json.load(sub_f) return submission_input except Exception: # Can add validation of input parameters based on workflow here... raise ValueError("Input must be a valid json file")
0e81275deadec89d43bbb2b061693a27e690c0d0
398,126
def safe_str_cmp(a, b):
    """Compare two strings in constant time."""
    if len(a) != len(b):
        return False
    r = 0
    for c, d in zip(a, b):
        r |= ord(c) ^ ord(d)
    return r == 0
aa3ab9ca592cba926d3cdb94e1273825401119ac
642,368
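A brief usage sketch for the constant-time comparison above (illustrative, not part of the dataset): the result matches ==, but the loop always scans the full string, so timing does not reveal where the first mismatch occurs.

assert safe_str_cmp("s3cret-token", "s3cret-token") is True
assert safe_str_cmp("s3cret-token", "s3cret-tokem") is False  # same length, differs in the last char
assert safe_str_cmp("s3cret-token", "s3cret") is False        # length mismatch returns early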
def rightjustify(linetext, linecount, max_countwidth): """Justify ordered line to the right Arguments: linetext {str} -- a text to justify linecount {int} -- a line count (1-based) max_countwidth {int} -- a maximum count width Returns: str -- a justified line """ fill_length = max_countwidth - len(str(linecount)) + 1 return "{}.{}{}".format(linecount, " " * fill_length, linetext)
4ed7ecb2fe4c26fe944496d8b50e44889b4cb29e
646,394
def slugify_province(prov): """ Province name to slug i.e. lowercase, and spaces to dashes. """ return prov.replace(' ', '-').lower()
2a3c7a161640e46b635813745bf4663704c53629
545,544
import math def _dist(i_node, j_node, dist_type) -> float: """Returns the distance between two nodes. dist_type is either 'manhattan' or 'euclidean' """ if dist_type == 'manhattan': return (abs(float(i_node['cx']) - float(j_node['cx'])) + abs(float(i_node['cy']) - float(j_node['cy']))) # otherwise assume euclidean return math.sqrt( (float(i_node['cx']) - float(j_node['cx']))**2 + (float(i_node['cy']) - float(j_node['cy']))**2)
3cc03d68e0b22aa03ab089562124604287a9d087
123,713
def get_file_content(file_path: str) -> bytes: """Return the file content for the given filepath. """ with open(file_path, 'rb') as f: return f.read()
cedba414e95f650d7abdd308a3d42b4d0550f3ca
145,540
def solution(n: int = 998001) -> int:
    """
    Returns the largest palindrome made from the product of two 3-digit
    numbers which is less than n.

    >>> solution(20000)
    19591
    >>> solution(30000)
    29992
    >>> solution(40000)
    39893
    """
    answer = 0
    for i in range(999, 99, -1):  # 3 digit numbers range from 999 down to 100
        for j in range(999, 99, -1):
            product_string = str(i * j)
            if product_string == product_string[::-1] and i * j < n:
                answer = max(answer, i * j)
    return answer
9b9c2363a1d9a304451901215f2f9686ce012e0c
535,845
import json def jsonify(data) -> str: """ Transform data to json :param data: Object :return: JSON strong with object """ return json.dumps(data)
1440d8fe2839f1e001ba660751eb7cbcfd375ec0
156,465
from datetime import datetime


def fromtimestamp_ms(timestamp: int) -> datetime:
    """Construct datetime object from UTC timestamp in milliseconds.

    Args:
        timestamp (int): UTC time in milliseconds

    Returns:
        datetime
    """
    return datetime.fromtimestamp(timestamp / 1000)
58d31ca8c50051b2494bdfcc150ea9889316eb74
332,102
from typing import List import re def collected_items(code: str) -> List[str]: """Extract passing and failing items from fnmatch_lines() call in code.""" # remove any lines with comments code_less_comments = re.sub(r"#.*?$", "", code, flags=re.MULTILINE) pattern = r'"[*](.* PASSED|.* FAILED)[*]"' return re.findall(pattern=pattern, string=code_less_comments)
6874a6330cd9ce8385d5a9d9c5457e430e00df6a
228,469
def size(obj): """Returns the size of an object in bytes""" try: return obj.__sizeof__() except: return 0
210c3197effab215c4fa446507cf28ad05be467d
246,858
def HIMassToFlux(M_HI, dist_Mpc):
    """Converts H I mass (in solar masses) to equivalent H I flux (in Jy km/s)
    based on distance in Mpc.

    Equation originally from Giovanelli & Haynes (1988, in Galactic and
    extragalactic radio astronomy (2nd edition), p.522), based on Roberts
    (1975, in A. Sandage, M. Sandage, and J. Kristian (eds.), Galaxies and the
    Universe. Chicago: University of Chicago Press; p. 309).
    """
    return M_HI / (2.356e5 * dist_Mpc**2)
86fc51cfb7c03281ae4339c0d4df9be371f0a0da
257,916
def _get_mapped_trackable(trackable, object_map): """Returns the mapped trackable if possible, otherwise returns trackable.""" if object_map is None: return trackable else: return object_map.get(trackable, trackable)
69666d4bf02e6d0a2605e951e125d868b9a96e1a
437,654
def MVFR(conf): """Get MVFR Color code from config""" return conf.get_color("colors", "color_mvfr")
a6a6bb1f29b3010f95e96276087d4ad80839ea8f
482,680
def create_base_config(log_dir, seed=123456789): """ Create the base configuration for the experiments. @param log_dir: The directory where this run should be created. @param seed: The random seed to use. @return: A dictionary containing the base configuration for the SP. """ return { 'ninputs': 100, 'trim': 1e-4, 'disable_boost': True, 'seed': seed, 'pct_active': None, 'random_permanence': True, 'pwindow': 0.5, 'global_inhibition': True, 'ncolumns': 200, 'nactive': 50, 'nsynapses': 75, 'seg_th': 15, 'syn_th': 0.5, 'pinc': 0.001, 'pdec': 0.001, 'nepochs': 10, 'log_dir': log_dir }
0a8669ff53b489d432d6063af64b081a9689281d
131,241
import base64


def _string_to_base64(my_str):
    """ Encodes string to base64 """
    return base64.b64encode(bytes(my_str, 'utf-8')).decode()
0fccc30898d7f8dbb3b4a968bd2fbc396fd730ad
328,866
def output_handler(data, context):
    """Post-process TensorFlow Serving output before it is returned to the client.

    Args:
        data (obj): the TensorFlow serving response
        context (Context): an object containing request and configuration details

    Returns:
        (bytes, string): data to return to client, response content type
    """
    if data.status_code != 200:
        raise ValueError(data.content.decode('utf-8'))
    response_content_type = context.accept_header
    prediction = data.content
    return prediction, response_content_type
7d1bbcb2310c4527c5ae9cfcd51be660532555df
701,827