Dataset schema: content (string, 39 to 14.9k characters), sha1 (string, 40 characters), id (int64, 0 to 710k).
def read_scalar(group, dataset_name):
    """
    Read a HDF5 `SCALAR` as a dict.

    All attributes will be assigned as key: value pairs, and the
    scalar value will be assigned the key name 'value'.

    :param group:
        A h5py `Group` or `File` object from which to read the dataset.

    :param dataset_name:
        A `str` containing the pathname of the dataset location.

    :return:
        A `dict` containing the SCALAR value as well as any
        attributes coupled with the SCALAR dataset.
    """
    dataset = group[dataset_name]
    data = {k: v for k, v in dataset.attrs.items()}
    data["value"] = dataset[()]
    return data
6d9908e64f6584d0128756778679f87ffc8cb46f
697,094
def ma(error, params, offset):
    """
    Calculate the moving average part.

    :param error: list of error terms
    :param params: list of coefficients
    :param offset: index of last predicted value
    :return: float
    """
    return sum(params[i] * error[offset - i] for i in range(len(params)))
5ac853fca6a57dd4f349e79445b75b29f62ccbd7
697,095
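A quick worked example of `ma` above (a sketch; the error terms and coefficients below are invented for illustration):

# hypothetical inputs: params[0] weights the error at `offset`, params[1] the one before it
errors = [0.1, -0.2, 0.05, 0.3]
params = [0.5, 0.25]
# 0.5 * errors[3] + 0.25 * errors[2] = 0.15 + 0.0125
print(ma(errors, params, offset=3))  # 0.1625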
import torch


def my_sample_gumbel(shape, eps=1e-20):
    """Samples arbitrary-shaped standard gumbel variables.

    Args:
        shape: list of integers
        eps: float, for numerical stability

    Returns:
        A sample of standard Gumbel random variables
    """
    # Sample from Gumbel(0, 1): G = -log(-log(U)), with eps guarding both logs
    U = torch.rand(shape).float()
    return -torch.log(-torch.log(U + eps) + eps)
21e471bf5fca80316d93ec2a96230471ddc83a45
697,097
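For context, a common use of standard Gumbel samples is the Gumbel-max trick for drawing from a categorical distribution; a minimal sketch, assuming `my_sample_gumbel` as defined above (the probabilities are invented):

import torch

# Gumbel-max trick: argmax(log p + G) is distributed as Categorical(p)
logits = torch.log(torch.tensor([0.1, 0.3, 0.6]))
g = my_sample_gumbel(logits.shape)
sample = torch.argmax(logits + g)  # index drawn with probabilities p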
def _get_package_dict1(reqs: str) -> dict:
    """Helper to parse a requirements str into a dict of (package, version) k, v pairs."""
    return dict(line.split("==") for line in reqs.strip().splitlines())
8b851a871a2e2ef9d9c984831393bf30b29d7a02
697,098
def lowercase(lista):
    """Function to lowercase a list of texts.

    Args:
        lista ([list]): list of texts

    Returns:
        [list]: List of texts lowercased
    """
    return [text.lower() for text in lista]
2be877aa3b80c5e01eb4237625b426123d5b9976
697,099
import re


def get_text_between_parenthesis(string):
    """
    Return a list with all the found substrings between parentheses.
    If the string doesn't contain parentheses, the function returns an
    empty list.

    :param string: a string
    :return: a list
    """
    try:
        text_list = re.findall(r'\(([\d\w\s-]+)\)', string)
    except TypeError:
        text_list = []
    return text_list
cce6abdb5618474848d9cff0f0ace1fab01f5b84
697,101
def get_elevated_session_input(response):
    """Create input for get_elevated_session."""
    return {
        'aws_access_key_id': response['Credentials']['AccessKeyId'],
        'aws_secret_access_key': response['Credentials']['SecretAccessKey'],
        'aws_session_token': response['Credentials']['SessionToken']
    }
cede7d9a695953b8b0f57c6b58e5c69bf84d7887
697,102
def reverse_array(arr):
    """
    Reverse an array along all axes, so arr[i,j] -> arr[-(i+1),-(j+1)].
    """
    # NumPy requires a tuple (not a list) of slices for multi-axis indexing
    reverse_slice = tuple(slice(None, None, -1) for _ in arr.shape)
    return arr[reverse_slice]
659e4aea2f62287aeef6d3d4d80b3c58f937ace2
697,104
from functools import reduce


def split_camel_case(text) -> list:
    """Splits words from CamelCase text."""
    # note: assumes `text` starts with an uppercase letter, since a leading
    # lowercase character would index into the empty accumulator
    return list(reduce(
        lambda a, b: (a + [b] if b.isupper() else a[:-1] + [a[-1] + b]),
        text,
        []
    ))
5a6f2fcfcdb378295554cc38a88679e054341068
697,105
def pageurl_template(category, pagenum, date):
    """Function to return formatted URL given category, page number, date."""
    assert len(str(date)) == 8, 'Invalid input date format'
    return f"https://news.daum.net/breakingnews/{category}?page={pagenum}&regDate={date}"
e6a7c9c86c16cd775919588b918c6d49b8d3a5bd
697,107
def parse_transaction_weights(config_root):
    """
    Parse the transaction types and weights from the XML configuration.

    Parameters
    ----------
    config_root : xml.Element
        The root of the XML config file.

    Returns
    -------
    transaction_weights : [dict]
        An array of dictionaries formatted as
        {"name": str(transaction_name), "weight": int(value)}
        that corresponds to the transaction types and weights.

    Raises
    ------
    KeyError
        If works.work.weights or transactiontype is not a key in the XML file.
    RuntimeError
        If there are a different number of transaction types and weights.
    """
    try:
        transaction_types = \
            config_root.find('transactiontypes').findall('transactiontype')
        weights_values = \
            config_root.find('works').find('work').findall('weights')
    except AttributeError:
        raise KeyError(
            "Couldn't parse the config file for txn types and weights.")

    if len(transaction_types) != len(weights_values):
        raise RuntimeError("Mismatched number of txn types and weights.")

    weights = []
    for txn_type, weight in zip(transaction_types, weights_values):
        txn_name = txn_type.find('name').text
        weights.append({'name': txn_name, 'weight': int(weight.text)})
    return weights
085a963d95d7e14fcaf688317374f33373dc6c0d
697,109
def cumsum(vals):
    """Return a cumulative sum of vals (as a list)."""
    lst = []
    tot = 0
    for v in vals:
        tot += v
        lst.append(tot)
    return lst
e9ea4065fa7a044ae738e33dbd855cd1948e8c45
697,111
def count_measurements(report_uuid: str, database) -> int:
    """Return the number of measurements."""
    return database.measurements.count_documents(filter={"report_uuid": report_uuid})
c9f20149ec975134ad7f0abab28a2c632f5295a1
697,115
def normalize_letters(one_letter_code):
    """Convert RAF one-letter amino acid codes into IUPAC standard codes.

    Letters are uppercased, and "." ("Unknown") is converted to "X".
    """
    if one_letter_code == '.':
        return 'X'
    return one_letter_code.upper()
ad1d80a4663859194f84a31dc03001481b477503
697,117
def is_original_process_func(clsdict, bases, base_class=None):
    """Only wrap the original `process` function.

    Without these (minimal) checks, the `process` function would be
    wrapped at least twice (the original `process` function from the
    user's DoFn, and our wrapped/decorated one), essentially causing
    any call to `process` (and the decorator) to be called at least
    twice.

    Args:
        clsdict (dict): dictionary of items for the class being instantiated.
        bases (tuple(class)): base class(es) of the class being instantiated.

    Returns:
        (bool) whether or not to wrap the `process` method of the class
        being instantiated.
    """
    if "process" not in clsdict:
        return False

    # ignore classes that don't inherit from our base class
    base_cls_names = [b.__name__ for b in bases]
    if base_class and base_class not in base_cls_names:
        return False

    # if the value of clsdict["process"] is not a meth/func
    if not callable(clsdict["process"]):
        return False

    # if the value of clsdict["process"] is already "new_process"
    if getattr(clsdict["process"], "__name__") != "process":
        return False

    return True
17598fa632fc4dc9c8e52613325e6448b3064559
697,119
def parse_identifiers(identifiers):
    """
    Reads identifiers, which may be a string or a list/tuple/set of object
    instances with a string `name` attribute, returning a frozen set of names.
    """
    if isinstance(identifiers, str):
        return frozenset(identifiers.split(','))
    if not isinstance(identifiers, (list, tuple, set)):
        identifiers = identifiers,
    keys = list(identifiers)
    for i, key in enumerate(keys):
        if not isinstance(key, str):
            assert hasattr(key, 'name'), \
                "Each element in hashable tuple must be a string or have a name attribute"
            key_name = key.name
            assert isinstance(key_name, str), \
                "Each non-string hashable tuple element must have a string name attribute"
            keys[i] = key_name
    return frozenset(keys)
285a12352607ecc6b4a0c9ceb6f0ca7e2f56988e
697,122
def csc_cumsum_i(p, c, n):
    """
    p[0..n] = cumulative sum of c[0..n-1], and then copy p[0..n-1] into c.

    @param p: size n+1, cumulative sum of c
    @param c: size n, overwritten with p[0..n-1] on output
    @param n: length of c
    @return: sum(c), null on error
    """
    nz = 0
    nz2 = 0.0
    for i in range(n):
        p[i] = nz
        nz += c[i]
        nz2 += c[i]    # also in double to avoid CS_INT overflow
        c[i] = p[i]    # also copy p[0..n-1] back into c[0..n-1]
    p[n] = nz
    return int(nz2)    # return sum (c [0..n-1])
ab567b6b357fc7e5e1b0a961b9b66d88487a0e7f
697,125
def boolean_flag(name, configurable, set_help='', unset_help=''):
    """Helper for building basic --trait, --no-trait flags.

    Parameters
    ----------
    name : str
        The name of the flag.
    configurable : str
        The 'Class.trait' string of the trait to be set/unset with the flag
    set_help : unicode
        help string for --name flag
    unset_help : unicode
        help string for --no-name flag

    Returns
    -------
    cfg : dict
        A dict with two keys: 'name', and 'no-name', for setting and
        unsetting the trait, respectively.
    """
    # default helpstrings
    set_help = set_help or "set %s=True" % configurable
    unset_help = unset_help or "set %s=False" % configurable

    cls, trait = configurable.split('.')

    setter = {cls: {trait: True}}
    unsetter = {cls: {trait: False}}
    return {name: (setter, set_help), 'no-' + name: (unsetter, unset_help)}
dabb654a75123a79b865c1570e6ed74568c5ae41
697,127
def LevenshteinCost(a, b):
    """Cost function for Levenshtein distance with substitutions.

    Cost function for what is now thought of as the classical Levenshtein
    distance, which is the minimum number of insertions, deletions, and
    substitutions required to edit one sequence into another. Returns zero
    for matches, unity for indels and substitutions.

    Args:
        a: input symbol, or None for insertions
        b: output symbol, or None for deletions

    Returns:
        0 for exact match
        1 for mismatch / substitution
        1 for insertion or deletion
    """
    return 0 if a == b else 1
8d82b0003d1fa00720270c33cfd4a119dd5f00c7
697,132
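As a sketch of how such a cost function is typically consumed (this DP driver is not part of the original entry; it is the textbook edit-distance recurrence calling `LevenshteinCost`):

def levenshtein_distance(s, t, cost=LevenshteinCost):
    # d[i][j] = distance between s[:i] and t[:j]
    d = [[0] * (len(t) + 1) for _ in range(len(s) + 1)]
    for i in range(1, len(s) + 1):
        d[i][0] = d[i - 1][0] + cost(s[i - 1], None)    # deletion
    for j in range(1, len(t) + 1):
        d[0][j] = d[0][j - 1] + cost(None, t[j - 1])    # insertion
    for i in range(1, len(s) + 1):
        for j in range(1, len(t) + 1):
            d[i][j] = min(
                d[i - 1][j] + cost(s[i - 1], None),         # deletion
                d[i][j - 1] + cost(None, t[j - 1]),         # insertion
                d[i - 1][j - 1] + cost(s[i - 1], t[j - 1])  # match/substitution
            )
    return d[len(s)][len(t)]

assert levenshtein_distance("kitten", "sitting") == 3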
def pedersenOpen(n, g, h, m, r, c):
    """Open a pedersen commit.

    Arguments:
    n    modulus (i.e. Z*_n)
    g    generator 1
    h    generator 2
    m    message
    r    random
    c    commit generated by pedersenCommit() to verify
    """
    # pow(base, exp, mod) keeps intermediate values small
    return c == (pow(g, m, n) * pow(h, r, n)) % n
4657cd68214566f4e2e8231b61067963478af9a1
697,139
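For completeness, a hypothetical `pedersenCommit` counterpart consistent with the opener above (a sketch only; the toy parameters are invented and not cryptographically sound):

def pedersenCommit(n, g, h, m, r):
    # hypothetical commit matching pedersenOpen(): c = g^m * h^r mod n
    return (pow(g, m, n) * pow(h, r, n)) % n

# toy numbers only -- real use needs a proper group and a random r
n, g, h = 23, 4, 9
c = pedersenCommit(n, g, h, m=7, r=5)
assert pedersenOpen(n, g, h, 7, 5, c)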
def extend_indices(segments, margin):
    """
    Decrease and increase the values of the first and last elements
    respectively in each list of segments by a given margin. The first
    index of the first segment and the last index of the last segment
    are not modified.

    Parameters
    ----------
    segments : list
        The list of lists of first and last indices of segments.
    margin : int
        The extra extent to add on each side of segments.

    Example
    -------
    >>> segments = split_range(16, 4)
    >>> extend_indices(segments, margin=1)
    [[0, 5], [3, 9], [7, 13], [11, 16]]
    """
    if len(segments) == 1:
        return segments
    # first process the first and last segments
    segments[0][-1] += margin
    segments[-1][0] -= margin
    # if there are more than 2 segments
    for i in range(1, len(segments) - 1):
        segments[i][0] -= margin
        segments[i][-1] += margin
    return segments
34da9377af9342a811fb40bdadcadce174fcb605
697,140
from datetime import datetime


def datetime_to_ms(dt: datetime) -> int:
    """Convert a datetime to milliseconds."""
    epoch = datetime.fromtimestamp(0, dt.tzinfo)
    return int((dt - epoch).total_seconds() * 1000.0)
c14128887dfa7201d68b055c2af5be56f5f8ab37
697,148
def get_length(key_list):
    """
    Get length in the TLV

    :param key_list: Key to parse
    :return: Length in the TLV
    """
    # Find tag value
    if 2 == int(key_list[0], 16):  # pylint: disable=misplaced-comparison-constant
        # check MSB is zero, if yes then this byte indicates length
        # else this byte indicates number of bytes for length calculation
        if int(key_list[1], 16) & 0x80 == 0x00:
            length = int(key_list[1], 16)
            length += 2
        else:
            length_bytes = int(key_list[1], 16) & 0x7f  # Mask MSB
            length_header = 2 + length_bytes
            length = int(key_list[2], 16)
            for i in range(1, length_bytes):
                length = length * 0x100
                length = length | int(key_list[2 + i], 16)
            length += length_header
        return length
    return 0
70b2b2069b9cd55f49da5711cfcd816e371affb8
697,149
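A worked example of the two length encodings `get_length` handles (hex-string input is implied by the `int(x, 16)` calls; the bytes below are invented):

# short form: tag 0x02, length byte 0x03 (MSB clear) -> 3 + 2 header bytes
print(get_length(['02', '03', 'aa', 'bb', 'cc']))  # 5

# long form: 0x82 has the MSB set, so two length bytes follow:
# 0x01 0x00 = 256, plus the 4-byte header (tag, 0x82, two length bytes)
print(get_length(['02', '82', '01', '00']))        # 260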
def get_skidl_spice_ref(skidle_element):
    """
    Helper function to retrieve a SKiDL element name as it appears in the
    final netlist.

    Args:
        skidle_element (skidl.Part.Part): SKiDL part to get the netlist name from

    Returns:
        returns a string with the netlist name of `skidle_element`, or
        throws an error if `skidle_element` is not a SKiDL part
    """
    # need to do this for now since in newer versions the skidl parts class is
    # skidl.part.Part; in older versions it is skidl.Part.Part
    assert repr(type(skidle_element)) in (
        "<class 'skidl.part.Part'>",
        "<class 'skidl.Part.Part'>",
    ), '`skidle_element` must be a SKiDL part'

    if skidle_element.ref_prefix != skidle_element.ref[0]:
        return skidle_element.ref_prefix + skidle_element.ref
    return skidle_element.ref
4eacd423cbcdbc5171eb1f4b5ef81d6482f34843
697,151
def weighted_values(values, weights):
    """Returns paired lists of results and their GCM weights.

    Args:
        values: Dictionary of GCM -> result value
        weights: Dictionary of GCM -> GCM weight
    """
    models = values.keys()
    values_list = [values[model] for model in models if model in weights]
    weights_list = [weights[model] for model in models if model in weights]
    return (values_list, weights_list)
6bd59e99d19d68535148f5bdf34bcec2f0cb4736
697,154
from typing import Tuple


def set_add(arr: Tuple[str, ...]) -> int:
    """
    >>> set_add(('UK', 'China', 'USA', 'France', 'New Zealand', 'UK', 'France'))
    5
    """
    return len(set(arr))
e47705878c34400e3763a887cba4568dd5d621e4
697,155
def rank_index(square):
    """Gets the rank index of the square where ``0`` is the first rank."""
    return square >> 3
d4d3f896b5a7cd8b7184320efd69a652c61ccbcf
697,156
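This assumes the 0-63 square numbering used by chess libraries (a1=0, h1=7, a2=8, ...); for example:

# a1 = 0 -> rank 0; e4 = 28 -> rank 3; h8 = 63 -> rank 7
assert rank_index(0) == 0
assert rank_index(28) == 3
assert rank_index(63) == 7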
def html_summary_table(data):
    """Generate HTML table of novelty detection statistics.

    data: dict
        A JSON like structure of the table data that has the following format:
        data = {
            'modelA': {
                'inlier_name': 'MNIST',
                'outliers': {
                    'Fashion MNIST': {
                        'fpr_at_95_tpr': 0.02123,
                        'detection_error': 0.02373,
                        'auroc': 0.96573,
                        'aupr_in': 0.91231,
                        'aupr_out': 0.9852
                    },
                    'EMNIST Letters': {
                        'fpr_at_95_tpr': 0.02123,
                        'detection_error': 0.02373,
                        'auroc': 0.96573,
                        'aupr_in': 0.91231,
                        'aupr_out': 0.9852,
                    }
                }
            },
            'modelB': {
                'inlier_name': 'MNIST',
                'outliers': {...}
            }
        }
    """
    table = """
    <table>
        <tr>
            <th>Model</th>
            <th>Out-of-distribution dataset</th>
            <th>FPR (95% TPR)</th>
            <th>Detection Error</th>
            <th>AUROC</th>
            <th>AUPR In</th>
            <th>AUPR Out</th>
        </tr>
    """
    for i, (model, model_data) in enumerate(data.items()):
        table += "<tr>"
        table += "<td rowspan={}><b>{}</b> ({})</td>".format(
            len(model_data['outliers']), model, model_data['inlier_name'])
        for j, (outlier_name, scores) in enumerate(model_data['outliers'].items()):
            if j != 0:
                table += "<tr>"
            table += "<td>{}</td>".format(outlier_name)
            table += "<td>{:.1f}</td>".format(scores['fpr_at_95_tpr'] * 100)
            table += "<td>{:.1f}</td>".format(scores['detection_error'] * 100)
            table += "<td>{:.1f}</td>".format(scores['auroc'] * 100)
            table += "<td>{:.1f}</td>".format(scores['aupr_in'] * 100)
            table += "<td>{:.1f}</td>".format(scores['aupr_out'] * 100)
            table += "</tr>"
    table += "</table>"
    return table
53162dfab20859b03408ff67c3a2421b0e84e33e
697,157
def hello(name):
    """Say hello."""
    return "hello " + name
3fd85b4c2f719e6c2abb7cd21bd7df2fe38c0272
697,158
def getSize(l):
    """
    Returns size of list

    Example
    >>> getSize([0,4,32,21])
    4
    """
    c = 0
    for i in l:
        c = c + 1
    return c
ab59a55778ed4b8034debea20d33c1cb609a4243
697,159
def _miriam_identifiers(type_, namespace, identifier):
    """
    Fix the MetaNetX identifiers into miriam equivalents.

    MetaNetX doesn't use correct miriam identifiers. This function maps the
    known namespace and entity identifiers used by MetaNetX to valid miriam
    identifiers.

    Parameters
    ----------
    type_ : string
        "compartment", "reaction" or "metabolite"
    namespace : string
        The MetaNetX namespace identifier
    identifier : string
        The object identifier

    Returns
    -------
    namespace : string
        The corrected namespace
    identifier : string
        The corrected identifier
    """
    if type_ == "compartment":
        ns_map = {
            "bigg": "bigg.compartment",
            "cco": "cco",
            "go": "go",
            "name": "name",  # unconfirmed
            "seed": "seed",
        }
        return (ns_map[namespace], identifier)
    elif type_ == "reaction":
        ns_map = {
            "bigg": "bigg.reaction",
            "deprecated": "metanetx.reaction",
            "kegg": "kegg.reaction",
            "metacyc": "metacyc.reaction",
            "reactome": "reactome",
            "rhea": "rhea",
            "sabiork": "sabiork.reaction",
            "seed": "seed.reaction",
        }
        return (ns_map[namespace], identifier)
    elif type_ == "metabolite":
        if namespace == "kegg":
            kegg_map = {
                "C": "kegg.compound",
                "D": "kegg.drug",
                "E": "kegg.environ",
                "G": "kegg.glycan",
            }
            return (kegg_map[identifier[0]], identifier)
        elif namespace == "slm":
            return ("swisslipid", f"SLM:{identifier}")
        elif namespace == "chebi":
            return (namespace, f"CHEBI:{identifier}")
        else:
            ns_map = {
                "bigg": "bigg.metabolite",
                "deprecated": "metanetx.chemical",
                "envipath": "envipath",  # unconfirmed
                "hmdb": "hmdb",
                "lipidmaps": "lipidmaps",
                "metacyc": "metacyc.compound",
                "reactome": "reactome",
                "sabiork": "sabiork.compound",
                "seed": "seed.compound",
            }
            return (ns_map[namespace], identifier)
19218dbe7e086dabf6527b26e770986462428ecb
697,161
from typing import List


def add_to_rightmost_int(stack: List, x: int) -> List:
    """
    Add x to the rightmost int in stack.
    If there is no int in stack, do nothing.
    Return the modified stack.
    """
    int_locations = [isinstance(i, int) for i in stack]
    if not any(int_locations):
        return stack
    int_locations.reverse()
    last_index = len(int_locations) - 1 - int_locations.index(True)
    stack[last_index] += x
    return stack
84240d412539cc221edd24462f97bb84a1eca051
697,164
def mapToRange(val, src, dst):
    """
    Map the given value from the range of src to the range of dst.
    """
    return ((val - src[0]) / (src[1] - src[0])) * (dst[1] - dst[0]) + dst[0]
769dd6f6c52b0c8cdc2b0358457820c0ed15a1f8
697,165
def is_true(boolstring: str):
    """
    Converts an environment variable to a Python boolean.
    """
    return boolstring.lower() in ('true', '1')
466c839ff5ea25e970c9ce1a86fbcbc7d5fec3a1
697,166
import optparse


def parse_shards_into(option, opt, value, parser):
    """Parse lists of shards or shard ranges into a sorted list.

    Examples:
        0-2
        0,1-3,5
        1,3,5
    """
    def shard_range_parser(shards):
        result = set()
        for part in shards.split(','):
            x = part.split('-')
            result.update(range(int(x[0]), int(x[-1]) + 1))
        return sorted(result)

    try:
        setattr(parser.values, option.dest, shard_range_parser(value))
    except ValueError as e:
        raise optparse.OptionValueError('Failed to parse: %s' % e)
1262db211fe698f3e38b5b61e8dce1bddd2dcb21
697,169
def edits2str(edits):
    """Get a printable representation for a list of edits."""
    return [str(edit) for edit in edits]
599afcd8636e0939b72ebc88ebb88743fd4fe6a6
697,171
import re


def _get_enclosed_str(text, prefix, suffix):
    """
    Remove prefix and suffix from the string if the string contains
    both prefix and suffix.
    """
    success = False
    text = text.strip()
    pattern = f"^{prefix}.+{suffix}$"
    if re.search(pattern, text):
        pattern1 = f"^{prefix}"
        pattern2 = f"{suffix}$"
        text = re.sub(pattern1, "", text)
        text = re.sub(pattern2, "", text)
        success = True
    return text, success
4a4305d18308491f477ae929c4009af39f345472
697,173
def demo_app_name(name):
    """Returns a capitalized title for the app, with "Dash" in front."""
    return 'Dash ' + name.replace('app_', '').replace('_', ' ').title()
58e39a80c1940f784daa6e0f6ee35df2757c5ca6
697,178
def parse_italic(soup):
    """Replace em tags with text:span with an automatic ITALIC style."""
    italic_tags = soup.find_all("em")
    for i_tag in italic_tags:
        i_tag.name = 'text:span'
        i_tag.attrs['text:style-name'] = "ITALIC"
    return soup
c2c58afa0a68328088900e5324f4908d74ddbb94
697,181
def to_dict(condset: set) -> dict:
    """
    Create a dictionary of conditions with a unique integer value for each
    condition.

    :param condset: Conditions set.
    :return: Dictionary of all conditions with integer values.
    """
    return {str(item): index for index, item in enumerate(condset)}
3fd9953139ac3785aeb2ba527bc0365e3908f376
697,185
def GL2PL(gl):
    """Converts genotype likelihoods to phred scaled (PL) genotype likelihoods."""
    return -int(gl * 10)
d842a4a25ee6b095cfb456ab21965df4ccc24c1d
697,189
import socket


def port_to_int(port):
    """Convert a port string to an integer."""
    try:
        return int(port)
    except ValueError:
        # fall back to looking up a named service, e.g. "http" -> 80
        return socket.getservbyname(port)
9f4ca2d80fdb70e16aaa0f0ccfa027fed772a986
697,191
def expand_markings(granular_markings):
    """Expand granular markings list.

    If there is more than one selector per granular marking, it will be
    expanded using the same marking_ref.

    Example:
        >>> expand_markings([
        ...     {
        ...         "selectors": [
        ...             "description",
        ...             "name"
        ...         ],
        ...         "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
        ...     }
        ... ])
        [
            {
                "selectors": [
                    "description"
                ],
                "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
            },
            {
                "selectors": [
                    "name"
                ],
                "marking_ref": "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"
            }
        ]

    Args:
        granular_markings: The granular markings list property present in a
            SDO or SRO.

    Returns:
        list: A list with all markings expanded.
    """
    expanded = []
    for marking in granular_markings:
        selectors = marking.get('selectors')
        marking_ref = marking.get('marking_ref')
        lang = marking.get('lang')
        if marking_ref:
            expanded.extend(
                [
                    {'marking_ref': marking_ref, 'selectors': [selector]}
                    for selector in selectors
                ],
            )
        if lang:
            expanded.extend(
                [
                    {'lang': lang, 'selectors': [selector]}
                    for selector in selectors
                ],
            )
    return expanded
1d97c93b7953293cd71a86d06e988373f0a9150d
697,192
def first_nonequal_idx(left: str, right: str) -> int:
    """
    Find the first string index where left and right strings do not match

        In [1]: first_nonequal_idx("", "californian")
        Out[1]: 0

        In [2]: first_nonequal_idx("aba", "abc")
        Out[2]: 2

    Note, if the strings match, the first-non-equal index will be equal to
    the length of the string:

        In [3]: first_nonequal_idx("aba", "aba")
        Out[3]: 3
    """
    idx = 0
    max_search_len = min(len(left), len(right))
    while idx < max_search_len and left[idx] == right[idx]:
        idx += 1
    return idx
7bc0397dd290c6005adc96b8b45f14e04385a257
697,194
def is_viable_non_dupe(text: str, comparison) -> bool:
    """text must be longer than 2 ('""'), not 'NULL' and not in comparison.

    :param text: String to be tested.
    :param comparison: Dictionary or set to search for text in.
    :return: bool
    """
    return 2 < len(text) and text != 'NULL' and text not in comparison
da575dffde2f13e849949350aabe73f40802f14a
697,200
def _proc(tr, sampling_rate=10):
    """
    Basic processing including downsampling, detrend, and demean.

    :param tr: raw trace
    :param sampling_rate:
    :return tr2: trace after processing
    """
    # deep copy
    tr2 = tr.copy()
    tr2.interpolate(sampling_rate)
    tr2.detrend(type="linear")
    tr2.detrend(type="demean")
    return tr2
99dab5d384b9db80b257c8bd0068f96cbc82c532
697,202
def get_neighbors(point):
    """Given a 2D point (represented as a Point object), returns a list of the
    four points that neighbor it in the four coordinate directions. Uses the
    "copy" method to avoid modifying the original point."""
    p1 = point.copy()
    p2 = point.copy()
    p3 = point.copy()
    p4 = point.copy()
    p1.setX(point.getX() - 1)
    p2.setX(point.getX() + 1)
    p3.setY(point.getY() - 1)
    p4.setY(point.getY() + 1)
    return [p1, p2, p3, p4]
67df39fe0d6fd61fed70f1521b13754d1230ac9e
697,203
from typing import Dict


def read_config(return_config: str, config: Dict):
    """
    Reads the config file and goes through the config dictionary to load all
    of the parameters. Depending on whether return_config is dashboard or
    logger it returns the necessary information to create the dashboard or
    the logger. If any of the logger or dashboard options are not included
    in the config file, returns them as None. The constructor handles
    default values if this happens.

    :param return_config: String indicating what information I am asking for.
        It should only be 'logger' or 'dashboard' respectively.
    :param config: The dictionary that needs to be read.
    """
    lg_parameters = []
    dash_plots = []
    refresh = None
    ips = None
    load_directory = None
    save_directory = None

    for key in config.keys():
        # check if the key is options and load the specified settings.
        if key == 'options':
            if 'refresh_rate' in config[key]:
                refresh = config[key]['refresh_rate']
            if 'allowed_ip' in config[key]:
                ips = config[key]['allowed_ip']
            if 'load_and_save' in config[key]:
                load_directory = config[key]['load_and_save']
                save_directory = config[key]['load_and_save']
            else:
                if 'save_directory' in config[key]:
                    save_directory = config[key]['save_directory']
                if 'load_directory' in config[key]:
                    load_directory = config[key]['load_directory']
        elif key == 'plots':
            for plot in config[key].keys():
                # check what information it needs
                if return_config == 'logger':
                    for params in config[key][plot].keys():
                        # default configs. If they exist in config they will
                        # get overwritten. Used for constructor.
                        server_param = 'localhost'
                        port_param = 5555
                        interval_param = 1
                        # check if the optional options exist in the
                        # dictionary and overwrite them if they do.
                        if 'server' in config[key][plot][params]:
                            server_param = config[key][plot][params]['server']
                        if 'port' in config[key][plot][params]:
                            port_param = config[key][plot][params]['port']
                        if 'options' in config[key][plot][params]:
                            if 'interval' in config[key][plot][params]['options']:
                                interval_param = config[key][plot][params]['options']['interval']
                        # a tuple with the specified parameters for the logger
                        name = params
                        source_type = config[key][plot][params]['source_type']
                        parameter_path = config[key][plot][params]['parameter_path']
                        # appends the tuple with the information for the
                        # parameters constructor
                        lg_parameters.append((name, source_type, parameter_path,
                                              server_param, port_param, interval_param))
                elif return_config == 'dashboard':
                    name_list = []
                    for params in config[key][plot].keys():
                        # append the names of the parameter
                        name_list.append(params)
                    # append a tuple with the plot and a list of the parameters.
                    dash_plots.append((plot, name_list))

    # returns the correct information for each object
    if return_config == 'logger':
        return lg_parameters, refresh, save_directory
    elif return_config == 'dashboard':
        return dash_plots, refresh, load_directory, ips
cc91ebe633f01259fa3344f93698ab806ee9d9fc
697,206
def total_yngve_depth(yngve_tree_root):
    """Returns the total depth of the yngve tree of the sentence.

    Args:
        yngve_tree_root (obj): The root node

    Returns:
        int: The total depth of the yngve tree
    """
    return sum(leaf.score for leaf in yngve_tree_root.leaves)
9220147ed529bac780b7cb5e60ab2364af47f9f6
697,209
def is_function(f):
    """
    Is it a function?

    :param f: function
    :return: boolean
    """
    return callable(f)
c330b81c3b09a0ba8e475e322df5f90d89e39e21
697,210
def inRects(R, x, y):
    """inRects returns True if (x, y) is in any of the rectangles in R."""
    return any(x0 <= x < x1 and y0 <= y < y1 for x0, y0, x1, y1 in R)
f2e4a5c5d60e4f37ee1a8ec9d54a050b3e12b022
697,211
def load_corpus(corpus_path, proc_mode=0):
    """Load the corpus from disk."""
    # proc_mode is currently unused
    with open(corpus_path, 'r') as corpusFile:
        corpus_text = corpusFile.read()
    return corpus_text
045ed4aa97a6e685edefaf03d8f45cd5afb51526
697,214
import token


def decompose_name(node):
    """
    NOTE: Per the lib2to3 grammar:

        dotted_name: NAME ('.' NAME)*

    This means that dotted_name can be either dotted or not dotted, i.e. it's
    a generalized form of NAME. So this function will cover both cases.

    Given a dotted_name node this will return a tuple of the form
    (pkg, name, full_string) where all are str

    ex:
        a.b.c => (a.b, c, a.b.c)
        b.c   => (b, c, b.c)
        c     => (None, c, c)

    otherwise it will return None for each field
    """
    if node.type == token.NAME:
        # node is just a name, no dots
        return '', node.value, node.value

    if node.children:
        # Right most node will be the name, i.e. a.b.c = ['a','.','b','.','c']
        name_node = node.children[-1]
        package_nodes = node.children[:-2]

        name = str(name_node).strip()
        package = ''.join(str(n).strip() for n in package_nodes)
        full = ''.join(str(n).strip() for n in node.children)

        return package, name, full

    return None, None, None
2be377913f6dd2a13335d29b0f932de6ebe35c12
697,216
from pathlib import Path
from typing import Callable
from typing import List


def find_paths(path: Path, pattern: str, filter: Callable[[Path], bool]) -> List[Path]:
    """
    Glob pattern relative to path and filter results by predicate.
    """
    return [x for x in sorted(path.glob(pattern), key=str) if filter(x)]
cce368e6dc3b97f715b3f82a5cc5837942d600b9
697,218
def abv(og, fg, from_carbonation=0):
    """Work out alcohol content from fermentation data (optionally including
    carbonation).
    """
    value = (float(og) - float(fg)) / 1.938 + float(from_carbonation)
    return float(value)
d3cf1e0c645d07bf98c70f1b087f0f1213e4bf21
697,221
def find_remote_addr(req):
    """Determine the correct IP address of the requester."""
    if req.headers.get('CF-Connecting-IP'):
        return req.headers.get('CF-Connecting-IP')
    if req.headers.get('X-Forwarded-For'):
        return req.headers.get('X-Forwarded-For')
    return req.remote_addr
ddef91a116fb47a12acd71417b70af88484fd780
697,225
def get_rectangle_edges(x, y, width, height):
    """Return the 4 edges of a rectangle as a list.

    Edges are in clock-wise order, starting from the top.

    Each edge is returned as ``(start_point, end_point)`` and each point as
    ``(x, y)`` coordinates.
    """
    # In clock-wise order, starting on top left
    corners = [
        (x, y),
        (x + width, y),
        (x + width, y + height),
        (x, y + height)]
    # clock-wise order, starting on top right
    shifted_corners = corners[1:] + corners[:1]
    # materialize as a list so the docstring holds in Python 3
    return list(zip(corners, shifted_corners))
e0738bd8d742eb9f9ae076e2e89031cd8eb74796
697,232
import math


def comb(n, r):
    """Combinations of n objects by r, namely picking r among n possible.

    comb(n, r) = n!/(r!(n-r)!)
    """
    # use integer division so the result stays exact for large n
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r))
61944512cec3555bc15efb73e1ca90510c86caa9
697,234
import re


def safe_dag_id(s: str) -> str:
    """Remove invalid characters for dag_id."""
    return re.sub('[^0-9a-zA-Z_]+', '_', s)
950dd59baaea5b0a94b1d5f67e8699c8779dae15
697,239
import yaml


def unicode_representer(self, value):
    """Represents unicode strings as regular strings."""
    return yaml.ScalarNode(tag='tag:yaml.org,2002:str', value=value)
7eaa92e17be6fa707cb84cdea877ee451dc48694
697,240
def rnn_args_from_config(rnn_config):
    """
    Takes a Config object corresponding to RNN settings (for example
    `config.algo.rnn` in BCConfig) and extracts rnn kwargs for
    instantiating rnn networks.
    """
    return dict(
        rnn_hidden_dim=rnn_config.hidden_dim,
        rnn_num_layers=rnn_config.num_layers,
        rnn_type=rnn_config.rnn_type,
        rnn_kwargs=dict(rnn_config.kwargs),
    )
54cf542122036510c70fe7a53a47dd724880a912
697,242
from typing import Any
from typing import Tuple
from typing import Dict
import json


def flask_http_response(status: int, data: Any) -> Tuple[str, int, Dict[str, str]]:
    """Create a tuple for flask to return.

    Args:
        status: integer http status to use
        data: json dumpable data for the return body
    """
    return json.dumps(data, separators=(",", ":")), status, {"Content-Type": "application/json"}
33584b4c66f08174ca7ad93e1b11193707a65892
697,244
import requests


def get_response_commits(commits_url, my_token):
    """
    Wrapper around the function `requests.get()`.

    If my_token is supplied by me (i.e. it's not an empty string), then use
    authentication. Otherwise, go ahead without authentication.

    Args:
        commits_url: URL with JSON metadata for commits.
        my_token: your own GitHub personal access token for authentication.
            Authenticated requests get a higher hourly API rate limit.

    Returns:
        Response object for no. of commits by the GitHub user on the API.
    """
    if my_token:
        response_commits = requests.get(
            commits_url, headers={'Authorization': 'token %s' % my_token})
    else:
        response_commits = requests.get(commits_url)
    return response_commits
7d8350540944aef18631b9122e1180d21f9a6f8a
697,247
def get_locale_parts(locale):
    """Split a locale into three parts, for language, script, and region."""
    parts = locale.split('_')
    if len(parts) == 1:
        return (parts[0], None, None)
    elif len(parts) == 2:
        if len(parts[1]) == 4:
            # parts[1] is a script
            return (parts[0], parts[1], None)
        else:
            return (parts[0], None, parts[1])
    else:
        assert len(parts) == 3
        return tuple(parts)
6a2a5f8600470ff13323482c478dcb1494b410a7
697,249
import re


def strip_markdown_directives(line):
    """Strips markdown directives from a line."""
    line = line.strip()
    if line.startswith("<") and line.endswith(">"):
        # Let's assume it's inline HTML and skip it
        return ""
    # Remove URLs (assume remote starts with http and local ends with html)
    line = re.sub(r'\[(.+?)]\(http[^\)]+\)', r'\1', line)
    line = re.sub(r'\[(.+?)]\(.+?html\)', r'\1', line)
    line = re.sub(r'<http:.+?>', r'', line)
    return line
8a1ad9076d058ecabbce0720d3c960601c14e3de
697,250
import random


def pick_random_worker_set(worker_sets):
    """Pick a random set of workers."""
    return random.choice(worker_sets)
86725e54659e0577ef5ff548e0b997261f876c75
697,251
import json


def is_valid_json(stuff):
    """Checks if a string is valid json."""
    try:
        json.loads(stuff)
    except (TypeError, ValueError):
        # json.JSONDecodeError is a subclass of ValueError
        return False
    else:
        return True
f949b00d31fe682c1974e93619a79a573802a2fe
697,253
def package_installed(module, package_name):
    """Determine if the package is already installed."""
    cmd = ['pacman', '-Q', package_name]
    exit_code, _, _ = module.run_command(cmd, check_rc=False)
    return exit_code == 0
5382a160e3c55d23bcf20b6bd8d360b11cd71410
697,254
def set_ipu_model_options(opts, compile_ipu_code=True):
    """Set the IPU Model options.

    Args:
        opts: An IpuOptions configuration protobuf.
        compile_ipu_code: Whether or not to actually compile real IPU code
            for modelling.

    Returns:
        The IpuOptions configuration protobuf, with IPU model options set.
    """
    opts.ipu_model_config.compile_ipu_code = compile_ipu_code
    return opts
d5e9577fb9ebad81b6fedb1988561197dbd3028e
697,255
def _ExtractResNetThroughput(output):
    """Extract throughput from Horovod output.

    Args:
        output: Horovod output

    Returns:
        A tuple of:
            Average throughput in images per second (float)
            Unit of the throughput metric (str)
    """
    # Start from last line and iterate backwards.
    avg_throughput = 0
    for line in output.splitlines()[::-1]:
        if 'train_throughput' in line:
            split_line = line.split()
            avg_throughput = float(split_line[-1])
            break
    return round(avg_throughput, 1), 'images/second'
671d745b0f73e9a84fa9a8b55f45054c711329c0
697,256
from typing import Any


def map_index(x: Any, column_map: dict) -> str:
    """Makes column list index human-readable."""
    return f'{column_map[x]}'
ab7c371cbcb9949e66a9a8adcc153c2e85646d01
697,257
def clsName2Ind(lbls, cls):
    """Converts a cls name to an ind."""
    if cls in lbls:
        return lbls.index(cls) + 1
    else:
        raise ValueError('unknown class')
15676bb297e42562a02b7e9af0c8d9de6fb890df
697,263
import six


def load_from_hparams_overrides(params, params_source, hparams_overrides):
    """Given a dictionary of hyperparameters and a list of overrides, merge them.

    Args:
        params: Python dict containing a base hyperparameters set.
        params_source: Python dictionary to record source of hyperparameters.
        hparams_overrides: Python list of strings. This is a set of k=v
            overrides for the hyperparameters in `params`; if `k=v1` in
            `params` but `k=v2` in `hparams_overrides`, the second value wins
            and the value for `k` is `v2`.

    Returns:
        Python dict of hyperparameters.
    """
    if params is None:
        raise ValueError(
            'Input dictionary is empty. It is expected to be loaded with default '
            'values')
    if not isinstance(params, dict):
        raise ValueError(
            'The base hyperparameters set must be a Python dict, was: {}'.format(
                type(params)))
    if hparams_overrides is None:
        return params, params_source
    if isinstance(hparams_overrides, six.string_types):
        hparams_overrides = [hparams_overrides]
    if not isinstance(hparams_overrides, list):
        raise ValueError(
            'Expected that hparams_overrides would be `None`, a single string, or a'
            ' list of strings, was: {}'.format(type(hparams_overrides)))

    for kv_pair in hparams_overrides:
        if not isinstance(kv_pair, six.string_types):
            raise ValueError(
                'Expected that hparams_overrides would contain Python list of strings,'
                ' but encountered an item: {}'.format(type(kv_pair)))
        key, value = kv_pair.split('=')
        parser = type(params[key])
        if parser is bool:
            params[key] = value not in ('0', 'False', 'false')
        else:
            params[key] = parser(value)
        params_source[key] = 'Command-line `hparams` flag'

    return params, params_source
2365a3ce67d4855662912bbd865e1e731648e7b1
697,266
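A minimal usage sketch for the merge above (the params dict and override strings are invented):

params = {'learning_rate': 0.1, 'use_tpu': False, 'train_steps': 1000}
params_source = {}
params, params_source = load_from_hparams_overrides(
    params, params_source, ['learning_rate=0.01', 'use_tpu=true'])
print(params['learning_rate'])  # 0.01 (parsed with float, the base type)
print(params['use_tpu'])        # True ('true' is not in the falsy set)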
def flatten_report_for_csv(report):
    """
    Flattens the data structure returned by `watson.report()` for a csv export.

    Dates are formatted in a way that Excel (default csv module dialect) can
    handle them (i.e. YYYY-MM-DD HH:mm:ss).

    The result is a list of dictionaries where each element can contain two
    different things:

    1. The total `time` spent in a project during the report interval. In
       this case, the `tag` value will be empty.
    2. The partial `time` spent in a tag and project during the report
       interval. In this case, the `tag` value will contain a tag associated
       with the project.

    The sum of all elements where `tag` is empty corresponds to the total
    time of the report.
    """
    result = []
    datetime_from = report['timespan']['from'].format('YYYY-MM-DD HH:mm:ss')
    datetime_to = report['timespan']['to'].format('YYYY-MM-DD HH:mm:ss')
    for project in report['projects']:
        result.append({
            'from': datetime_from,
            'to': datetime_to,
            'project': project['name'],
            'tag': '',
            'time': project['time']
        })
        for tag in project['tags']:
            result.append({
                'from': datetime_from,
                'to': datetime_to,
                'project': project['name'],
                'tag': tag['name'],
                'time': tag['time']
            })
    return result
a7af0385bb846aa7d88e5e5ef939c40712ea5f2e
697,267
import re


def postfinance_preprocess_notice(payment_notice):
    """Remove spaces from potential invoice numbers."""
    return re.sub(
        r"\b([0-9]{4}\s*-\s*[0-9]{4}\s*-\s*[0-9]{4})\b",
        lambda match: re.sub(r"\s+", "", match.group(0)),
        payment_notice,
    )
ce806fda4c63030b21d3e1747d31349b3757dadb
697,270
import ipaddress


def _evaluate_ip_address(ip_address):
    """Evaluate supplied IPv4 address.

    Returns the supplied IPv4 address if valid and specified without a
    netmask, or returns the subnet broadcast address if the supplied IPv4
    address is specified with a netmask such as '192.168.1.5/24' or
    '192.168.1.5/255.255.255.0'.

    Parameters
    ----------
    ip_address : str
        Supplied IP address.

    Returns
    -------
    str
        Valid IPv4 address.

    Raises
    ------
    ValueError
        If `ip_address` does not contain a valid IPv4 address.
    """
    ip = ip_address.strip()
    try:
        ip = str(ipaddress.IPv4Address(ip))
    except ipaddress.AddressValueError:
        try:
            ip = str(ipaddress.IPv4Network(ip, strict=False).broadcast_address)
        except Exception as e:
            raise ValueError(f"[Error] Invalid IP address: {ip_address}") from e
    return ip
67b45d1ba169c9880c68d800efb944ff05f2c51b
697,275
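Example behaviour of `_evaluate_ip_address` (addresses chosen for illustration):

print(_evaluate_ip_address("192.168.1.5"))                # 192.168.1.5
print(_evaluate_ip_address("192.168.1.5/24"))             # 192.168.1.255
print(_evaluate_ip_address("192.168.1.5/255.255.255.0"))  # 192.168.1.255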
from typing import Callable


def get_linear_anneal_func(
    start_value: float, end_value: float, start_step: int, end_step: int
) -> Callable:
    """Create a linear annealing function.

    Parameters
    ----------
    start_value : float
        Initial value for linear annealing.
    end_value : float
        Terminal value for linear annealing.
    start_step : int
        Step to start linear annealing.
    end_step : int
        Step to end linear annealing.

    Returns
    -------
    linear_anneal_func : Callable
        A function that returns annealed value given a step index.
    """
    def linear_anneal_func(step):
        if step <= start_step:
            return start_value
        if step >= end_step:
            return end_value
        # Formula for a line when two points are known:
        #          y1 - y0
        # y - y0 = ------- (x - x0)
        #          x1 - x0
        return (end_value - start_value) / (end_step - start_step) * (
            step - start_step
        ) + start_value

    return linear_anneal_func
affe767318be5b07dfcf23d1a3e2e58e67750611
697,276
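Usage sketch, e.g. for epsilon-greedy exploration schedules (the numbers are invented):

eps_fn = get_linear_anneal_func(
    start_value=1.0, end_value=0.1, start_step=0, end_step=100)
print(eps_fn(0))    # 1.0  (clamped at start_value)
print(eps_fn(50))   # 0.55 (halfway along the line)
print(eps_fn(200))  # 0.1  (clamped at end_value)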
def bump_version(base: str, index: int = -1) -> str:
    """
    Increment one of the numerical positions of a version.

    :param base: Version core, such as 0.1.0.
        Do not include pre-release identifiers.
    :param index: Numerical position to increment. Default: -1.
        This follows Python indexing rules, so positive numbers start from
        the left side and count up from 0, while negative numbers start from
        the right side and count down from -1.
    :return: Bumped version.
    """
    bases = [int(x) for x in base.split(".")]
    bases[index] += 1

    limit = 0 if index < 0 else len(bases)
    i = index + 1
    while i < limit:
        bases[i] = 0
        i += 1

    return ".".join(str(x) for x in bases)
48d5c85c106e87733702f33dfcdd7c654478949e
697,279
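Examples of how the index parameter behaves (a sketch; inputs invented):

assert bump_version("0.1.0") == "0.1.1"           # default: last position
assert bump_version("0.1.5", index=1) == "0.2.0"  # positions to the right reset
assert bump_version("1.2.3", index=0) == "2.0.0"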
import torch


def ssim(prediction: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
    """
    Function computes the structural similarity.
    Source: https://github.com/ChristophReich1996/CellFlowNet

    :param prediction: (torch.Tensor) Prediction
    :param label: (torch.Tensor) Label
    :return: (torch.Tensor) SSIM value
    """
    assert prediction.numel() == label.numel(), \
        'Prediction tensor and label tensor must have the same number of elements'
    # Calc means and vars
    prediction_mean = prediction.mean()
    prediction_var = prediction.var()
    label_mean = label.mean()
    label_var = label.var()
    # Calc correlation coefficient
    correlation_coefficient = (1 / label.numel()) * torch.sum(
        (prediction - prediction_mean) * (label - label_mean))
    return ((2.0 * prediction_mean * label_mean) * (2.0 * correlation_coefficient)) / \
           ((prediction_mean ** 2 + label_mean ** 2) * (prediction_var + label_var))
2cdf069823f195f9730b13d00d48a25c13bc4884
697,280
def get_network_id(networks, name):
    """Get network id based on name provided."""
    for network in networks:
        if network["Name"] == name:
            return network["Id"]
    # no matching network found
    return None
12e53ade2d661587a674435d8c57160d740aa48c
697,286
import torch


def normal_transform_pixel(shape):
    """
    Compute the normalization matrix from image size in pixels to [-1, 1].
    """
    tr_mat = torch.tensor([[1.0, 0.0, 0.0, -1.0],
                           [0.0, 1.0, 0.0, -1.0],
                           [0.0, 0.0, 1.0, -1.0],
                           [0.0, 0.0, 0.0, 1.0]])
    for idx in range(len(shape)):
        tr_mat[idx, idx] = tr_mat[idx, idx] * 2.0 / (shape[idx] - 1.0)
    tr_mat = tr_mat.unsqueeze(0)
    return tr_mat
8f178255108e565d9156cb01a8da5b88cd46c73a
697,288
def read_messages(msg_file):
    """(file open for reading) -> list of str

    Precondition: msg_file should be a message file that is already open for
    reading, and that file contains one message per line.

    Read and return the contents of the file as a list of messages, in the
    order in which they appear in the file, stripping the newline from each
    line.
    """
    new_message = []
    for line in msg_file:
        # strip the newline from each line, per the docstring
        new_message.append(line.strip())
    return new_message
220cda7e055f9c3b6a483500fe3aeef7d9752b70
697,295
from typing import Dict
from typing import List


def avoid_body(my_head: Dict[str, int], body: List[dict]):
    """
    my_head: Dictionary of x/y coordinates of the Battlesnake head.
            e.g. {"x": 0, "y": 0}
    body: List of dictionaries of x/y coordinates for every segment of a
            Battlesnake.
            e.g. [{"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0}]

    return: The set of moves ("up", "down", "left", "right") that would
            collide with a body segment.
    """
    bad_moves = set()

    right = {"x": my_head["x"] + 1, "y": my_head["y"]}
    left = {"x": my_head["x"] - 1, "y": my_head["y"]}
    up = {"x": my_head["x"], "y": my_head["y"] + 1}
    down = {"x": my_head["x"], "y": my_head["y"] - 1}

    for part in body:
        if part["x"] == right["x"] and part["y"] == right["y"]:
            bad_moves.add("right")
        if part["x"] == left["x"] and part["y"] == left["y"]:
            bad_moves.add("left")
        if part["x"] == up["x"] and part["y"] == up["y"]:
            bad_moves.add("up")
        if part["x"] == down["x"] and part["y"] == down["y"]:
            bad_moves.add("down")

    return bad_moves
3d7a151b694c6a6915f9c7c508cf7cd0e2e4d6f0
697,298
def get_area(a, b):
    """Calculate area of rectangle with sides a and b."""
    return a * b
2907400c82c018634daf51091b509ab65258c269
697,301
def macro_with_both(name, number = 3, *args, **kwargs):
    """Oh wow this macro has both. Not much else to say.

    Args:
        name: The name of the test rule.
        number: Some number used for important things
        *args: Other arguments to include
        **kwargs: Other attributes to include

    Returns:
        An empty list.
    """
    _ignore = [name, number, args, kwargs]
    return []
b1d1fce22662830de6ea1a9c3eee677c48c65a23
697,303
def increment_duplicates(arr):
    """Increments duplicates in an array until there are no duplicates.

    Uses a hash set to keep track of which values have been seen in the
    array. Runs in O(n^2) time worst case (e.g. all elements are equal),
    O(n) time best case (elements are already unique).
    """
    seen = set()
    for i in range(len(arr)):
        while arr[i] in seen:
            arr[i] += 1
        seen.add(arr[i])
    return arr
33a456c75b7ca00ddb535a6726af3fe1944efc21
697,305
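For example (illustrative input; the second 2 is bumped past 3 until it is unique):

print(increment_duplicates([3, 2, 1, 2, 7]))  # [3, 2, 1, 4, 7]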
def _date(api):
    """Returns UTC YYYY.MM.DD to use in tags."""
    return api.time.utcnow().strftime('%Y.%m.%d')
91e5c4c0bafa00b33e2b9c22d28b255a50f0d091
697,309
def lreplace(a, b, string):
    """Replaces the head of the string."""
    if string.startswith(a):
        return b + string[len(a):]
    return string
73ac9d588c98350699bdcd0e2ea70035cd17c77c
697,312
import torch


def one_hot_encode(y: torch.Tensor, num_classes: int) -> torch.Tensor:
    """
    Creates the one hot encoding of the provided class labels.

    :param y: class labels of size (batch_size,)
    :param num_classes: number of distinct classes
    :return y_one_hot: one hot encoding of size (batch_size, num_classes)
    """
    return torch.zeros(y.size(0), num_classes).scatter_(1, y.view(-1, 1), 1.)
ba1cc5d556e17fa7d797efa32e697a9bdac4a3c0
697,313
def color_distance(c1, c2):
    """
    Metric to define the visual distinction between two (r,g,b) colours.
    Inspired by: https://www.compuphase.com/cmetric.htm

    :param c1: (r,g,b) colour tuples. r,g and b are values between 0 and 1.
    :param c2: (r,g,b) colour tuples. r,g and b are values between 0 and 1.
    :return: distance: float representing visual distinction between c1 and c2.
        Larger values = more distinct.
    """
    r1, g1, b1 = c1
    r2, g2, b2 = c2
    mean_r = (r1 + r2) / 2
    delta_r = (r1 - r2) ** 2
    delta_g = (g1 - g2) ** 2
    delta_b = (b1 - b2) ** 2
    distance = (2 + mean_r) * delta_r + 4 * delta_g + (3 - mean_r) * delta_b
    return distance
1f63d563d71373d7c27e446794745bcb2631e013
697,314
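A small sketch of what the metric captures (colour values invented): a strong hue change scores far higher than a brightness change within the same hue.

red, green, dark_red = (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.5, 0.0, 0.0)
# green should be much farther from red than dark_red is
assert color_distance(red, green) > color_distance(red, dark_red)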
def tochr(lst):
    """Converts every value of a list into a character."""
    return [chr(i) for i in lst]
36bf8ead73aaf079cbf1f2d5f3cd243266f7fbd2
697,315
def find_pareto_front(population):
    """Finds a subset of nondominated individuals in a given list.

    :param population: a list of individuals
    :return: a set of indices corresponding to nondominated individuals
    """
    pareto_front = set(range(len(population)))
    for i in range(len(population)):
        if i not in pareto_front:
            continue
        ind1 = population[i]
        for j in range(i + 1, len(population)):
            ind2 = population[j]
            # if individuals are equal on all objectives, mark one of them
            # (the first encountered one) as dominated to prevent excessive
            # growth of the Pareto front
            if ind2.fitness.dominates(ind1.fitness) or ind1.fitness == ind2.fitness:
                pareto_front.discard(i)
            if ind1.fitness.dominates(ind2.fitness):
                pareto_front.discard(j)
    return pareto_front
2e53edda5e3d3afd541cedff3bb3d247f913969c
697,316
def __walk_chain(rel_dict, src_id):
    """
    given a dict of pointing relations and a start node, this function will
    return a list of paths (each path is represented as a list of node IDs --
    from the first node of the path to the last).

    Parameters
    ----------
    rel_dict : dict
        a dictionary mapping from an edge source node (node ID str)
        to a set of edge target nodes (node ID str)
    src_id : str

    Returns
    -------
    paths_starting_with_id : list of list of str
        each list contains a list of strings (i.e. a list of node IDs,
        which represent a chain of pointing relations)
    """
    paths_starting_with_id = []
    for target_id in rel_dict[src_id]:
        if target_id in rel_dict:
            for tail in __walk_chain(rel_dict, target_id):
                paths_starting_with_id.append([src_id] + tail)
        else:
            paths_starting_with_id.append([src_id, target_id])
    return paths_starting_with_id
24b3bea13f7d802043b595e48bedc6da2a782370
697,317
from typing import Any
import importlib


def dynamic_import_from(source_file: str, class_name: str) -> Any:
    """Do a from source_file import class_name dynamically.

    Args:
        source_file (str): Where to import from
        class_name (str): What to import

    Returns:
        Any: The class to be imported
    """
    module = importlib.import_module(source_file)
    return getattr(module, class_name)
90c861f727c8e6f20f89b7af24c2163bc65bd516
697,319
def findAllPerson(tx):
    """
    Method that finds all the Person nodes in the database.

    :param tx: is the transaction
    :return: a list of nodes
    """
    query = (
        "MATCH (p:Person) "
        "RETURN p , ID(p);"
    )
    results = tx.run(query).data()
    return results
c5e5af2c1267b7bc057135dc4cb64e2bb2c23198
697,323
def integral_image(image):
    """Create an integral image on the first 2 dimensions from the input.

    Args:
        image -- ndarray with ndim >= 2

    Returns an integral image where every location i,j is the cumulative
    sum of all preceding pixels.
    """
    return image.cumsum(1).cumsum(0)
567228d7089db2a58047bd5123a371c516198ab1
697,324
def hex_short(value):
    """
    Given an integer in value, convert it to hexadecimal but remove the 0x.

    :param value: the integer
    :return: the shortened version of hex
    """
    hex_value = hex(value)[2:]
    if len(hex_value) == 1:
        hex_value = f"0{hex_value}"
    return hex_value
d7bb12b06001c44bdddfbe0c4f8d810c27862601
697,328
def lca(T, v, w):
    """
    The lowest common ancestor (LCA) of two nodes v and w in a tree or
    directed acyclic graph (DAG) T is the lowest (i.e. deepest) node that has
    both v and w as descendants, where we define each node to be a descendant
    of itself (so if v has a direct connection from w, w is the lowest common
    ancestor).
    """
    if T is None:
        return None
    # This is the LCA
    if T.value == v or T.value == w:
        return T
    # Explore subtrees.
    left_lca = lca(T.left, v, w)
    right_lca = lca(T.right, v, w)
    if left_lca and right_lca:
        return T
    return left_lca if left_lca is not None else right_lca
1f0235c6364803aae4a28a9ad72ab433f22ef2a5
697,330
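A minimal sketch exercising `lca` with a hypothetical binary-tree Node class (the class is not part of the original entry):

class Node:
    # minimal binary tree node for demonstration
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

#       1
#      / \
#     2   3
#    / \
#   4   5
root = Node(1, Node(2, Node(4), Node(5)), Node(3))
assert lca(root, 4, 5).value == 2  # both under node 2
assert lca(root, 4, 3).value == 1  # split across the root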
import re


def is_uuid(uuid):
    """
    Check if value is a proper uuid.

    :param uuid: string to check
    :return: bool
    """
    UUID_PATTERN = re.compile(r'^[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}$', re.IGNORECASE)
    if UUID_PATTERN.match(uuid):
        return True
    return False
091464f66018c9913c19dcf307c1e193348c7023
697,331
from pathlib import Path
import json


def parse_annotation_background_width(bpmn_path: Path):
    """Get the width the image was resized to when annotating in the BPMN
    Annotator tool."""
    assert bpmn_path.suffix == ".bpmn", f"{bpmn_path}"
    img_meta_line = bpmn_path.read_text().split("\n")[1]
    assert img_meta_line.startswith(
        "<!--"
    ), f"{bpmn_path} has no meta line, line 1: {img_meta_line}"
    img_meta = json.loads(img_meta_line.replace("<!-- ", "").replace(" -->", ""))
    return img_meta["backgroundSize"]
aa1315aa2301a823d5e6fe83089698edd7600e4b
697,332