content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def create_k_mer_set(seq, kmer_len, stride_len):
    """Create the set of k-mers extracted from a sequence.

    Parameters
    ----------
    seq : string
        sequence to perform bottom-k sketch on
    kmer_len : int
        the length of the k-mers of the sequence
    stride_len : int
        the stride length in extracting k-mers from the sequence

    Returns
    -------
    kmer_set : set of strings
        The set of k-mers from the sequence
    """
    kmer_set = set()
    i = 0
    # Use <= so the final k-mer ending exactly at len(seq) is included;
    # the previous `<` comparison silently dropped it.
    while i + kmer_len <= len(seq):
        kmer_set.add(seq[i:i + kmer_len])
        i += stride_len
    return kmer_set
13d57788e47fbb83f3f5e661abbc12cb83546fe2
513,627
def _get_cmd_tree(subcmds): """Convert flat list of subcmd objects into hierarchical dictionary {'command name': {'subcommand name 1': subcmd1, 'subcommand name 2': subcmd2}}""" cmds = {} for sub_cmd in subcmds: cmd_dict = cmds.setdefault(sub_cmd.cmd, {}) cmd_dict[sub_cmd.name] = sub_cmd return cmds
76f44db545d298b94f9eb2323a5ade280b5f0380
40,490
def remove_dup_and_null(cell):
    """Deduplicate a comma-separated coordinate string and drop "NULL".

    "NULL" is kept only when it is the sole entry.  When more than one
    distinct entry remains, entries are ordered by the integer before
    the first ":".
    """
    entries = set(cell.split(','))
    # "NULL" survives only when it is the lone entry.
    if len(entries) > 1:
        entries.discard("NULL")
    if len(entries) > 1:
        ordered = sorted(entries, key=lambda e: int(e.split(":")[0]))
    else:
        ordered = entries
    return ",".join(ordered)
7331d7909f45561b0eae4a3ea389ddb62f121915
343,030
import inspect


async def to_awaitable(value):
    """Coerce ``value`` into something safe to ``await``.

    Awaitables are awaited and their result returned; plain values are
    returned unchanged.  Useful when mixing coroutines with regular
    functions.
    """
    if not inspect.isawaitable(value):
        return value
    return await value
79d531825a4439ffba715e16e431f2db701b6357
588,097
def expScale(initVal, exp):
    """Apply exponent ``exp`` to ``initVal``, preserving its sign.

    Works whether ``initVal`` is positive, negative or zero: negative
    inputs are exponentiated on their magnitude and the sign restored.
    """
    if initVal > 0:
        return initVal ** exp
    if initVal < 0:
        return -((-initVal) ** exp)
    return initVal
5c897f394f28697c17121ebff71734b846f85bf0
56,029
def find_dialogue_event(dialogue_event_name, dialogue_event_defs):
    """Find a dialogue event by name in a list of event definitions.

    :param dialogue_event_name: the name of the dialogue event to look for.
    :param dialogue_event_defs: a list of dialogue event definitions.
    :return: the definition with matching name, or None.
    """
    matches = (d for d in dialogue_event_defs if d.name == dialogue_event_name)
    return next(matches, None)
114ec4a2ee426d789ebcf76eb46756194e108d19
25,688
def bash_string(s):
    """Return ``s`` wrapped in double quotes, with backslashes and
    embedded double quotes escaped for bash."""
    escaped = s.replace('\\', '\\\\').replace('"', '\\"')
    return '"{}"'.format(escaped)
3283132517b5b0a857c2d46c265343d18b7c4b9c
94,985
def FindNodeWithTag(node, tag):
    """Look through a node's children for a child node with a given tag.

    Args:
      node: The node to examine.
      tag: The tag on a child node to look for.

    Returns:
      The unique child node with the given tag, or None.

    Asserts if more than one child carries the tag.
    """
    result = None
    # Iterate the element directly: Element.getchildren() was deprecated
    # since Python 3.2 and removed in Python 3.9.
    for child in node:
        if child.tag == tag:
            assert not result
            result = child
    return result
5ea0cb05a858edd1e5b44d0c3319d36ece9d5e2e
376,897
def T_rec(T_hot, T_cold, y_factor):
    """Receiver noise temperature from hot/cold load temperatures and
    the measured Y-factor."""
    numerator = T_hot - y_factor * T_cold
    return numerator / (y_factor - 1)
fc082e31368f957d978743ae4e3e3d2bf2440711
523,106
def get_floor(directions):
    """Return the floor Santa ends on.

    Parameters
    ----------
    directions : str
        A string of parentheses: "(" moves up one floor, ")" moves
        down one.  Starting floor is 0.

    Returns
    -------
    int
        The floor to which Santa should go.
    """
    moves = {"(": 1, ")": -1}
    return sum(moves[step] for step in directions)
f6ab8d6d2fb134a71a5a1a06db14073327a5eb58
633,100
def top_n_correlated_features(data_df, sensitive_feature, n):
    """Return the ``n`` features whose correlation with
    ``sensitive_feature`` has the largest magnitude."""
    corrs = data_df.corr()[sensitive_feature]
    ranked_index = corrs.abs().sort_values(ascending=False).index
    return corrs.reindex(ranked_index).iloc[:n]
6233785e623cccb8aaa1d3d9a66a37483d7bc1a5
70,481
def _split_list_by_function(l, func): """For each item in l, if func(l) is truthy, func(l) will be added to l1. Otherwise, l will be added to l2. """ l1 = [] l2 = [] for item in l: res = func(item) if res: l1.append(res) else: l2.append(item) return l1, l2
3812a7b43cb103b746360943303441ae31122407
684,878
def find_line_starting_with_seq(the_list, seq, from_index=0, to_index=-1):
    """Return the index of the first line starting with ``seq``, or -1.

    :param the_list: list of strings
    :param seq: char sequence to find
    :param from_index: start index (clamped down to ``to_index``)
    :param to_index: end index, exclusive; negative means "to the end"
    :return: index of the first occurrence of ``seq`` or -1 if not found
    """
    end = len(the_list) if to_index < 0 else to_index
    start = min(from_index, end)
    for idx in range(start, end):
        if the_list[idx].startswith(seq):
            return idx
    return -1
ce2407b8054056c802f4f0a567e5d85924496131
157,965
from typing import List def _transform_body_for_vscode(body: str) -> List[str]: """snippetのbodyをvscode用に変換する. 改行区切りのlist, space4つをtabに変換. :param body: string of a snippet body :return body_for_vscode: vscode-snippet形式のbody """ body_list = body.split('\n') body_for_vscode = [b.replace(' ', '\t') for b in body_list] return body_for_vscode
1261efcedb87848fdfd9fee3ebed36c956db0019
658,983
def format_period(seconds):
    """Render a period given in seconds as a human-readable string."""
    DAY = 24 * 60 * 60
    HOUR = 60 * 60
    MINUTE = 60
    if seconds == DAY:
        return "day"
    if seconds == HOUR:
        return "hour"
    if seconds > DAY:
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    if seconds > HOUR:
        return "%.1f hours" % (seconds / 60. / 60.)
    if seconds > MINUTE:
        return "%.1f minutes" % (seconds / 60.)
    return "%.0f seconds" % seconds
95cb62c4123c3469a890f765c473f3335131f646
326,530
def generate_music_info(tag_editor_context: dict) -> str:
    """Build a human-readable summary of the music tags stored in the
    user's context.

    **Keyword arguments:**
     - tag_editor_context (dict) -- The context object of the user

    **Returns:**
     `str`
    """
    ctx = tag_editor_context

    def shown(key):
        # Fall back to '-' for empty/falsy tag values.
        return ctx[key] if ctx[key] else '-'

    return (
        f"*🗣 Artist:* {shown('artist')}\n"
        f"*🎵 Title:* {shown('title')}\n"
        f"*🎼 Album:* {shown('album')}\n"
        f"*🎹 Genre:* {shown('genre')}\n"
        f"*📅 Year:* {shown('year')}\n"
        f"*💿 Disk Number:* {shown('disknumber')}\n"
        f"*▶️ Track Number:* {shown('tracknumber')}\n"
        "{}\n"
    )
a7f221460b04f4082a16b03d4dc6eeef2a7e9740
259,609
import hashlib


def img_bin_to_sha256(img_bin):
    """Hash image binary data to SHA-256.

    Args:
        img_bin: binary string of image.

    Returns:
        Hex-encoded SHA-256 digest of the image.
    """
    # The docstring previously claimed SHA-1; the implementation has
    # always used SHA-256, so the documentation now matches behavior.
    return hashlib.sha256(img_bin).hexdigest()
457a2fe4373fb5efd618482e10a671dfaf642fd0
267,686
def _pad(left, right, amount=17): """Left pad a key/val string.""" pad = ' ' * (amount - len(left)) return f"{left} {pad} {right}"
768b2a481c2194e4a7bf2dbe942c4fbc636a9f0b
407,750
def normalize_space(data):
    """Implements attribute value normalization.

    Collapses each run of space (#x20) characters in ``data`` to a
    single space and discards leading and trailing spaces, per the XML
    attribute-value normalization rules.  Other whitespace characters
    are left untouched.
    """
    # Splitting on single spaces and dropping the empty chunks is
    # equivalent to the classic one-pass scanner.
    return ' '.join(chunk for chunk in data.split(' ') if chunk)
e95b0178e54312c7ab329bc982f8add632709c3b
654,283
def entuple(x, n=2):
    """Coerce ``x`` into a tuple.

    Tuples pass through unchanged, lists are converted, and any other
    (atomic) value is repeated ``n`` times — handy when a dimension can
    be given either as ``(w, h)`` or as a single int.

    :param x: atomic value, list, or tuple.
    :param n: number of elements when ``x`` is atomic.
    :return: tuple of the appropriate size.
    """
    # `type(...) is` (not isinstance) on purpose: subclasses such as
    # namedtuples are treated as atomic values, matching the original.
    if type(x) is tuple:
        return x
    if type(x) is list:
        return tuple(x)
    return (x,) * n
c5e8ca94d552b746b45342b803dd267cd7c481f3
637,628
def split(test_name):
    """Split a fully-qualified test name at its first dot.

    Returns:
      A tuple: (recipe name, simple test name)
    """
    recipe_name, simple_name = test_name.split('.', 1)
    return recipe_name, simple_name
6b7031592db0e1ba6bf0c6d80f3f6060cc2fd547
179,906
def get_alt_svms(svms, classes, target_class):
    """
    Find alternative SVMs (e.g., for target class 0, the SVMs classifying
    0 against 1, and 0 against 2).

    Parameters
    ----------
    svms : list
        List of eTree.Element objects describing the different one-to-one
        support vector machines in the PMML.
    classes : numpy.array
        The classes to be predicted by the model.
    target_class : str
        The target class.

    Returns
    -------
    alt_svms : list
        SVMs comparing the target class against the other classes, in the
        order those classes appear in ``classes``.
    """
    # Noop for regression
    if classes[0] is None:
        return svms

    def involves(svm, label):
        text = str(label)
        return (svm.get('targetCategory') == text
                or svm.get('alternateTargetCategory') == text)

    candidates = [svm for svm in svms if involves(svm, target_class)]
    # Iterate `classes` directly rather than set(classes).difference(...):
    # set iteration order is arbitrary, which made the result order
    # nondeterministic across runs.
    return [
        next(svm for svm in candidates if involves(svm, c))
        for c in classes
        if c != target_class
    ]
62d0d8f093f20891b080605cbea1f9c2d08c10c8
69,702
def edges_to_adj_list(edges):
    """Turn a list of weighted edges into an adjacency mapping.

    Edges need not be given in both directions; the mapping is made
    symmetric automatically.

    Input:
        - edges : list of weighted edges, e.g. (0.7, 'A', 'B') for an
          edge between A and B with weight 0.7

    Output:
        - adj_list : dict mapping each node to a set of
          (weight, neighbour) tuples
    """
    adjacency = {}
    for weight, a, b in edges:
        adjacency.setdefault(a, set()).add((weight, b))
        adjacency.setdefault(b, set()).add((weight, a))
    return adjacency
0b309b3b78b8b7f7dd68a9d55a886181797a5c95
208,393
def strip_leading_blank_lines(text):
    """Return text with leading blank lines removed.

    A line is blank when it is empty or whitespace-only.  Text that
    consists solely of blank lines now yields an empty string (the
    previous version fell back to index 0 and returned the blank lines
    unchanged).
    """
    lines = text.splitlines()
    start = len(lines)  # default: everything is blank -> keep nothing
    for index, line in enumerate(lines):
        if line.strip():
            start = index
            break
    return '\n'.join(lines[start:])
f30dfdca8f3979d3c49e39e613b217b39a77df65
287,037
def is_field(token):
    """Checks if the token is a valid ogc type field"""
    valid_fields = {
        "name", "description", "encodingType", "location", "properties",
        "metadata", "definition", "phenomenonTime", "resultTime",
        "observedArea", "result", "id", "@iot.id", "resultQuality",
        "validTime", "time", "parameters", "feature",
    }
    return token in valid_fields
04cf3ac8777dd7a4605121eb02433d9f4c195d32
368,276
def compute_kappa_max(kappa_mean, opti_factor):
    """Compute the supposed possible maximum value of kappa.

    Inputs:
        - kappa_mean: float, mean of kappa
        - opti_factor: float, optimisation factor

    Output:
        - kappa_max: float, ``opti_factor * kappa_mean``
    """
    kappa_max = kappa_mean * opti_factor
    return kappa_max
f22c8c7f23a3c220bcfdea01cd4ae2ce05d54f50
461,913
from typing import List, Tuple


def coords_to_query_string(coords: List[Tuple[float, float]], api_key: str) -> str:
    """
    Convert a list of (lat, lon) tuples into the query format required
    by the Google Maps Elevation API.

    Args:
        coords: List of (lat, lon) tuples.
        api_key: Api key for Google Elevation API.

    Returns:
        Query string.
    """
    multi = len(coords) > 1
    prefix = "json?path=" if multi else "json?locations="
    path = "|".join(f"{lat},{lon}" for lat, lon in coords)
    samples = f"&samples={len(coords)}" if multi else ""
    return f"{prefix}{path}{samples}&key={api_key}"
240fdd875c32f81e4640a3051c4f96af23e1396c
464,759
from typing import OrderedDict def _read_index(stream): """Reads a dictionary of type String : uint""" length = stream.read_uint() data = OrderedDict() for _ in range(length): key = stream.read_string() value = stream.read_uint() data[key] = value return data
61c0e7b83fe64e7c8033e0c3db629b6dbb40e5a1
553,350
def get_structure(pdb_file, model=None, altloc="first", extra_fields=[], include_bonds=False):
    """
    Create an :class:`AtomArray` or :class:`AtomArrayStack` from a
    :class:`PDBFile`.

    This is a thin convenience wrapper around the :class:`PDBFile`
    method :func:`get_structure()`, kept for consistency with other
    ``structure.io`` subpackages; all parameters are forwarded
    unchanged.

    Parameters
    ----------
    pdb_file : PDBFile
        The file object.
    model : int, optional
        Model number to extract (1-based; negative values index models
        from the last model).  When omitted, all models are returned as
        an :class:`AtomArrayStack`, even for single-model structures.
    altloc : {'first', 'occupancy', 'all'}
        How *altloc* IDs are handled: first matching atom, highest
        occupancy, or all atoms (the latter adds the ``altloc_id``
        annotation and may duplicate atoms).
    extra_fields : list of str, optional
        Extra annotation categories to store: ``'atom_id'``,
        ``'b_factor'``, ``'occupancy'`` and ``'charge'``.
    include_bonds : bool, optional
        If true, create a :class:`BondList` from the file's bond
        records; all bonds get :attr:`BondType.ANY`, since the PDB
        format does not support bond orders.

    Returns
    -------
    array : AtomArray or AtomArrayStack
        The return type depends on the `model` parameter.
    """
    return pdb_file.get_structure(model, altloc, extra_fields, include_bonds)
3887837cb4116ec93e3bc7fef95f6afca2b59177
565,578
import secrets


def random_file_name(filename: str) -> str:
    """
    Replace a file name with a random string, preserving its extension.

    The result looks like ``<random_string>.<extension>``.

    :param filename: str
    :return: str
    """
    extension = filename.split(".")[-1]
    return f"{secrets.token_urlsafe(16)}.{extension}"
78d12aa08d55f0fba71427844830a71710ac1bf0
64,265
def data(context, data):
    """Replace the tag's content with the current data."""
    cleared_tag = context.tag.clear()
    return cleared_tag[data]
caa302cda6f2a8fecc3c453375d703f9438b5412
192,799
def make_seq_label_output(result, infer_input):
    """Pair model inputs with model outputs, example by example.

    :param result: 2-D list of strings (model output labels)
    :param infer_input: 2-D list of strings (model input words)
    :return: list of lists of (word, label) tuples
    """
    return [
        list(zip(words, labels))
        for words, labels in zip(infer_input, result)
    ]
8490972b6f9ce4fbd1051c8b0db88e5b9578de9a
345,140
import re


def GetEvalueFromAnnotation(line):  # {{{
    """Parse the E-value from an annotation line.

    Returns the value as float, or None when the line is empty, the
    pattern is absent, or the value cannot be parsed.
    """
    if not line:
        return None
    match = re.search('evalue *=[^, ]*', line)
    if not match:
        return None
    raw_value = match.group(0).split('=')[1]
    try:
        return float(raw_value)
    except (ValueError, TypeError):
        return None
c680c30dc8dc8d6a788424a515363b4ee62a65c0
436,364
import itertools


def flatten_dict(**kwargs):
    """
    Flatten a dict of lists into a list of dicts.

    For example
        flatten_dict(lr=[1, 2], a=[10, 3], b=dict(c=[2, 4], d=np.arange(1, 3)))
        >>> [{'lr': 1, 'a': 10, 'b': {'c': 2, 'd': 1}},
             {'lr': 2, 'a': 3, 'b': {'c': 4, 'd': 2}}]
    """
    keys = []
    value_lists = []
    for key, value in kwargs.items():
        keys.append(key)
        if isinstance(value, dict):
            # Nested dicts are flattened recursively.
            value_lists.append(flatten_dict(**value))
        elif isinstance(value, (int, float)):
            # Scalars broadcast as a single-element list.
            value_lists.append([value])
        else:
            value_lists.append(value)
    # zip(*value_lists) walks the i-th choice of every key in lockstep.
    return [dict(zip(keys, combo)) for combo in zip(*value_lists)]
4a42dcf4fcceb74382541ec8e9412fa0e87635ff
583,737
def int_to_big_endian(n: int, length: int) -> bytes:
    """
    Represent an integer in big-endian byte order.

    :param n: integer
    :param length: byte length
    :return: big-endian bytes
    """
    return n.to_bytes(length, byteorder="big")
ea16ab76dc68d0aef3f189aa0b53fdd99d42b8cf
272,644
def lower_text(sentences):
    """Given a list of sentences, return them with every character
    converted to lowercase."""
    return list(map(str.lower, sentences))
6fb6b9796f7523f577dfd4382f30059f9e6080de
598,217
from typing import List from typing import Pattern import re from typing import Optional from typing import Match def _remove_blank_lines(*, expression: str) -> str: """ Remove blank (break or spaces only) lines from expression string. Parameters ---------- expression : str Target expression string. Returns ------- expression : str Expression string that removed blank lines. """ lines: List[str] = expression.splitlines() result_lines: List[str] = [] pattern: Pattern = re.compile(pattern=r'^\s*$') for line in lines: match: Optional[Match] = pattern.match(string=line) if match is not None: continue result_lines.append(line) expression = '\n'.join(result_lines) return expression
bd6976ea6b38907c93fb53d2b445dd482875ab65
628,958
def generate_missing_location_msg(filepath, errors):
    """Generate error message for missing location LookupError."""
    if filepath.endswith(".py"):
        msg = "Unused step implementation"
    else:
        msg = "Step implementation not found"
    if len(errors) == 1:
        msg += ". Also registered an error: {}".format(str(errors[0]))
    elif errors:
        msg += ". Also registered {} errors, use :BehaveErrors to see them.".format(len(errors))
    return msg
7dcf958d10b33a8c2212918dafd5e33445d25549
283,644
from typing import OrderedDict


def _top_level_tags(form):
    """
    Returns a OrderedDict of the top level tags found in the xml, in the order
    they are found.
    """
    to_return = OrderedDict()
    element = form.get_xml_element()
    if element is None:
        # No XML available: fall back to the parsed form data, sorted by key.
        return OrderedDict(sorted(form.form_data.items()))
    for child in element:
        # fix {namespace}tag format forced by ElementTree in certain cases
        # (eg, <reg> instead of <n0:reg>)
        key = child.tag.split('}')[1] if child.tag.startswith("{") else child.tag
        if key == "Meta":
            # normalize the capitalized Meta tag to lowercase
            key = "meta"
        # NOTE(review): assumes form.get_data resolves 'form/<key>' paths --
        # confirm against the form object's API.
        to_return[key] = form.get_data('form/' + key)
    return to_return
26e2b3ac9e8aa0f28aa95fdafd8a8b9b8a0812ed
56,561
def slice_along_axis(index, axis, ndim):
    """
    Return a selector that takes a single-element slice (subtensor) of an
    nd-array along a certain axis (at a given index).  Indexing with it
    yields an (n-1)-dimensional array, e.g. ``data[:, :, 5, :]``.

    The advantage of this function over subscript syntax is that you can
    specify the axis with a variable.

    :param index: position to select along ``axis``
    :param axis: axis to slice along
    :param ndim: number of dimensions of the target array
    :return: tuple usable as an index expression
    """
    # Use slice(None) (i.e. ":") for the untouched axes.  The previous
    # implementation used None, which numpy interprets as np.newaxis and
    # therefore inserted extra axes instead of selecting everything.
    return tuple(index if i == axis else slice(None) for i in range(ndim))
99d393eaa0ded322e4fd3c458af86ef661fe70f3
262,690
def get_account_key_change_status(
    self,
) -> dict:
    """Get current account key change status

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - spPortal
          - GET
          - /spPortal/account/key/changeStatus

    :return: Returns dictionary of account key change status \n
        * keyword **generateTimestamp** (`int`): Latest epoch time in
          milliseconds when new account key was generated. ``Null`` if
          account key has never been changed.
        * keyword **committedToPortalTimestamp** (`int`): Latest epoch
          time in milliseconds when old account key was delete from
          Portal. ``Null`` if current account key change has not been
          committed to Portal.
    :rtype: dict
    """
    # Simple GET against the Portal endpoint; no parameters or body.
    return self._get("/spPortal/account/key/changeStatus")
6ae1970364697b5b2fed14d9c6933f0703984c1a
263,860
def _fmt_rank(val): """Returns value (between 0 and 1) formatted as a percentage.""" return '%.5f' % (100 * val)
cc9fe6ce15371fe0540112d7f24b82fdf8e29a2c
365,980
def pstring(state, num):
    """Return a nice string for a state triple and its count.

    Example:
    >>> pstring(X(0,-1,1),4)
    ( 0, *, 1) :  4
    """
    first, middle, last = state
    # -1 in the middle slot renders as a wildcard.
    if middle == -1:
        middle = " *"
    return f"({first:2},{middle:2},{last:2}) : {num:2}"
3af8fe9b35d43dbca4f03b98c0f43721c6822203
685,749
import string


def isValidMapKey(key):
    """Returns ``True`` if the given string is a valid key for use as a
    colour map or lookup table identifier, ``False`` otherwise.

    A valid key comprises lower case letters, numbers, underscores and
    hyphens.
    """
    allowed = set(string.ascii_lowercase + string.digits + '_-')
    return all(ch in allowed for ch in key)
2e9167c3351b6c80bcc12c129279c4048f511e24
22,670
def get_sample_nwb_metadata(experiment_id): """ Returns sample NWB metadata for testing purposes without needing an ElabFTW token. :param experiment_id: Ignored - this function always returns the same sample metadata :return: Sample NWB metadata :rtype: dict """ # Output of get_nwb_metadata(156) with a valid elabftw token. # Note: this needs to be updated when this output changes. return { "NWBFile": { "session_description": "test fake experiment with json metadata", "identifier": "20211001-8b6f100d66f4312d539c52620f79d6a503c1e2d1", "session_start_time": "2021-10-01 11:13:47", "experimenter": ["Liam Keegan"], "institution": "Heidelberg University, Physiology and Pathophysiology", "lab": "Medical Biophysics, Groh/Mease", "virus": "AAVretr ChR2-tdTomato:\n * Virus in -80 storage: AAVrg-CAG-hChR2-tdTomato (AAV Retrograde)\n * Origin: Addgene\n * Comments: retrograde\n * Expression Quality: \n * product number: \nAAVretr Flpo:\n * Virus in -80 storage: AAVretr EF1a-Flpo\n * Origin: Addgene\n * Comments: retrograde Flip\n * Expression Quality: \n * product number: 55637-AAVrg\n", }, "Subject": { "sex": "unknown", "weight": 0.002, "genotype": "Nt1Cre-ChR2-EYFP", "subject_id": "xy1", "description": "test mouse", "date_of_birth": "2000-01-01", }, "Ecephys": {}, "Other": { "OptogeneticStimulationSite": { "device": "the device", "location": "S1: primary somatosensory cortex", "description": "laser stimulation", "excitation_lambda": "473", }, "SiliconProbe": { "Probe identifier:": "1234356", "ElectrodeGroup.name": "H5", "ElectrodeGroup.description": "a test H5 probe", }, }, }
d0611ce17cb0cc4276d745bc9b8dc2857756a991
257,936
def zero_pad_value(value: int) -> str:
    """
    Zero pad the provided value (when below 10) and return the string.
    """
    if value < 10:
        return "0" + str(value)
    return str(value)
eb84dd7d41e3ef29b13b5c7a7137f6d02fc55477
199,220
def jsonrpc_error(id, code, message, data=None):
    """Create JSON-RPC error response"""
    error = {
        'code': code,
        'message': message,
        'data': data,
    }
    return {'jsonrpc': '2.0', 'error': error, 'id': id}
3fb820eeefd7927f153b0ffd8f5e16e18cb13924
381,226
def format_labels(labels_dict):
    """Format a labels dictionary as ``'key:value, key2:value'``.

    A ``None`` dictionary yields an empty string.
    """
    if labels_dict is None:
        return ""
    return ", ".join("%s:%s" % (key, str(value))
                     for key, value in labels_dict.items())
fd8aa075752e54a3247a5d4c803882568ba774d8
387,194
import re


def CheckFileExt(FileName, FileExts):
    """Check file type based on the specified file extensions delimited
    by spaces.

    Arguments:
        FileName (str): Name of a file.
        FileExts (str): Space delimited string containing valid file
            extensions.

    Returns:
        bool : True, FileName contains a valid file extension;
        Otherwise, False.
    """
    for FileExt in FileExts.split():
        # re.escape guards against extensions containing regex
        # metacharacters (e.g. "c++"), which previously corrupted the
        # pattern.
        if re.search(r"\.%s$" % re.escape(FileExt), FileName, re.IGNORECASE):
            return True
    return False
006a6f7de6d6f957bfac5fac0252b6062e4b5ad3
186,895
def pick_bpart(df, bpart):
    """
    Create a sub dataset for one particular body part.

    :param df: dataframe to process
    :param bpart: body part to extract; "all" returns df unchanged
    :return: trimmed dataframe (index reset)
    """
    if bpart == "all":
        return df
    subset = df[df["body_part"] == bpart]
    return subset.reset_index()
00d3d80f0e8614d0ee14345cd5e881a1738372b3
620,302
def rename_columns(df, rename_dict):
    """Renames columns based on `rename_dict`.

    Args:
        df (`pandas.DataFrame`): The dataframe to rename columns in
        rename_dict (dict): ``{'old_name': 'new_name'}`` mapping used
            to rename the columns

    Returns:
        `pandas.DataFrame`: `df` with renamed columns
    """
    renamed = df.rename(columns=rename_dict)
    return renamed
30ac3e5bb888897799d0d899fa186a48785ec54b
66,606
def connect_to_ecs(env):
    """
    Return boto connection to the ecs in the specified environment's region.
    """
    # Resolve the concrete resource handler subclass for this environment.
    rh = env.resource_handler.cast()
    wrapper = rh.get_api_wrapper()
    # NOTE(review): the credentials appear to be the handler's service
    # account/password pair -- confirm against get_boto3_client's signature.
    client = wrapper.get_boto3_client(
        'ecs',
        rh.serviceaccount,
        rh.servicepasswd,
        env.aws_region
    )
    return client
e4c0b7ad80c18fd6d2a90df6670ca9bfa6f1cbe3
703,150
from bs4 import BeautifulSoup


def get_tables_from_html_page(html_page):
    """Given an html page, return all html tables on the page and their
    contents, i.e., all <td ..>, <tr ..>"""
    soup = BeautifulSoup(html_page)
    return soup.find_all('table')
218a541c8beab9ae7b909a430d2a88bf1edad677
315,421
def upperLevelCredits(theDictionary):
    """Counts the number of upper-level (300-499) undergraduate credits.

    :param dict[str, int] theDictionary: The student's class information
        with the class (e.g. "CS 301") as the key and the number of
        credits as the value
    :return: The number of credits
    :rtype: int
    """
    total = 0
    for course, credits in theDictionary.items():
        # Course number is the first three digits of the second token.
        course_number = int(course.split()[1][:3])
        if 300 <= course_number < 500:
            total += credits
    return total
06c27b35fb23ff517fb2397cd42fe5d879432e1d
258,010
import re


def get_feat_shape(visual_archi, compress_type=None):
    """Get the feature shape for a training architecture and optional
    channel compression ("<N>x" divides the channel dimension by N)."""
    shapes = {
        "fasterrcnn": (-1, 2048, 7, 7),   # RCNN trained with min_size=224
        "maskrcnn": (-1, 2048, 10, 10),   # RCNN trained with min_size=800
        "resnet18": (-1, 512, 7, 7),
    }
    if visual_archi not in shapes:
        raise NotImplementedError("Unknown archi {}".format(visual_archi))
    feat_shape = shapes[visual_archi]

    if compress_type is not None:
        if not re.match(r"\d+x", compress_type):
            raise NotImplementedError("Unknown compress type {}".format(compress_type))
        factor = int(compress_type[:-1])
        feat_shape = (feat_shape[0], feat_shape[1] // factor) + feat_shape[2:]
    return feat_shape
14a968924a1ee6be7f10de08578c19b8daadddd9
595,769
def _get_motion_string(n_frames, frame_time, frames): """ Compose the motion part of a bvh file. :param n_frames: Number of frames. :type n_frames: int :param frame_time: Time in seconds it takes to advance 1 frame. :type frame_time: float :param frames: The motion data for channels of all joints. :type frames: numpy.ndarray :return: Motion as string representation. :rtype: str """ s = 'MOTION\n' s += 'Frames: {}\n'.format(n_frames) s += 'Frame Time: {}\n'.format(frame_time) for frame in frames.astype(str): s += ' '.join(frame) s += '\n' return s
c548bf8319001efeaaa3cdda42e4bc96c39fa1c0
424,174
def with_prefix(prefix, name):
    """Adds prefix to name, joined by a slash."""
    return prefix + "/" + name
03e82480cee3bb9cdf0b73c4e26a792c271e202d
633,183
def german_weekday_name(date):
    """Return the german weekday name for a given date."""
    names = (u'Montag', u'Dienstag', u'Mittwoch', u'Donnerstag',
             u'Freitag', u'Samstag', u'Sonntag')
    return names[date.weekday()]
7d2919c61438ec913abe38cccd924bb69f866655
1,089
def optional_square(number: int = 5) -> int:  # noqa
    """
    Square `number`.

    The function from Modin.

    Parameters
    ----------
    number : int
        Some number.

    Notes
    -----
    The `optional_square` Modin function from modin/scripts/examples.py.
    """
    return number * number
eaf7cbce6397a577cb563aebb68721ba7c6f3442
425,024
def OwnerCcsInvolvedInFilterRules(rules):
    """Finds all user_ids in the given rules and returns them.

    Args:
      rules: a list of FilterRule PBs.

    Returns:
      A set of user_ids.
    """
    user_ids = set()
    for filter_rule in rules:
        user_ids.update(filter_rule.add_cc_ids)
        # A zero/unset default owner is not a real user.
        if filter_rule.default_owner_id:
            user_ids.add(filter_rule.default_owner_id)
    return user_ids
01b26a9774b232d98b9777062052efa1c8acee69
154,326
import torch


def lagged_input(input, hist_len, hist_stride=1, time_stride=1, tensor_type=torch.float):
    """
    Introduce lagged history input from a time series.

    :param torch.tensor input: input of shape (dimensions, timesteps)
    :param int hist_len: length of the history window
    :param int hist_stride: stride applied within the history axis
    :param int time_stride: stride between successive windows
    :param dtype tensor_type: unused here; kept for interface compatibility
    :returns: lagged input tensor of shape (dimensions, time-H+1, history)
    :rtype: torch.tensor
    """
    # unfold produces (dim, windows, hist); then subsample the history axis.
    windows = input.unfold(-1, hist_len, time_stride)
    return windows[:, :, ::hist_stride]
db71a5d6a99dc109b21abe2e7afeb320151d3af3
262,289
def _sam_readline(sock): """ read a line from a sam control socket """ response = bytearray() while True: c = sock.recv(1) if c: if c == b'\n': break response += c else: break return response.decode('ascii')
ac35ce3bdd6a4e28eba0b25adae5e53b8b794aa6
62,186
def update_ave_depth(leaf_depth_sum, set_of_leaves):
    """
    Average tree depth update.

    Inputs:
        - leaf_depth_sum: The sum of the depth values of all leaf nodes.
        - set_of_leaves: A python set of leaf node ids.

    Output:
        - ave_depth: The average depth of the tree.
    """
    leaf_count = len(set_of_leaves)
    return leaf_depth_sum / leaf_count
ceb19f3b92bbdf5a8c5d2cac0a9df8ad61fe7ebb
481,920
def RGB_to_CMYK(r, g, b, gcr=1.0):
    """
    take r,g,b float values (0.0 to 1.0), invert to get c,m,y,
    apply GCR (0.0 to 1.0, 0 means no GCR = CMY separation),
    return c,m,y,k as integers (percent values)
    GCR see http://en.wikipedia.org/wiki/Grey_component_replacement
    """
    c = 1.0 - float(g + b) / 2
    m = 1.0 - float(r + b) / 2
    y = 1.0 - float(r + g) / 2
    # Grey component replacement: move a gcr-scaled share of the common
    # grey into the K channel.
    k = min(c, m, y) * gcr
    channels = (c - k, m - k, y - k, k)
    return [int(round(v * 100)) for v in channels]
8144f6c0e324cc91556c802f04ba53dd4d172d1f
343,108
def split_key_val_pairs(context, parameter, args):  # pylint: disable=unused-argument
    """Split "key=value" strings into a dictionary.

    Values may themselves contain '=' characters; only the first '='
    separates key from value.
    """
    # maxsplit=1 so values containing '=' (e.g. base64 padding "dg==")
    # survive instead of raising "dictionary update sequence" errors.
    return dict(arg.split("=", 1) for arg in args)
4c9431c06912f687118320a9f08a2edc4c7113b1
418,841
from typing import Any
import typing


def hint_is_specialized(hint: Any, target: Any) -> bool:
    """Checks if a type hint is a specialized version of target.

    E.g., hint_is_specialized(ClassVar[int], ClassVar) is True.

    isinstance would invoke type-checking, which this sidesteps.
    Behavior is undefined for simple type hints that don't take a type
    argument, like Any or a bare type.
    """
    origin = typing.get_origin(hint)
    return origin is target
b651fc05290de82ab5a5833d10ca68d6a96f2d7a
701,984
def convert_size(free, total, mode):
    """
    Convert free/total sizes according to the conversion mode.

    free  - returns free
    total - returns total
    used  - returns difference between total and free
    pfree - returns free as percentage of total
    pused - returns used as percentage of total

    :param mode: one of SIZE_CONVERSION_MODES
    """
    if total == 0:
        # even if free is not 0, it is better to alert authorities
        return 0
    conversions = {
        'free': lambda: free,
        'total': lambda: total,
        'used': lambda: total - free,
        'pfree': lambda: (free / total) * 100,
        'pused': lambda: ((total - free) / total) * 100,
    }
    handler = conversions.get(mode)
    # Unknown modes yield None, mirroring the original fall-through.
    return handler() if handler is not None else None
62ae759359758d511792412ee04b6b9a581a4fb7
102,934
def load_items_descriptions(data, items_name):
    """
    Load each item's description.

    Parameters:
        data(dict): Nested dictionaries containing all information of
            the game.
        items_name(list): List of item names.

    Returns:
        list: item descriptions, in the same order as ``items_name``.
    """
    return [data['Items'][name]['Description'] for name in items_name]
653caaf84a4cd64fcf81f857004a282f58074fdf
452,588
import time
import torch


def record_ddp_fit_model_stats(trainer, model, use_cuda):
    """Time ``trainer.fit(model)`` and, on CUDA, report peak allocated memory.

    Args:
        trainer: The trainer object.
        model: The model to fit.
        use_cuda: Whether to sync CUDA kernels around the fit call.

    Returns:
        Tuple of (peak memory in MiB when use_cuda, else None;
        total wall-clock seconds).
    """
    peak_mib = None
    start = time.perf_counter()

    if use_cuda:
        # Clean the allocator stats and drain pending kernels so the
        # measurement covers only this fit() call.
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.synchronize()

    trainer.fit(model)

    if use_cuda:
        torch.cuda.synchronize()
        peak_mib = torch.cuda.max_memory_allocated() / 2 ** 20

    elapsed = time.perf_counter() - start
    return peak_mib, elapsed
fdb6d7bd2ce6cb3a38cf9ab491be541863d05fd6
115,703
import math


def two_divider(num):
    """Solution to exercise P-1.30.

    Return how many times a positive integer greater than 2 must be
    repeatedly divided by 2 before the value drops below 2, i.e.
    floor(log2(num)).

    Computed exactly with integer arithmetic (``bit_length``); the
    previous ``int(math.log(num, 2))`` could be off by one for large
    inputs because of floating-point rounding (e.g. values near big
    powers of two).

    :param num: integer > 2
    :raises ValueError: if num is not an int or is < 3
    """
    if not isinstance(num, int) or (num < 3):
        raise ValueError
    # floor(log2(num)) == bit_length - 1, exact for any int size.
    return num.bit_length() - 1
2bc6cdf305d2ed423e9bab0a5011dbb8a6c9efd1
661,517
import yaml


def parse_yaml_file(path):
    """Read a YAML document from *path* into native Python objects.

    :param path: Path to the YAML file
    :return: A dictionary (or other native) representation of the file
    """
    with open(path, 'r') as stream:
        return yaml.load(stream, Loader=yaml.FullLoader)
a772e56a3ff3d7eac8a7e751f55f5ce2614ac167
476,005
def dydt(t, y, k, m, x_car):
    """Equations of motion for a brick coupled by a spring to a moving car.

    Args:
        t: time
        y: state vector [position, velocity]
        k: spring stiffness
        m: brick mass
        x_car: callable giving the car position as a function of time

    Returns:
        Tuple (velocity, acceleration) at time t.
    """
    position, velocity = y
    # Hooke's law relative to the car: F = -k * (x - x_car(t)), a = F / m.
    acceleration = -(k / m) * (position - x_car(t))
    return velocity, acceleration
aab0a4787a268c860e929d644e5652f43197f413
492,151
def check_sink_place_presence(net):
    """Return the unique sink place of a Petri net, if there is exactly one.

    A sink place is a place with no outgoing arcs.

    Parameters
    -------------
    net
        Petri net

    Returns
    -------------
    place
        The single sink place, or None when there are zero or several
    """
    sinks = [place for place in net.places if len(place.out_arcs) == 0]
    return sinks[0] if len(sinks) == 1 else None
080c422c9619834e001006d698e41dbd0e00ce80
203,705
def convert_arabic_to_roman(arabic):
    """Convert an arabic numeral (as string) to a roman numeral string.

    Only values up to 39 are converted — a rough practical maximum for
    roman notation in daily life; larger inputs are returned unchanged.

    ..note:: Based on https://gist.github.com/riverrun/ac91218bb1678b857c12.

    :param arabic: An arabic number, as string.
    :returns: The corresponding roman numeral, as string.
    """
    if int(arabic) > 39:
        return arabic

    to_roman = {
        1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V',
        6: 'VI', 7: 'VII', 8: 'VIII', 9: 'IX',
        10: 'X', 20: 'XX', 30: 'XXX'
    }

    # Walk digits from least to most significant, mapping each nonzero
    # digit (scaled by its magnitude) to its roman chunk.
    pieces = []
    magnitude = 1
    for char in reversed(arabic):
        digit = int(char)
        if digit != 0:
            pieces.append(to_roman[digit * magnitude])
        magnitude *= 10
    return ''.join(reversed(pieces))
6f786c75250fe4da7e7c540acc82a8fc100254a7
41,917
import requests


def get_languages(api_url):
    """Fetch the supported languages from the API.

    Args:
        api_url (str): API base url (expected to end with '/').

    Returns:
        List[dict]: one dict per language, each with ``name``, ``code``
        and ``longCode`` keys, e.g.::

            {
                "name":"English (GB)",
                "code":"en",
                "longCode":"en-GB"
            }
    """
    response = requests.get(api_url + "languages")
    return response.json()
c36b6e914ad3f3ce35aa1fd166d37d2c25ed8527
586,837
from functools import reduce


def solve(n, ar):
    """Return the sum of the elements of the array.

    :param n: declared size of the array (unused; kept for the
        problem's call signature)
    :param ar: iterable of numbers
    :returns: sum of all elements (0 for an empty array)

    Uses the built-in ``sum``, which — unlike ``reduce`` with no
    initializer — handles an empty array instead of raising TypeError.
    """
    return sum(ar)
da80e675c3580eecf52cbcf92a774786f39a0ddf
336,885
def closest_bin(q, bin_edges):
    """Find the bin containing a q-value.

    A q-value falls in bin i when bin_edges[i] < q <= bin_edges[i+1].

    :param float q: q-value
    :param list bin_edges: ascending list of bin edges
    :returns: bin index, or None when q lies outside every bin

    Iterates over bins (edge pairs) rather than edges, fixing the
    IndexError the original raised (via bin_edges[i+1]) whenever q was
    greater than the last edge.
    """
    for i in range(len(bin_edges) - 1):
        if bin_edges[i] < q <= bin_edges[i + 1]:
            return i
    return None
8e5208d25d795d4a67a5960ab39388567d69f1f3
241,008
import collections


def _test_spinn_config(d_embed, d_out, logdir=None, inference_sentences=None):
  """Generate a config tuple for testing.

  Args:
    d_embed: Embedding dimensions.
    d_out: Model output dimensions.
    logdir: Optional logdir.
    inference_sentences: A 2-tuple of strings representing the sentences
      (with binary parsing result), e.g.,
      ("( ( The dog ) ( ( is running ) . ) )", "( ( The dog ) ( moves . ) )").

  Returns:
    A config tuple.
  """
  # Field order must match the keyword arguments below; namedtuple field
  # positions matter to any caller that unpacks the config positionally.
  config_tuple = collections.namedtuple(
      "Config", ["d_hidden", "d_proj", "d_tracker", "predict",
                 "embed_dropout", "mlp_dropout", "n_mlp_layers", "d_mlp",
                 "d_out", "projection", "lr", "batch_size", "epochs",
                 "force_cpu", "logdir", "log_every", "dev_every", "save_every",
                 "lr_decay_every", "lr_decay_by", "inference_premise",
                 "inference_hypothesis"])

  # Unpack the optional (premise, hypothesis) pair; both stay None when
  # no inference sentences were supplied.
  inference_premise = inference_sentences[0] if inference_sentences else None
  inference_hypothesis = inference_sentences[1] if inference_sentences else None
  return config_tuple(
      d_hidden=d_embed,
      d_proj=d_embed * 2,  # projection doubles the embedding width
      d_tracker=8,
      predict=False,
      embed_dropout=0.1,
      mlp_dropout=0.1,
      n_mlp_layers=2,
      d_mlp=32,
      d_out=d_out,
      projection=True,
      lr=2e-2,
      batch_size=2,
      epochs=20,
      force_cpu=False,
      logdir=logdir,
      log_every=1,
      dev_every=2,
      save_every=2,
      lr_decay_every=1,
      lr_decay_by=0.75,
      inference_premise=inference_premise,
      inference_hypothesis=inference_hypothesis)
7e6a2d005c2b758d534157748a0c948dfddb73de
522,048
import torch


def xyxy_to_xywh(boxes: torch.Tensor):
    """Convert boxes from corner format
    [top_left_x, top_left_y, bottom_right_x, bottom_right_y] to
    [top_left_x, top_left_y, width, height] format.

    The input tensor is left untouched; a converted clone is returned.
    """
    assert boxes.shape[-1] == 4
    out = boxes.clone()
    # width  = bottom_right_x - top_left_x
    out[..., 2] = out[..., 2] - out[..., 0]
    # height = bottom_right_y - top_left_y
    out[..., 3] = out[..., 3] - out[..., 1]
    return out
4854509eca2a75d8ea622dc7cf1fe748ad02bb99
596,804
def determine_cycle_edges(cycle_nodes):
    """Return the edges of a cycle: each node paired with its successor,
    with the last node wrapping around to the first."""
    if not cycle_nodes:
        return []
    # Rotate the node list by one and zip it against the original.
    successors = cycle_nodes[1:] + cycle_nodes[:1]
    return list(zip(cycle_nodes, successors))
3f46339aaa6726329e6fb72521a178f67639627c
507,409
import pickle


def read_pickle(filepath):
    """Load and return the object stored in a pickle file.

    Uses a context manager so the file handle is closed even when
    ``pickle.load`` raises (the previous open/close pair leaked the
    handle on error).

    :param filepath: path to the pickle file
    :returns: the unpickled object
    """
    with open(filepath, 'rb') as infile:
        return pickle.load(infile)
eb356661b5ca1f530da0f7623397cf6d8b4eb663
650,398
def calc_csi(precision, recall):
    """
    Compute the critical success index: 1 / (1/p + 1/r - 1).

    Elements below 1e-5 are clamped to 1e-3 to avoid division by zero.

    Unlike the previous version, the input arrays are copied before
    clamping so the caller's arrays are no longer mutated in place.

    :param precision: array of precision values
    :param recall: array of recall values
    :returns: array of CSI values
    """
    precision = precision.copy()
    recall = recall.copy()
    precision[precision < 1e-5] = 1e-3
    recall[recall < 1e-5] = 1e-3
    csi = 1.0 / ((1 / precision) + (1 / recall) - 1.0)
    return csi
506d49943955c8a8f1ef02408dea498bec0ed560
522,647
def deal_text(text: str) -> str:
    """Reformat text: start a new indented line after each Chinese
    sentence-ending punctuation mark.

    Args:
        text (str): text to reformat

    Returns:
        str: reformatted text (prefixed with a leading space)
    """
    result = " " + text
    # Insert "\n " after every full-width sentence terminator.
    for mark in ("。", "?", "!", ";"):
        result = result.replace(mark, mark + "\n ")
    return result
8f16e7cd2431dfc53503c877f9d4b5429f738323
709,933
from typing import Type
from enum import Enum
from typing import List
from typing import Tuple


def enum_to_choices(enum: Type[Enum]) -> List[Tuple]:
    """Build a Django-style choices list from an Enum class:
    one (member_name, stringified_value) pair per member."""
    choices = []
    for member in enum:
        choices.append((member.name, str(member.value)))
    return choices
986002aebbe8405897d2cdea9ba28f28802fe856
302,012
def suite(*args, **kwargs):
    """Decorator that tags a test method with suite names.

    Positional arguments are suite names. Keyword arguments:
        conditions: when provided and not truthy, the suites are NOT
            applied to the method.
        reason: when provided, appended to the method's reason list for
            later logging.

    May be stacked on one method to attach several conditions/reasons.
    """
    def decorator(func):
        conditions = kwargs.get('conditions')
        reason = kwargs.get('reason')

        if not hasattr(func, '_suites'):
            func._suites = set()

        # Apply suites only when no condition was given, or it is truthy.
        should_apply = conditions is None or bool(conditions) is True
        if args and should_apply:
            func._suites = set(func._suites) | set(args)

        if reason:
            if not hasattr(func, '_suite_reasons'):
                func._suite_reasons = []
            func._suite_reasons.append(reason)

        return func
    return decorator
327f2ea00fd0bd61d7777127799f23744801d529
482,720
def nop(*args, **kwargs):
    """
    Accept any arguments, perform no operation.

    Parameters
    ----------
    args : ...
    kwargs : ...

    Returns
    -------
    None
    """
    # Intentionally empty: a body containing only this docstring and
    # comment falls off the end and returns None implicitly.
641fde1baadafdaff0993fae6c67e6b05005d803
368,143
def in_dict(key, dictionary):
    """
    Inputs:
        key- the key to be checked for in the dictionary
        dictionary- the dictionary the key will be searched in

    Returns True when the dictionary contains the key, False otherwise.

    Uses the hash-based ``in`` operator (O(1) average) instead of the
    previous O(n) scan comparing every key with ==.
    """
    return key in dictionary
ab9ab57e956107c2caa081ab6755589402d6ce8b
237,833
def add_macros(macros):
    """Build an MSVC ``/D`` preprocessor-macro string for a compile command.

    Args:
        macros (dict/list): preprocessor macros. Dict values of None
            produce bare ``/Dname`` flags; other values produce
            ``/Dname=value``.

    Returns:
        compile_str (str): concatenated ' /Dname[=value]' flags, or ''
        when macros is neither a dict nor a list.

    Uses ``isinstance`` instead of exact ``type() is`` checks so dict/list
    subclasses (e.g. OrderedDict) are accepted — backward compatible.
    """
    if isinstance(macros, dict):
        parts = [
            f' /D{name}' if value is None else f' /D{name}={value}'
            for name, value in macros.items()
        ]
    elif isinstance(macros, list):
        parts = [f' /D{name}' for name in macros]
    else:
        return ''
    return ''.join(parts)
7e4358ab16a409656b131155a94ce9f099e2544e
292,736
def align_rows(rows, bbox):
    """
    Stretch every row's left and right boundaries to match the final
    table bounding box. Rows are mutated in place and also returned.
    Any failure (bad keys/indices) is reported but not raised.
    """
    try:
        for row in rows:
            box = row['bbox']
            box[0] = bbox[0]  # align left edge
            box[2] = bbox[2]  # align right edge
    except Exception as err:
        print("Could not align rows: {}".format(err))
    return rows
756957ec1554f8eb4847a439cba45429c46b9ac4
59,077
import csv def _load_class_names(file_name): """Load the class names.""" # Open the TSV file and skip its header. with open(file_name, "rt") as f: csv_reader = csv.reader(f, delimiter="\t") next(csv_reader) # The class name is on the second column. class_names = [row[1] for row in csv_reader] return class_names
39fcabb63793a4f378bc69d2daa0879d41ef7cab
537,243
def unique_columns_list(nested_lists):
    """
    Flatten the nested list (two levels) and keep only unique elements.

    Parameters
    ----------
    nested_lists : list
        A list which contains sublists.

    Returns
    -------
    list
        Unique elements from the sublists (arbitrary order, as before).
    """
    # Set comprehension: skips the intermediate flat list the previous
    # list(set([...])) built before deduplicating.
    return list({item for sublist in nested_lists for item in sublist})
dd84ce0c28a10b74cf2fe8f9b50411527f922d68
471,696
def max_value(d):
    """
    Return the maximum value in dictionary d together with its key,
    as a (key, value) tuple; None for an empty dictionary. When several
    keys share the maximum value, the last one in iteration order wins.
    Raises TypeError when the values are not mutually comparable.

    >>> max_value({'a': 12, 3: 45})
    (3, 45)
    >>> max_value({}) is None
    True
    >>> max_value({33: 34, -1: 600, 'xyz': 2000.4})
    ('xyz', 2000.4)
    >>> max_value({1: 'abc', 2: 'xyz', 3: 'ghijkl'})
    (2, 'xyz')
    >>> max_value({1:'a', 2:3}) # doctest:+ELLIPSIS
    Traceback (most recent call last):
    ...
    TypeError:...
    """
    if not d:
        return None

    best_val = max(d.values())  # raises TypeError for mixed value types
    best_key = ''
    # Scan once; '>=' means the last key holding the maximum is kept,
    # matching the original behavior.
    for key, value in d.items():
        if value >= best_val:
            best_val = value
            best_key = key
    return (best_key, best_val)
6469d2f35bd05f088bf7b101ef47f151e6fc3f02
175,956
import torch


def earth_mover_distance(input: torch.Tensor, target: torch.Tensor, r: float = 2):
    """
    Batched Earth Mover's Distance between class distributions.

    Args:
        input: B x num_classes
        target: B x num_classes
        r: exponent penalizing the gap between the two CDFs

    Returns:
        Scalar tensor: mean per-sample EMD over the batch.
    """
    batch, num_classes = input.size()
    # Absolute gap between the cumulative distributions along classes.
    cdf_gap = torch.abs(torch.cumsum(input, dim=-1) - torch.cumsum(target, dim=-1))
    # Per-sample: (mean of gap^r over classes) ^ (1/r).
    per_sample = (cdf_gap.pow(r).sum(dim=-1) / num_classes) ** (1.0 / r)
    return per_sample.sum() / batch
11bce97c9d654c54eece4a60065fd19a957d8e58
102,914
from typing import Dict from typing import Any def _make_pod_envconfig(relation_state: Dict[str, Any]) -> Dict[str, Any]: """Generate pod environment configuration. Args: relation_state(Dict[str, Any]): relation details. Returns: Dict[str, Any]: pod environment configuration. """ mysql_db = relation_state["db"] endpoints = f"{mysql_db}-endpoints" return { # General configuration "MYSQL_HOST": endpoints, "MYSQL_USER": relation_state["user"], "MYSQL_ROOT_PASSWORD": relation_state["pwd"], }
3cfe7f0a8c9baac05c8a91c664d0a31953815572
549,926
def remove_indent(lines):
    """
    Strip all leading whitespace from every line and return the result
    as a new list.
    """
    stripped = []
    for line in lines:
        stripped.append(line.lstrip())
    return stripped
d294364630888f19b59f5df5ed1dbfbaeb23a4ab
500,630
import unicodedata


def normalize(tag):
    """Normalize a single tag: trim whitespace, lower-case, and drop
    any character that does not decompose to ASCII."""
    lowered = tag.strip().lower()
    # NFKD splits accented characters into base + combining marks, so
    # the ascii/'ignore' encode keeps the base letters.
    decomposed = unicodedata.normalize("NFKD", lowered)
    return decomposed.encode('ascii', 'ignore').decode('utf-8')
bad11bc517d971bf71e0a4cef9df5cc2a388f8cb
81,217
def UnitStringIsValid(unit: str) -> bool:
    """Checks to make sure that a given string is in fact a recognized
    unit used by the chromium perftests to report results.

    Args:
        unit (str): The unit string to be checked.

    Returns:
        bool: Whether or not it is a unit.
    """
    accepted_units = {
        "us/hop", "us/task", "ns/sample", "ms", "s", "count", "KB",
        "MB/s", "us",
    }
    return unit in accepted_units
c8ebfadce2096dcc1753e86691bac093cf654d83
306,961
def get_raffles_for_calendar_month(db, date, user_id, conf):
    """Get all of the raffle entries for a calendar month.

    Args:
        db (psycopg2.connection): db object
        date (datetime.date): any date inside the target month
        user_id (int): user id to look up entries for
        conf (Config): config object (unused here; kept for interface
            compatibility)

    Returns:
        psycopg2.cursor: cursor positioned over the month's raffles
    """
    cur = db.cursor()
    # Parameterized query: let the driver quote the values instead of
    # interpolating them into the SQL string (prevents SQL injection).
    cur.execute(
        "SELECT e.id, e.date, e.notes, e.maker, e.result, e.mid "
        "FROM all_entries e "
        "WHERE e.user_id = %s "
        "AND EXTRACT(MONTH from e.date) = %s "
        "AND EXTRACT(YEAR from e.date) = %s "
        "ORDER BY e.maker ASC, e.id DESC",
        (user_id, date.month, date.year),
    )
    return cur
ff35af781cb27d984f695e094ab15134eeb3c8c4
548,365
import requests
import time


def get_data(url, max_retries=5, delay_between_retries=1):
    """
    Fetch JSON data from *url*, retrying on failure.

    Args:
        url (str): The url to be fetched.
        max_retries (int): Number of retries.
        delay_between_retries (int): Delay between retries in seconds.

    Returns:
        data (dict)

    Raises:
        ConnectionError: when every attempt fails.
    """
    for attempt in range(1, max_retries + 1):
        try:
            return requests.get(url=url).json()
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit. ValueError covers JSON decode
        # failures from .json().
        except (requests.RequestException, ValueError):
            print("Exception occured on " + str(attempt) + " attempt to fetch data")
            time.sleep(delay_between_retries)
    raise ConnectionError("Failed to fetch data after {} attempts".format(max_retries))
b278ee956ceb558740e7c467097613fd65080a1f
85,513
def knight_amount(board_state, player):
    """Count the knights belonging to *player* on the board.

    Player 1 owns lowercase 'k'; player 0 owns uppercase 'K'.
    Any other player value yields 0, as before.
    """
    if player == 1:
        target = "k"
    elif player == 0:
        target = "K"
    else:
        return 0
    # Rows may be strings or lists of single-char squares; .count works
    # identically for a one-character target in both cases.
    return sum(row.count(target) for row in board_state)
4a2cad597ec0751fb6d4d751a177509576cba87d
677,229