Columns: content (string, 39 to 9.28k chars) · sha1 (string, 40 chars) · id (int64, 8 to 710k)
def states_hash(states):
    """Generate a hash of a list of states."""
    return "|".join(sorted(states))
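A quick illustrative check (inputs are my own, not from the dataset row):

>>> states_hash(["b", "a", "c"])
'a|b|c'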
50308bc2199aa9a295af9416d6fcd6142fa8e334
85,332
import re

def check_umi_template(umi, template):
    """
    Checks that the UMI (molecular barcode) given as input complies
    with the pattern given in template.
    Returns True if the UMI complies
    :param umi: a molecular barcode
    :param template: a regex-based template with the same length as the UMI
                     that specifies how the UMI should be formed
    :type umi: str
    :type template: str
    :return: True if the given molecular barcode fits the pattern given
    """
    p = re.compile(template)
    return p.match(umi) is not None
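Hedged example; the UMI and pattern below are invented for illustration:

>>> check_umi_template("ACGTACGT", r"[ACGT]{8}")
True
>>> check_umi_template("ACGTACGN", r"[ACGT]{8}")
False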
1467711238f6ee25d447ec8b6adb813e0ab3946d
84,223
def row_multiply(matrix, row, factor):
    """
    Multiplies a row by a factor
    :param matrix: List of lists of equal length containing numbers
    :param row: index of a row
    :param factor: multiplying factor
    :return: List of lists of equal length containing numbers
    """
    matrix[row] = [i * factor for i in matrix[row]]
    return matrix
94b6438bcaf43a95f590236f452800d8e41d26c5
85,789
def jaccard_sim_filtering(sentences_df, sentence_col="sentence", threshold=0.8):
    """
    Function used to filter sentences by Jaccard similarity
    ---
    **Arguments**\n
    `sentences_df` (DataFrame): DataFrame with sentences which contains a *sentence* column. \n
    `sentence_col` (String): Name of the sentence column in the data frame. Default value = "sentence".\n
    `threshold` (float): Jaccard similarity score threshold used for filtering. Default value = 0.8.
    ---
    **Returns**\n
    `sentences_df` (DataFrame): DataFrame filtered by Jaccard similarity.
    """
    sentence_set_list = sentences_df[sentence_col].str.split(' ').apply(lambda x: set(x)).values
    for i in range(0, len(sentence_set_list), 1):
        for j in range(i + 1, len(sentence_set_list), 1):
            a = sentence_set_list[i]
            b = sentence_set_list[j]
            c = a.intersection(b)
            sim_score = float(len(c)) / (len(a) + len(b) - len(c))
            if sim_score > threshold:
                sentences_df.loc[i, sentence_col] = 'FILTERED'
                break
    return sentences_df[~(sentences_df[sentence_col] == 'FILTERED')].reset_index(drop=True)
70aa8629c6857be93aafeb723b68a7e359d336ed
313,439
import torch

def get_uniform_delta(shape, eps, requires_grad=True):
    """
    Generates a torch uniform random matrix of shape within +-eps.
    :param shape: the tensor shape to create.
    :param eps: the epsilon bounds 0+-eps for the uniform random tensor.
    :param requires_grad: whether the tensor requires a gradient.
    """
    delta = torch.zeros(shape).cuda()
    delta.uniform_(-eps, eps)
    delta.requires_grad = requires_grad
    return delta
08394a2f6c611123e8c4473e0dcd88f7cab927df
564,334
def s2c_stereographic(sph):
    """
    Stereographic projection from the sphere to the plane.
    """
    u = sph[..., 0]
    v = sph[..., 1]
    w = sph[..., 2]
    return (u + 1j*v) / (1 + w)
1aff6cf6accd6bb26c647f014dc964404e84b979
699,046
import struct

def py_int2signed_word(val, be=False):
    """
    Converts a Python int value to a signed 16-bit word of bytes
    (big-endian if `be`, little-endian otherwise).
    """
    sig = '>h' if be else '<h'
    return struct.pack(sig, val)
5ebf82c3e151c31098604619d0e549214d27f486
601,871
def props_boot(props):
    """Accesses boot properties."""
    return props["boot"]
aed52522fac4349ec88414edda227e7743194143
74,884
def passiveAll2Freq(x, sqe):
    """
    Compute passive coupling, (single frequency signal, of unknown frequency):

    .. math:: \\mathcal{P}(x,\\varepsilon) = \\frac{x}{1-x\\sqrt{\\varepsilon}}
    """
    return x / (1.0 - x * sqe)
02cf2409bca898a421cd5666ac4b73c2f65cae03
164,849
from typing import Any
from collections.abc import Hashable

def arghash(args: Any, kwargs: Any) -> int:
    """Simple argument hash with kwargs sorted."""
    # The original ternary returned `x` in both branches; falling back to
    # repr() for unhashable values keeps the final hash() from failing.
    sorted_args = tuple(
        x if isinstance(x, Hashable) else repr(x)
        for x in [*args, *sorted(kwargs.items())]
    )
    return hash(sorted_args)
c3e95c63831c958bb2a52cabad9f2ce576a4fed8
702,334
def escape_slack_characters(raw: str) -> str:
    """Escape the special characters that are used by Slack in their messaging API.

    See `Slack API docs <https://api.slack.com/reference/surfaces/formatting#escaping>`_.

    Args:
        raw (str): String to be escaped
    Returns:
        (str) String with the problematic characters escaped
    """
    # Escape &
    out = raw.replace("&", "&amp;")
    # Escape < and >
    return out.replace("<", "&lt;").replace(">", "&gt;")
876bb0e2880eae02633e3994e5a829fdddd835c5
158,354
def to_dict(items, key, value):
    """ Transforms a list of items to a Key/Value dictionary """
    if items:
        return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
    else:
        return dict()
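Sample call with made-up records:

>>> to_dict([{"k": "a", "v": 1}, {"k": "b", "v": 2}], "k", "v")
{'a': 1, 'b': 2}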
db2c3b36743bf1add55cb84fba9d5e6ebfe7e8bb
553,933
from typing import List

def _get_missing_tarred_files(tarred_files: List[str], untarred_files: List[str]) -> List[str]:
    """Get all files in the tarred list that are not in the untarred list."""
    diff = set(tarred_files) - set(untarred_files)
    return list(diff)
bd51424cfc1962c1c30b2ce04382c5e3e70355b4
345,263
import re

def clean_word(word):
    """Lower-case and remove uncommon characters from a word."""
    return re.compile('[^a-zA-Z0-9_]').sub('', str(word.lower()))
b8d95006a276f60315193a574c720987c8b88766
317,384
import math

def compute_PL(p_t, awgn, snr_threshold, p_non):
    """
    :param p_t: transmit power (dBm);
    :param awgn: additive white Gaussian noise (dBm);
    :param snr_threshold: required SNR threshold (dB);
    :param p_non: non-outage probability;
    :return PL: breaking-point PL (dB)
    """
    def _dBm_2_Watt(x):
        return 10 ** (x / 10) / 1000.

    def _dB_2_unit1(x):
        return 10 ** (x / 10)

    def _unit1_2_dB(x):
        return 10 * math.log10(x)

    PL = _unit1_2_dB(-math.log(p_non) * _dBm_2_Watt(p_t)
                     / _dB_2_unit1(snr_threshold) / _dBm_2_Watt(awgn))
    return PL
552a04ee8662d1fb3de743a997022fbb00d68b36
194,648
def str2bool(v):
    """ Convert string expression to boolean

    :param v: Input value
    :returns: Converted message as boolean type
    :rtype: bool
    """
    return v.lower() in ("yes", "true", "t", "1")
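Example truthy/falsy inputs (chosen by me):

>>> str2bool("Yes")
True
>>> str2bool("0")
False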
bbf3439c923b400d433a4c6475b2f28dee3c3e24
409,998
def _is_label(src):
    """Determines whether the provided string is a label.

    Args:
        src: A `string` value.

    Returns:
        A `bool` specifying whether the `string` value looks like a label.
    """
    return src.find("//") > -1 or src.find(":") > -1
d564b49f88573aa161f1a5c9b4dc457fbf7c28a3
134,412
def get_intersect_point(a1, b1, a2, b2):
    """
    The point of intersection of two lines y = a*x + b.
    If the lines are parallel then (None, None) is returned.
    """
    if a1 is None and a2 is None:
        return None, None
    if a1 is None and abs(a2 - 0.0) < 1e-6:
        return b1, b2
    if a2 is None and abs(a1 - 0.0) < 1e-6:
        return b2, b1
    if abs(a2 - a1) < 1e-6:
        return None, None
    x = (b2 - b1) / (a1 - a2)
    y = (a1 * b2 - b1 * a2) / (a1 - a2)
    return x, y
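Worked example with slopes I picked: y = x and y = -x + 2 cross at (1, 1).

>>> get_intersect_point(1, 0, -1, 2)
(1.0, 1.0)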
67c466c7c3962f29a50de16a108722ea9be25055
530,959
def powMod(base, exp, mod):
    """
    Returns base to the power of exp modulus mod.
    """
    result = 1
    if exp == 0:
        return result
    A = base
    if 1 & exp:
        # Last bit of exp is 1 => exp is odd.
        result = base
    exp = exp >> 1
    while exp:
        A = (A**2) % mod
        if 1 & exp:
            result = (result * A) % mod
        exp = exp >> 1
    return result
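A square-and-multiply spot check (my numbers), agreeing with the builtin pow:

>>> powMod(3, 4, 5)
1
>>> pow(3, 4, 5)
1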
8f16a52031ddb459a8900a8ade4fcf20412002e0
234,715
def get_from_list(x, ys: list, remove=False):
    """
    Returns value from list. Optionally removes.

    Args:
        x: item value
        ys: list
        remove: removes item from list

    Returns:
        item
    """
    res = None
    if x in ys:
        res = x
        if remove:
            ys.remove(x)
    return res
1076d130ce7ee546c67347d054b3e09e292513b1
502,439
def parse_list_to_string(tags):
    """
    Parses a list of tags into a single string with the tags separated by comma

    :param tags: A list of tags
    :return: A string with tags separated by comma
    """
    return ', '.join(tags)
e6443a5941a0ba9a2488735e651c1dcca81dba5f
658,176
def remove_empty_from_dict(original):
    """get a new dict which removes keys with empty values

    :param dict original: original dict, should not be None
    :return: a new dict which removes keys with empty values
    """
    return dict((k, v) for k, v in original.items() if v)
fd6d4dcde6eae1da45fbc3fc9e49597779205945
199,216
def crop_box(box, height, width, image):
    """
    Crop the bounding box from the original image

    Args:
        box: bounding box points as a list
        height: height of the original image
        width: width of the original image
        image: the original image

    Returns:
        cropped image
    """
    ymin = int(box[0] * height)
    xmin = int(box[1] * width)
    ymax = int(box[2] * height)
    xmax = int(box[3] * width)
    return image[ymin:ymax, xmin:xmax, :]
6ffcb687b1b651d81ff0941fbedc4df86496b6b4
173,100
def reduce(arrays):
    """
    Truncates the lists to the length of the shortest one.

    Parameters:
        arrays - a list consisting of lists, e.g. a list of sinusoids
                 generated with the generate function. Type: list.

    Returns a list consisting of lists. Type: list.
    """
    # Compute the minimum length among all the lists
    min_len = len(arrays[0])
    for array in arrays:
        if len(array) < min_len:
            min_len = len(array)
    # Truncate every list to the length of the shortest one
    arrays = [array[0:min_len] for array in arrays]
    return arrays
f041977afda5baec6046f84a8b628b22f09a41b2
170,231
def choice_of_c_constants(prev_c, **optional_args):
    """Function that returns the new constants that will be used in the
    iteration knowing the previous ones.

    The returned values for the two constants should be such that there are
    enough models to find enough parents and also should be bounded in ...

    Args:
        prev_c ([type]): [description]

    Returns:
        int, int: The two constants with the smaller one before and the bigger one after
    """
    c_1 = prev_c - 1 if prev_c != 1 else prev_c
    c_2 = prev_c + 1 if prev_c != 3 else prev_c
    return c_1, c_2
d9b61421433a52d062a27ea5210953a29cbf9436
57,402
def ellipsize(buf, l):
    """Return a possibly-truncated version of buf to maximum length l,
    adding ellipsis if the string is truncated."""
    if l < 4:
        l = 4
    if len(buf) >= l:
        buf = buf[:l-3] + '...'
    return buf
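Sanity check on an invented string:

>>> ellipsize("hello world", 8)
'hello...'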
59e4b8f1afc558c7af70db7e6c69ff0c8b9ebeb9
258,592
def hexToRGB01(hexColor):
    """ Return a hex color string as an RGB tuple of floats in the range 0..1 """
    h = hexColor.lstrip('#')
    return tuple([x / 255.0 for x in [int(h[i:i + 2], 16) for i in (0, 2, 4)]])
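Example with a color of my choosing (pure red):

>>> hexToRGB01("#ff0000")
(1.0, 0.0, 0.0)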
992bf6cb5c2dd6944bfb44b1c6b93138b07602e0
358,387
from typing import Iterable
from typing import Any
from typing import Callable

def serialize_multi(
    value: Iterable[Any], serialize: Callable[[Any], str], separator: str = ";"
) -> str:
    """Serialize a list or set of values into a string.

    Each value is serialized with the ``serialize`` callable and the values
    are joined with the ``separator``.

    Args:
        value (Iterable[Any]): the list or set of values to serialize
        serialize (Callable[[Any], str]): the serializer to use for single values
        separator (str, optional): the separator to use between values.
            Must not be part of the serialized values! Defaults to ";".

    Returns:
        str: the string of serialized items.
    """
    return separator.join(serialize(val) for val in value)
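Brief check using str as the serializer (inputs are my own):

>>> serialize_multi([1, 2, 3], str)
'1;2;3'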
dd9db33c6181298d6094e9547824316ceba5d165
307,933
from datetime import datetime

def datetime_convert(value):
    """Convert a time string into a datetime object."""
    # Parse when value is a string
    if isinstance(value, str):
        return datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
    # Return as-is when value is already a datetime object
    return value
ab61ae9586813cdc96b9369d41b9b36665812138
460,025
def _is_dunder(name: str) -> bool:
    """Return True if a __dunder__ name, False otherwise."""
    return (
        name[:2] == name[-2:] == "__"
        and name[2:3] != "_"
        and name[-3:-2] != "_"
        and len(name) > 4
    )
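Two quick checks on invented names:

>>> _is_dunder("__init__")
True
>>> _is_dunder("__x_")
False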
532b16288d3807a78eb5f38238afc037c1143528
177,286
def table(lst):
    """
    Takes a list of iterables and returns them as a nicely formatted table.

    All values must be convertible to a str, or else a ValueError will
    be raised.

    N.B. I thought Python's standard library had a module that did this
    (or maybe it was Go's standard library), but I'm on an airplane and
    pydoc sucks.
    """
    pad = 2
    maxcols = []
    output = []
    first_row = True
    for row in lst:
        if row is None:
            output.append([])
            continue
        output_row = []
        for i, cell in enumerate(row):
            cell = str(cell)
            if first_row:
                maxcols.append(len(cell) + pad)
            else:
                maxcols[i] = max([maxcols[i], len(cell) + pad])
            output_row.append(cell)
        output.append(output_row)
        first_row = False
    rowsep = '-' * sum(maxcols)
    nice = []
    for i, row in enumerate(output):
        nice_row = []
        for j, cell in enumerate(row):
            nice_row.append(cell.rjust(maxcols[j]))
        nice.append(''.join(nice_row))
        if i < len(output) - 1:
            nice.append(rowsep)
    return '\n'.join(nice)
b85fba6bd376810e4dfe206f7e7e053955ebec54
673,619
import inspect

def super_class_property(*args, **kwargs):
    """
    A class decorator that adds the class' name in lowercase as a property
    of its superclass with a value constructed using the subclass'
    constructor with the given arguments. So for example:

        class A:
            pass

        @super_class_property(foo=5)
        class B(A):
            def __init__(self, foo=3):
                self.foo = foo

    Effectively results in the following, after the definition of B:

        A.b = B(foo=5)

    Can be used multiple times with different arguments if desired.
    """
    def add_superclass_property(cls):
        nonlocal args, kwargs
        mro = inspect.getmro(cls)
        if len(mro) <= 2:
            raise TypeError(
                (
                    "Class {} can't be a super_class_property because it has "
                    "no super class."
                ).format(cls)
            )
        parent = mro[1]
        instance = cls(*args, **kwargs)
        setattr(parent, cls.__name__.lower(), instance)
        return cls

    return add_superclass_property
ecfd38ba3d7ea96266278ed6be6cf0ba87263d7d
705,232
import re

def clean_progress_output(output):
    """Return output cleaned from \\r, \\n, and ANSI escape sequences"""
    return re.sub(
        r"""(?x)        # Matches:
        \n|\r|          # 1. newlines or carriage returns, or
        (\x1b\[|\x9b)   # 2. ANSI control sequence introducer ("ESC[" or
                        #    single byte \x9b)
        [^@-_]*[@-_]|   #    + private mode characters + command character, or
        \x1b[@-_]       # 3. ANSI control codes without sequence introducer
                        #    ("ESC" + single command character)
        """,
        '', output)
552c6d3ac3cd69d8020abba647f91d73d11052d4
437,685
import torch

def truncated_normal(size, std):
    """
    Pytorch does not have a truncated normal function so we manually make one
    in order to cut the dependency on tensorflow.
    Modified version of:
    https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/20
    """
    mean = 0
    tensor = torch.zeros(size)
    tmp = tensor.new_empty(size + (4,)).normal_()
    valid = (tmp < 2) & (tmp > -2)
    ind = valid.max(-1, keepdim=True)[1]
    tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
    return tensor
0064f5a5c97809072b9493f823667d4036921d50
687,723
import click

def style_prompt(message):
    """Returns a unified style for click prompts."""
    return click.style(message, fg="cyan")
f04246be8f5b6afd689c46eae5529d3565f8fb7d
656,692
from collections import OrderedDict

def load_mhd_header(filename):
    """Return an OrderedDict containing metadata loaded from an mhd file."""
    metadata = OrderedDict()

    with open(filename) as header_file:
        for line in header_file:
            (key, val) = [x.strip() for x in line.split("=")]
            if key in ['ElementSpacing', 'Offset', 'CenterOfRotation',
                       'TransformMatrix', 'ElementSize']:
                new_val = [float(s) for s in val.split()]
            elif key in ['NDims', 'ElementNumberOfChannels']:
                new_val = int(val)
            elif key in ['DimSize']:
                new_val = [int(s) for s in val.split()]
            elif key in ['BinaryData', 'BinaryDataByteOrderMSB', 'CompressedData']:
                # pylint: disable=simplifiable-if-statement
                if val.lower() == "true":
                    new_val = True
                else:
                    new_val = False
            else:
                new_val = val
            metadata[key] = new_val

    return metadata
f69031727735f0279c895aea2d70701efbfe0a38
608,613
import re

def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be
    kept if possible.
    """
    def replace_insane(char):
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if not result:
            result = '_'
    return result
a86c98bd22e6f828ba9851a84c51e92f6f27a095
164,882
def get_all_classes(self):
    """ Retrieves the list of class names in this dataset """
    return [name for name in self.name_to_class_info.keys()]
6b66528f2c2b62023615467b9386e7a81e3194ca
99,851
def calc_sc_carter(slr, ecc, x):
    """
    Compute carter constant for the SC case (a = 0).

    Parameters:
        slr (float): semi-latus rectum [6, inf)
        ecc (float): eccentricity [0, 1)
        x (float): inclination value given by cos(theta_inc) (0, 1]
            negative x -> retrograde
            positive x -> prograde

    Returns:
        Q (float): Carter constant
    """
    ecc2 = ecc * ecc
    slr2 = slr * slr
    x2 = x * x

    Q = (slr2 * (-1 + x2)) / (3 + ecc2 - slr)
    return Q
0b1213c83494f806b67b60321bbd90e02d007c83
93,594
import json

def get_metrics_collections(file_paths):
    """
    Reads the json files to be averaged and returns the json contents

    :param file_paths: A list of file paths
    :return: A list of dictionaries, which represent the metrics obtained
        from one run of Collector
    """
    metrics_collections = []
    for file_path in file_paths:
        with open(file_path) as f:
            metrics_collections.append(json.load(f))
    return metrics_collections
4db2fbf3b0e63629a76a64bc4e4107fff8680e3f
52,469
import collections

def secs_to_text(cs):
    """Convert time in seconds to a human readable string."""
    v = collections.OrderedDict()
    v['s'] = cs % 60
    cs //= 60
    v['m'] = cs % 60
    cs //= 60
    v['h'] = cs % 24
    cs //= 24
    v['d'] = cs

    parts = []
    for k in v:
        if v[k] != 0:
            parts.append("{}{}".format(v[k], k))

    return "_".join(parts[::-1])
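Check with a hand-picked duration (3725 s = 1 h 2 m 5 s):

>>> secs_to_text(3725)
'1h_2m_5s'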
f5d9cc66cbf6815fc765700309eb0f3124e58192
664,323
from unittest.mock import Mock

def mock_data_manager(components):
    """Return a mock data manager of a general model."""
    dm = Mock()
    dm.components = components
    dm.fixed_components = []
    return dm
e796dbe73e2ec7df650ceab450a3a5449a6af9ed
703,879
def get_bond_order(molecule, bond_index):
    """Get the order of bond for a specified index.

    Returns the order of bond (whether it's a single/double/triple bond)
    present at bond_index.

    Parameters
    ----------
    molecule : Molecule
        The molecule to which the bond belongs.
    bond_index : int
        Index of the bond whose order is to be obtained.
    """
    return molecule.GetBondOrder(bond_index)
143d27f7e3f7da06d99bc85fd004370d8e295aa9
497,695
import mimetypes
import cgi

def _get_content_type(conn) -> str:
    """Pull out mime type from a connection.

    Prefer explicit header if available, otherwise guess from url.
    """
    content_type = mimetypes.guess_type(conn.url)[0] or ""
    if hasattr(conn, "getheaders"):
        content_type = dict(conn.getheaders()).get("Content-Type", content_type)
    return cgi.parse_header(content_type)[0]
665b72fa653429e421a949d3e2b9fc722513a46e
573,081
def c(k):
    """Capitalize or decapitalize one letter depending on its ascii value."""
    if isinstance(k, str):
        return k.lower() if ord(k) % 2 == 0 else k.upper()
    return k
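Two sample letters: an odd code point upper-cases, an even one lower-cases:

>>> c("a")  # ord 97, odd
'A'
>>> c("b")  # ord 98, even
'b'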
9c7e4ac19f4fedb1ce6c9d5d5cf7e12088ec3bd0
147,536
def get_trigger_status(code):
    """Get trigger status from code."""
    trigger_status = {0: "Enable", 1: "Disable"}
    if code in trigger_status:
        return trigger_status[code] + " (" + str(code) + ")"
    return "Unknown ({})".format(str(code))
d1165171ca72e7217295b9497091d459721b8619
138,084
def get_number_of_annotations_per_label(annotations, labels):
    """
    Gets the number of annotations per label

    :param annotations: a list of labelled (annotated) examples
    :param labels: a list of labels used when annotated
    :return: annotations_count: a dictionary of counts per label
    """
    annotations_count = {}
    chosen_labels_list = [annotated_labels for annotated_labels in annotations.values()]
    for label in labels:
        annotations_count[label] = sum(label in chosen_labels
                                       for chosen_labels in chosen_labels_list)
    return annotations_count
076d9be229252c3e558d60890725b8c9e66f60a2
387,910
def extend_params(params, more_params):
    """Extends dictionary with new values.

    Args:
        params: A dictionary
        more_params: A dictionary

    Returns:
        A dictionary which combines keys from both dictionaries.

    Raises:
        ValueError: if dicts have the same key.
    """
    for yak in more_params:
        if yak in params:
            raise ValueError('Key "%s" is already in dict' % yak)
    params.update(more_params)
    return params
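A simple merge of two invented dicts:

>>> extend_params({"a": 1}, {"b": 2})
{'a': 1, 'b': 2}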
626db0ae8d8a249b8c0b1721b7a2e0f1d4c084b8
910
def aic(log_likelihood, k):
    """Returns the score for the Akaike information criterion (AIC-score)
    for the given log-likelihood and number of parameters k.

    :param log_likelihood: The maximized value of the likelihood function for the model
    :type log_likelihood: float
    :param k: The number of parameters
    :type k: int
    :return: The score for the Akaike information criterion (AIC-score) for
        the given log-likelihood and number of parameters k
    :rtype: float
    """
    return 2 * k - 2 * log_likelihood
7997947e5b394b9e808f881bb2c5e5594160cc4e
314,521
def process_confidence_threshold(netblock_list, threshold):
    """Returns results that are greater than given threshold.

    Args:
        netblock_list: A list of netblocks with identifying information.
        threshold: The threshold at which confidence scores lower than this
            number should be excluded.

    Returns:
        A modified netblock_list that excludes all netblocks with a
        confidence score less than the threshold.
    """
    return_list = []
    for sub_list in netblock_list:
        if sub_list[4] >= threshold:
            return_list.append(sub_list)
    return return_list
f374391e049d6374831cf88b70aef8f493eff839
339,005
def extract_entities_from_tagged(annotated_tokens, tags):
    """
    The method takes a list of tokens annotated with the Stanford NE
    annotation scheme and produces a list of entities.

    :param annotated_tokens: list of tuples where the first element is a
        token and the second is the annotation
    :return: list of entities each represented by the corresponding token ids

    Tests:
    >>> extract_entities_from_tagged([('what', 'O'), ('character', 'O'), ('did', 'O'), ('natalie', 'PERSON'), ('portman', 'PERSON'), ('play', 'O'), ('in', 'O'), ('star', 'O'), ('wars', 'O'), ('?', 'O')], tags={'PERSON'})
    [[3, 4]]
    >>> extract_entities_from_tagged([('Who', 'O'), ('was', 'O'), ('john', 'PERSON'), ('noble', 'PERSON')], tags={'PERSON'})
    [[2, 3]]
    >>> extract_entities_from_tagged([(w, 'NE' if t != 'O' else 'O') for w, t in [('Who', 'O'), ('played', 'O'), ('Aragorn', 'PERSON'), ('in', 'O'), ('the', 'ORG'), ('Hobbit', 'ORG'), ('?', 'O')]], tags={'NE'})
    [[2], [4, 5]]
    """
    vertices = []
    current_vertex = []
    for i, (w, t) in enumerate(annotated_tokens):
        if t in tags:
            current_vertex.append(i)
        elif len(current_vertex) > 0:
            vertices.append(current_vertex)
            current_vertex = []
    if len(current_vertex) > 0:
        vertices.append(current_vertex)
    return vertices
d204ba9efebb93afc3755e82546cc0c70835fd11
291,740
def _remove_atom_numbers_from_distance(feat_str):
    """
    Remove atom numbers from a distance feature string.

    Parameters
    ----------
    feat_str : str
        The string describing a single feature.

    Returns
    -------
    new_feat : str
        The feature string without atom numbers.
    """
    # Split the feature string in its parts
    parts = feat_str.split(' ')
    # Glue the desired parts back together
    new_feat = parts[0]
    for nr in [1, 2, 3, 5, 6, 7, 8]:
        new_feat += ' ' + parts[nr]
    return new_feat
95daddf73364d3fb619c7fbba0ac2d57a3680952
65,190
def decode_labelme_shape(encoded_shape):
    """
    Decode the cnn json shape (usually encoded from labelme format)
    Return a list of points that are used in labelme
    """
    assert isinstance(encoded_shape, str)
    points = encoded_shape.split(',')
    shape = list()
    for point in points:
        x, y = point.split('+')
        shape.append([float(x), float(y)])
    return shape
33e6875c8b84ccef508012a7e211edc580ec6dca
473,022
def SafeEval(evalstr, default=None, globalz=None, localz=None):
    """A safer wrapper for eval()."""
    if not globalz:
        globalz = {}
    if not localz:
        localz = {}
    try:
        return eval(evalstr, globalz, localz)
    except SyntaxError:
        return default if default else evalstr
7584b88ab72969221ed26fc5d21ca8cc9c7ce7c8
485,352
def calc_tree_depth(n_features, max_depth=5):
    """Calculate tree depth

    Args:
        n_features (int): Number of features
        max_depth (int, optional): Max tree depth. Defaults to 5.

    Returns:
        [int]: Final tree depth
    """
    # Designed using balls-in-bins probability. See the paper for details.
    m = float(n_features)
    depth = 0
    expected_empty = m  # the number of unique attributes not selected so far
    while expected_empty > m / 2:  # repeat until less than half the attributes are empty
        expected_empty = m * ((m - 1) / m) ** depth
        depth += 1
    # the above was only for half the numerical attributes.
    # now add half the categorical attributes
    return min(max_depth, depth)
c818e7606d0b6661c33d2d32487e35c316279420
98,852
def tags_key_value_matches_regex(aws_object, key, regex):
    """
    Return True if aws_object's tag `key` matches regex, otherwise False.

    :param aws_object: A boto3 aws object to check
    :param key: Tag to compare against
    :type key: str
    :param regex: Regex to match
    :type regex: re
    :return: True or False, if there was a match
    :rtype: bool
    """
    tags = aws_object["Tags"]
    return any(
        tag for tag in tags if tag["Key"] == key and regex.match(tag["Value"])
    )
ada2b55aa3c2af8ededdb5cdc800401a4b2b2d41
479,958
def MergeResultsFromMultipleFiles(results_by_file, info_levels):
    """Merge multiple results to show occurrences in files and range of values.

    Args:
        results_by_file: A dict of check results indexed by file name.
        info_levels: A list of report levels to merge. E.g., ['warning', 'error']

    Returns:
        A dict of check results in the form of:
        {`check_name`: {`file_name`: [lower_bound, upper_bound]}}
    """
    merged = {}
    for filename, results in results_by_file.items():
        if results:
            for check_name, values in results.items():
                bounds = [float('inf'), -float('inf')]
                tracebacks = set()
                # Example `values`:
                # {'warning': {'count': 100, 'range': [0, 1]}}
                is_set = False
                for field, details in values.items():
                    if field in info_levels:
                        if 'range' in details:
                            bounds[0] = min(bounds[0], details['range'][0])
                            bounds[1] = max(bounds[1], details['range'][1])
                            is_set = True
                        if 'traceback' in details:
                            tracebacks.add(details['traceback'])
                report = {}
                if is_set:
                    report['range'] = bounds
                if tracebacks:
                    report['traceback'] = list(tracebacks)
                if report:
                    if check_name not in merged:
                        merged[check_name] = {}
                    merged[check_name][filename] = report
    return merged
cacaea2e5df8f8db89db792ccf9b0be6eeec3760
620,409
def _ToList(grouped_occurrences_dict):
    """Converts grouped_occurrences from a dict to a list, and sorts the
    groups by the most recent occurrence time, in descending order.

    dom_repeat only accepts an array but not json, so converts the
    grouped_occurrences_dict to a list.

    Args:
        grouped_occurrences_dict(dict): A dict of grouped occurrence dicts. Like
            {
                'group1': [occurrence1_dict, occurrence2_dict],
                'group2': [occurrence3_dict, occurrence4_dict]
            }

    Returns:
        (list): A list of grouped occurrence dicts. Like
            [
                {
                    'group_by_field': 'group1',
                    'occurrences': [occurrence1_dict, occurrence2_dict]
                },
                {
                    'group_by_field': 'group2',
                    'occurrences': [occurrence3_dict, occurrence4_dict]
                }
            ]
    """
    grouped_occurrences_by_most_recent_occurrence = [{
        'group_by_field': group_by_field,
        'occurrences': occurrences
    } for group_by_field, occurrences in grouped_occurrences_dict.items()]

    grouped_occurrences_by_most_recent_occurrence.sort(
        key=lambda e: e['occurrences'][0]['time_happened'], reverse=True)

    return grouped_occurrences_by_most_recent_occurrence
fcff87f0a3fe26e6f33fb26907718385feea6ef3
328,974
def check_replication(service, service_replication, warn_range, crit_range):
    """Check for sufficient replication of a service

    :param service: A string representing the name of the service
        this replication check is relevant to.
    :param service_replication: An int representing the number of available
        service instances
    :param warn_range: A two tuple of integers representing the minimum and
        maximum allowed replication before entering the WARNING state.
    :param crit_range: A two tuple of integers representing the minimum and
        maximum allowed replication before entering the CRITICAL state.

    Note that all ranges are closed interval. If the replication is outside
    the closed interval for the relevant level (e.g. warning, critical),
    then the error code will change appropriately.

    :returns check_result: A tuple of error code and a human readable error
        message. The error codes conform to the nagios plugin api.

        e.g. for an OK service
        (0, "OK lucy has 1 instance(s)")

        e.g. for a CRITICAL service
        (2, "CRITICAL lucy has 0 instance(s), expected value in [1, 1e18]")
    """
    code, status, interval = 0, 'OK', None
    if not (crit_range[0] <= service_replication <= crit_range[1]):
        code, status, interval = 2, 'CRITICAL', crit_range
    elif not (warn_range[0] <= service_replication <= warn_range[1]):
        code, status, interval = 1, 'WARNING', warn_range

    expected_message = ""
    if interval is not None:
        expected_message = ", expected value in {0}".format(interval)

    message = "{0} {1} has {2} instance(s){3}".format(
        status, service, service_replication, expected_message
    )

    return code, message
ac515bd0881431eebd0e14d0578f0bb1c07835f3
15,919
def _GetMsgId(msg_start, line_number, msg_start_table):
    """Construct the message id given the msg_start and the line number."""
    hex_str = '%x%04x' % (msg_start_table[msg_start], line_number)
    return int(hex_str, 16)
bb3aec39838ccfb5e2c74bafb6f50b64b25716c2
277,196
def setter_decorator(fset):
    """
    Define a write-only property that, in addition to the given setter
    function, also provides a setter decorator defined as the property's
    getter function.

    This allows one to set the property either through traditional
    assignment, as a method argument, or through decoration::

        class Widget(object):
            @setter_decorator
            def handler(self, value):
                self._handler = value

        widget = Widget()

        # Method 1: Traditional assignment
        widget.handler = lambda input: process(input)

        # Method 2: Assignment via method argument
        widget.handler(lambda input: process(input))

        # Method 3: Assignment via decoration
        @widget.handler
        def handler(input):
            return process(input)

        # Method 3b: Assignment via decoration with extraneous parens
        @widget.handler()
        def handler(input):
            return process(input)
    """
    def fget(self):
        def inner(value):
            fset(self, value)

        def outer(value=None):
            if value:
                # We are being called with the desired value, either directly
                # or as a decorator.
                inner(value)
            else:
                # Assume we are being called as a decorator with extraneous
                # parens, so return the setter as the actual decorator.
                return inner

        return outer

    fdoc = fset.__doc__
    return property(fget, fset, None, fdoc)
6a81aaf4c837ef958582c1064cb37ea856d3c0af
238,113
import hashlib

def hash_one(n):
    """A somewhat CPU-intensive task."""
    for i in range(1, n):
        hashlib.pbkdf2_hmac("sha256", b"password", b"salt", i * 1000)
    return "done"
521eea5576935cb024880f316653ac89634552b1
97,386
from typing import Tuple

def separate_wind(text: str) -> Tuple[str, str, str]:
    """Extracts the direction, speed, and gust from a wind element"""
    direction, speed, gust = "", "", ""
    # Remove gust
    if "G" in text:
        g_index = text.find("G")
        start, end = g_index + 1, g_index + 3
        # 16006GP99KT ie gust greater than
        if "GP" in text:
            end += 1
        gust = text[start:end]
        text = text[:g_index] + text[end:]
    if text:
        # 10G18KT
        if len(text) == 2:
            speed = text
        else:
            direction = text[:3]
            speed = text[3:]
    return direction, speed, gust
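Illustrative wind group (made up; the unit suffix is assumed already stripped):

>>> separate_wind("36010G18")
('360', '10', '18')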
799f5f1b36dc53fbfeb10c655c8ae3b735963c06
212,588
def datetime_to_ts(date_time):
    """Convert a DateTime object into a WARC 1.0 timestamp."""
    return date_time.strftime('%Y-%m-%dT%H:%M:%SZ')
672733bb83888535f9b72f51a8d456b0bd92efd4
114,064
def sarsa_estimate(rewards, next_states, dones, Q, policy, gamma=1.,
                   discrete_actions=True):
    """
    Sarsa estimator of the value of the current state-action pair given
    observations of the reward and next state. Can work on single
    observations or batches.

    Note this function expects the `policy` to be consistent with Q (i.e. it
    won't check this). True Sarsa requires `policy` to be the policy
    currently used by the agent, but this function can also be used for deep
    Q-learning or DDPG, i.e. off-policy.

    If `discrete_actions` is true, the Q function is assumed to map states to
    action value arrays; if `discrete_actions` is false, the Q function is
    assumed to map state-action value pairs to corresponding value estimates.

    Params
    ======
        rewards (Tensor): reward(s) for taking current action(s)
        next_states (Tensor): state(s) reached after taking current action(s)
        dones (Tensor): done flag(s) for next state(s)
        Q (nn.Module): Q module with properties described above
        policy (nn.Module): policy derived from Q (e.g. `EpsilonGreedyPolicy`)
        gamma (float): discount factor
        discrete_actions (bool): determines the action space type
    """
    next_actions = policy.act(next_states)
    if discrete_actions:
        # The policy may have collapsed dimensions by one (i.e. passing a
        # single state returns a scalar action, passing an array of states
        # returns a 1D array), so we may need to unsqueeze to prepare for gather
        if len(next_actions.shape) < len(next_states.shape):
            next_actions = next_actions.unsqueeze(-1)
        next_values = Q(next_states).gather(-1, next_actions)
        next_values = next_values.squeeze()  # collapse dimensions again
    else:
        next_values = Q(next_states, next_actions).squeeze()
    return rewards + gamma * (1. - dones) * next_values
6169569300aacf7400083fe54332c74b00f6091b
259,900
import json

def parse_graphviz_json(json_string):
    """Parses JSON representation of graphviz graph.

    Args:
        json_string: graphviz graph representation in JSON string format.

    Returns:
        graph in a dictionary format.
    """
    d = json.loads(json_string)
    gv_graph = {}
    node_id_to_name = {}
    for obj in d["objects"]:
        node_id_to_name[obj["_gvid"]] = obj["name"]
        gv_graph[obj["name"]] = list()
    for edge in d["edges"]:
        gv_graph[node_id_to_name[edge["tail"]]].append(
            node_id_to_name[edge["head"]])
    return gv_graph
f22af1a81e08feef3aeccd3c5562c1c958feffc7
559,364
import hashlib

def getImageHash(img):
    """ Calculates md5 hash for a given Pillow image. """
    md5hash = hashlib.md5(img.tobytes())
    return md5hash.hexdigest()
d7bd7e1857f6849143f07063c045ae206985d4a3
9,634
def get_etr_g(glass_type, attachment):
    """Solar heat gain coefficient of glazing for a vertical surface (-)

    Args:
        glass_type(str): glazing specification
        attachment(str): attached shading member

    Returns:
        float: solar heat gain coefficient of the glazing for a vertical surface (-)
    """
    # Table 1: solar heat gain coefficient of glazing for a vertical surface
    # (keys are kept in Japanese because they are looked up verbatim)
    table_1 = {
        '2枚以上のガラス表面にLow-E膜を使用したLow-E三層複層ガラス(日射取得型)': (0.54, 0.34, 0.12),
        '2枚以上のガラス表面にLow-E膜を使用したLow-E三層複層ガラス(日射遮蔽型)': (0.33, 0.22, 0.08),
        'Low-E三層複層ガラス(日射取得型)': (0.59, 0.37, 0.14),
        'Low-E三層複層ガラス(日射遮蔽型)': (0.37, 0.25, 0.10),
        '三層複層ガラス': (0.72, 0.38, 0.18),
        'Low-E二層複層ガラス(日射取得型)': (0.64, 0.38, 0.15),
        'Low-E二層複層ガラス(日射遮蔽型)': (0.40, 0.26, 0.11),
        '二層複層ガラス': (0.79, 0.38, 0.17),
        '単板ガラス2枚を組み合わせたもの': (0.79, 0.38, 0.17),
        '単板ガラス': (0.88, 0.38, 0.19)
    }
    # Column index: no attachment / shoji screen / external blind
    i = {'付属部材なし': 0, '和障子': 1, '外付けブラインド': 2}[attachment]
    return table_1[glass_type][i]
eb28103b1dbf1fe9fa6d88bf7fd5a4d8f1409017
275,214
def format_obj_keys(obj, formatter):
    """
    Take a dictionary with string keys and recursively convert all keys from
    one form to another using the formatting function. The dictionary may
    contain lists as values, and any nested dictionaries within those lists
    will also be converted.

    :param object obj: The object to convert
    :param function formatter: The formatting function for keys, which takes
        and returns a string
    :returns: A new object with keys converted
    :rtype: object

    :Example:
    ::

        >>> obj = {
        ...     'dict-list': [
        ...         {'one-key': 123, 'two-key': 456},
        ...         {'threeKey': 789, 'four-key': 456},
        ...     ],
        ...     'some-other-key': 'some-unconverted-value'
        ... }
        >>> format_obj_keys(obj, lambda s: s.upper())
        {
            'DICT-LIST': [
                {'ONE-KEY': 123, 'TWO-KEY': 456},
                {'FOUR-KEY': 456, 'THREE-KEY': 789}
            ],
            'SOME-OTHER-KEY': 'some-unconverted-value'
        }
    """
    if type(obj) == list:
        return [format_obj_keys(o, formatter) for o in obj]
    elif type(obj) == dict:
        return {formatter(k): format_obj_keys(v, formatter)
                for k, v in obj.items()}
    else:
        return obj
9d4e5ea2692e3e65e2b12c2134c775ef16e0a6d7
61,532
def unmap(widget):
    """Unmap a mapped WIDGET."""
    result = False
    if widget and widget.winfo_exists() and widget.winfo_ismapped():
        result = True
        geom_mgr = widget.winfo_manager()
        if geom_mgr == "grid":
            widget.grid_forget()
        elif geom_mgr == "pack":
            widget.pack_forget()
        elif geom_mgr == "place":
            widget.place_forget()
        else:
            result = False
    return result
4f80fb2ccbb92d1cdd8e20b2912885cf47a57c22
402,767
def is_multiallelic(variant):
    """Does variant have multiple alt alleles?

    Args:
        variant: third_party.nucleus.protos.Variant.

    Returns:
        True if variant has more than one alt allele.
    """
    return len(variant.alternate_bases) > 1
c170379ceced883d7569ad21b9c13d8a711d1536
64,937
def _shorten(code_list):
    """
    Shortens a list of numeric nomis geo codes into a string format where
    contiguous values are represented as ranges, e.g.
    1,2,3,6,7,8,9,10 -> "1...3,6...10"
    which can drastically reduce the length of the query url
    """
    # empty evals to False
    if not code_list:
        return ""
    if len(code_list) == 1:
        return str(code_list[0])

    code_list.sort()  # assume this is a modifying operation
    short_string = ""
    index0 = 0
    index1 = 0  # appease lint
    for index1 in range(1, len(code_list)):
        if code_list[index1] != (code_list[index1-1] + 1):
            if index0 == index1:
                short_string += str(code_list[index0]) + ","
            else:
                short_string += str(code_list[index0]) + "..." + str(code_list[index1-1]) + ","
            index0 = index1
    if index0 == index1:
        short_string += str(code_list[index0])
    else:
        short_string += str(code_list[index0]) + "..." + str(code_list[index1])
    return short_string
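Doctest-style check (codes invented), matching the corrected docstring example:

>>> _shorten([1, 2, 3, 6, 7, 8, 9, 10])
'1...3,6...10'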
fca49f6bc25118e2d68805544572b4883d819214
83,449
def get_color_from_similarity(similarity_score: float) -> str:
    """ Return css style according to similarity score """
    if float(similarity_score) > 15:
        return "#990033; font-weight: bold"
    if float(similarity_score) > 10:
        return "#ff6600"
    if float(similarity_score) > 5:
        return "#ffcc00"
    return "green"
3525a026cdbe9a712524aef373bd296502803c38
312,123
from typing import List

def orderBook_keys() -> List[str]:
    """Expect below keys as result of `depth` API endpoint call."""
    return ['asks', 'bids', 'lastUpdateId']
b297389f4f03a8084a0d7e357b37389fe6fa7aca
340,447
import torch

def array_shape(array):
    """Computes the shape of the tensor that would be created from the given array"""
    return torch.tensor(array).shape
01acfdb031ebf9afcc7e6cbc2a96e2336b8f80a8
354,566
import typing

def validate_list(expected, lst: list) -> typing.Union[str, bool]:
    """
    Validate a list against our expected schema.

    Returns False if the list is valid.
    Returns error in string format if invalid.
    """
    if not isinstance(lst, list):
        return "Expected argument of type `%s`, got `%s`" % (
            str(expected).replace("typing.", ""),
            type(lst).__name__,
        )

    each_arg_type = typing.get_args(expected)[0]
    for item in lst:
        if not isinstance(item, each_arg_type):
            return "Not all list items are of expected value, `%s`, found `%s`" % (
                each_arg_type.__name__,
                type(item).__name__,
            )
    return False
1318eaa1b8b4b1730b356475d5b2a3bdc037405f
673,211
def get_sequential_chunk(tests, modulo, modulo_index, is_sorted=False):
    """
    >>> get_sequential_chunk(range(10), 4, 0)
    [0, 1, 2]
    >>> get_sequential_chunk(range(10), 4, 1)
    [3, 4, 5]
    >>> get_sequential_chunk(range(10), 4, 2)
    [6, 7]
    >>> get_sequential_chunk(range(10), 4, 3)
    [8, 9]
    >>> get_sequential_chunk(range(10), 4, 4)
    []
    >>> get_sequential_chunk(range(10), 4, 5)
    []
    """
    if not is_sorted:
        tests = sorted(tests)
    chunk_size = len(tests) // modulo
    not_used = len(tests) % modulo
    shift = chunk_size + (modulo_index < not_used)
    start = chunk_size * modulo_index + min(modulo_index, not_used)
    end = start + shift
    return [] if end > len(tests) else tests[start:end]
14d05143df534b5f854e644a8b694bf142ef7fa0
450,731
from typing import Dict

def to_nvre(build_record: Dict):
    """
    From a build record object (such as an entry returned by listTagged),
    returns the full nvre in the form n-v-r:E.
    """
    nvr = build_record['nvr']
    if 'epoch' in build_record and build_record["epoch"] and build_record["epoch"] != 'None':
        return f'{nvr}:{build_record["epoch"]}'
    return nvr
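Minimal record sketch (field values invented):

>>> to_nvre({'nvr': 'pkg-1.0-1.el8', 'epoch': '2'})
'pkg-1.0-1.el8:2'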
44e4444aec378ced8c1fbef2d1ffc29584a2c999
444,040
def tokens_to_surf(it):
    """
    Given an iterator of segments as (type, value) tuples, reconstruct the
    surface string.
    """
    return "".join(v for (t, v) in it if t == "surf")
9ea05ef4dd1d7c0990a63cd08b760787bb783185
364,299
import copy

def Compu_B_Backw(MBackw, n):
    """Compute possible positions to shift monomials

    Parameters
    ----------
    MBackw : list
        monomials to be shifted, for example, [[1], [2], [1]]
    n : int
        size of the NLFSR

    Return
    ----------
    BBackw : list
        positions to shift monomials, for example, [1, 1, 3]
    """
    BBackw = []
    for i in range(len(MBackw)):
        m = copy.deepcopy(MBackw[i])
        b = n - 1 - max(m) - 1
        BBackw.append(b)
    return BBackw
7cdbc1562f1e9368550d7f82b55d1c48eedfe918
127,782
def syntax_highlighter(input: str) -> str:
    """.repo-metadata.json language field to syntax highlighter name."""
    if input == "nodejs":
        return "javascript"
    return input
24f7b34ea0459dec4ed70ac227399a315d238685
461,803
def cleanupQuotes(text):
    """
    Removes quotes if text starts and ends with them

    Args:
        text (str): Text to cleanup

    Returns:
        Text with quotes removed from start and end (if they existed) or
        original string (if not)
    """
    if text.startswith('"') and text.endswith('"'):
        return text[1:-1]
    else:
        return text
6079cd9491e248f20d645ebc1fb9c99d073e18ac
651,470
def check_unique(dataframe, identifier_list):
    """
    Verifies that dataframe is uniquely identified by the variables in identifier_list

    :param dataframe: a pandas dataframe
    :param identifier_list: a list of variable names. Must be contained in
        either the index or the variables of dataframe
    :return: a dataframe of identifiers that are not uniquely identified
        along with the counts of how many times they occurred. If the
        dataframe is uniquely identified, the function returns an empty
        dataframe
    """
    unique_identifier = dataframe.groupby(by=identifier_list).count().iloc[:, 0]
    unique_identifier.name = 'Count'
    unique_identifier = unique_identifier[unique_identifier > 1]
    return unique_identifier.to_frame()
bcbae8ecc778c49fe345d019101547050e8905e1
427,293
def _get_new_steplist(reqs_to_keep, old_step_data, req_ids):
    """Returns a list similar to `old_step_data` but with unwanted requests removed.

    Uses the requests and request components in `old_step_data` and the
    entity ids in `req_ids` to determine which elements in `old_step_data`
    to keep.

    Parameters
    ----------
    reqs_to_keep : dict
        Dictionary of requests and request components to keep
    old_step_data : list
        List of all the step data in the results file
    req_ids : dict
        Dictionary of entity ids for the entire results file

    Returns
    -------
    list
        List of just the step data to keep
    """
    # Start a new list with just the time element from `old_step_data`
    new_step_data = [old_step_data[1]]

    # Loop through the desired requests and components to pull elements
    # from `old_step_data` into `new_step_data`
    for request in reqs_to_keep:
        # For each desired request
        for req_comp in reqs_to_keep[request]:
            # For each desired request component, add that component's
            # step data to `new_step_data`
            req_id = int(req_ids[request][req_comp])
            new_step_data.append(old_step_data[req_id])

    return new_step_data
61e3c88dda3fae29a10c91b4abfc02ed4762f22e
695,776
def generate(g, n):
    """Generate a (sub-)group using g as generator in modulus n"""
    result = []
    for i in range(1, n):
        val = g ** i % n
        if val is not None and val > 0:
            result.append(val)
        if val < 2:
            return result
96a755cacff7b0b0ae86d4725f64fce574e78ae1
353,327
def init_window(stdscr):
    """Init a window filling the entire screen with a border around it."""
    stdscr.clear()
    stdscr.refresh()

    max_y, max_x = stdscr.getmaxyx()
    root_window = stdscr.derwin(max_y, max_x, 0, 0)
    root_window.box()

    return root_window
88134d9a8a8f4cdf1774ee0898e8597c3d82ddb4
565,948
import json

def load_json(filename):
    """
    loading json file

    :param filename: json file name
    :return: data as dict from json file
    """
    with open(filename, "r") as f:
        data = json.load(f)
    return data
08b7873e4277598ef77239db88a86f52b51b404b
461,419
import struct

def gatts_dec_attr_value_changed_ev_data(frame):
    """Decodes BTP Attribute Value Changed Event data

    Event data frame format
    0              16            32
    +--------------+-------------+------+
    | Attribute ID | Data Length | Data |
    +--------------+-------------+------+
    """
    hdr = '<HH'
    hdr_len = struct.calcsize(hdr)

    (handle, data_len) = struct.unpack_from(hdr, frame)
    data = struct.unpack_from('%ds' % data_len, frame, hdr_len)

    return handle, data
5a3f17fd229b7e1c421f5df3a4c2666a8a27ff37
427,145
import six

def encode_string(value):
    """
    Encode unicode to string: unicode -> str, str -> str

    Arguments:
        value (str/unicode): string to encode

    Returns:
        encoded value (string)
    """
    return value.encode('utf-8') if isinstance(value, six.text_type) else value
335f664db5ee34668ec0a63ba2bbcf651d1c1f68
174,308
def ellipsis_after(text, length):
    """ Truncates text and adds ellipses at the end. Does not truncate words in the middle. """
    if not text or len(text) <= length:
        return text
    else:
        return text[:length].rsplit(' ', 1)[0] + u"\u2026"
9b15c5e8f63caec0a7327ae1ce872bac932208ab
15,222
def get_value_from_each(key, dict_list):
    """Return the values for `key` in a list of dictionaries (as a generator)."""
    return (d[key] for d in dict_list if key in d)
ad8c70a5d01edd34419597ccb600f93b15cb1065
485,667
def format_results(names, scalars):
    """Formats the results of training.

    Args:
        names: The names of the metrics.
        scalars: The values of the metrics.

    Returns:
        A string that contains the formatted scalars.
    """
    res = []
    for name, scalar in zip(names, scalars):
        res.append('%s: %2.3f' % (name, scalar))
    return ', '.join(res)
8fa685fd267e8b583ca181a64352992c218f6904
410,933
def top_two_word(counts):
    """
    Given a list of (word, count, percentage) tuples,
    return the top two word counts.
    """
    limited_counts = counts[0:2]
    count_data = [count for (_, count, _) in limited_counts]
    return count_data
a84d4c7c82d66cd7b15e4f97e55d953a4e2bf400
530,264
def json_by_default_dispatcher(router, request, response):
    """WSGI router which defaults to 'application/json'."""
    response.content_type = 'application/json'
    return router.default_dispatcher(request, response)
78439b1974842838aba8c598147a4a354e343928
158,825
def _is_valid_libsvm_label(libsvm_label):
    """Check if LIBSVM label is formatted like so:
    <label> if just label
    <label>:<instance_weight> if label and instance weight both exist

    :param libsvm_label:
    """
    split_label = libsvm_label.split(":")
    if len(split_label) <= 2:
        for label_part in split_label:
            try:
                float(label_part)
            except ValueError:
                return False
    else:
        return False
    return True
5438b05e257817e019d01621159bfa5d5abcc668
462,610
import importlib

def _class_by_name(module, className):
    """Helper function to extract a class from a module"""
    # Import module
    mod = importlib.import_module(module)
    # Get class
    return getattr(mod, className)
9021dd4ac4828b701cfae74a50144ab6a119cd83
176,510
def deltafmt(delta, decimals=None):
    """
    Returns a human readable representation of a time with the format:

        [[[Ih]Jm]K[.L]s

    For example: 6h5m23s

    If "decimals" is specified, the seconds will be output with that many
    decimal places. If not, there will be two places for times less than
    1 minute, one place for times less than 10 minutes, and zero places
    otherwise.
    """
    try:
        delta = float(delta)
    except (TypeError, ValueError):
        return '(bad delta: %s)' % delta
    if delta < 60:
        if decimals is None:
            decimals = 2
        return ("{0:." + str(decimals) + "f}s").format(delta)
    mins = int(delta / 60)
    secs = delta - mins * 60
    if delta < 600:
        if decimals is None:
            decimals = 1
        return ("{0:d}m{1:." + str(decimals) + "f}s").format(mins, secs)
    if decimals is None:
        decimals = 0
    hours = int(mins / 60)
    mins -= hours * 60
    if delta < 3600:
        return "{0:d}m{1:.0f}s".format(mins, secs)
    else:
        return ("{0:d}h{1:d}m{2:." + str(decimals) + "f}s").format(hours, mins, secs)
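Spot checks with values I picked:

>>> deltafmt(3725)
'1h2m5s'
>>> deltafmt(42.3)
'42.30s'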
4274a784e112a473d758b8314100f949512cff75
448,820
import random
import time

def slow_square(n, timeout=5.0):
    """
    Compute the square of an integer, slowly and unreliably.

    The input should be in the range 0-100. The larger the input, the longer
    the expected time to complete the operation, and the higher the
    likelihood of timeout.
    """
    mean_time = (n + 5.0) / 5.0
    sleep_time = random.expovariate(1.0 / mean_time)
    if sleep_time > timeout:
        time.sleep(timeout)
        raise RuntimeError("Calculation took too long.")
    else:
        time.sleep(sleep_time)
        return n * n
d0a63b9932bc5bd2ffc09f88d2fe28d60520597a
135,688
def filter_te_with_project(entries, project_name):
    """ Filters TEs by project name. """
    return entries[entries['project'] == project_name]
9b23659b999afc219e976aa92d94d6c992e6f029
207,284
def the_last_50_entries(list_of_entries):
    """Simply returns the last 50 entries."""
    return list_of_entries[-50:]
989288f9caca14bd5b168b35c81ba195e867d177
227,077