Dataset schema: content (string, length 39 to 9.28k), sha1 (string, length 40), id (int64, 8 to 710k).
import asyncio


async def subprocess_output(cmd, **kwargs):
    """
    Run cmd until completion & return stdout, stderr

    Convenience method to start and run a process
    """
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        **kwargs)
    stdout, stderr = await proc.communicate()
    return stdout.decode(), stderr.decode()
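A minimal usage sketch (the echoed command is illustrative and assumes a Unix-like environment):

import asyncio

async def main():
    # Run a trivial command and capture its output.
    out, err = await subprocess_output(["echo", "hello"])
    print(out.strip())  # -> hello

asyncio.run(main())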
b75097dd39c82d43f8d14c575d678341ec289e51
90,968
import gzip
import json


def load_reference_file(path):
    """
    Load a list of merged otus documents from a file associated with a Virtool reference file.

    :param path: the path to the otus.json.gz file
    :type path: str

    :return: the otus data to import
    :rtype: dict

    """
    with open(path, "rb") as handle:
        with gzip.open(handle, "rt") as gzip_file:
            return json.load(gzip_file)
dd4fb19e232ad375893668f9b081cedb6b571c0a
307,739
def parseBands(band, allBands, error):
    """Parses a string that specifies bands.

    Bands are horizontal rectangles defined with respect to lines.
    They correspond with regions of interest where we try to find
    specific marks, such as commas and accents.

    Parameters
    ----------
    band: string or None or iterable
        If None: it means all bands.
        If a string: the name of a band.
        If an iterable: the items must be names of bands.
    allBands: tuple
        Names of all bands.
    error: function
        Method to write error messages.

    Returns
    -------
    tuple
        The bands as parsed.
    """
    sortedBands = sorted(allBands)
    doBands = (
        set(allBands)
        if band is None
        else set(band.split(","))
        if type(band) is str
        else set(band)
    )
    # Set difference requires both operands to be sets, so wrap allBands.
    illegalBands = doBands - set(allBands)
    if illegalBands:
        error(f"Will skip illegal bands: {', '.join(sorted(illegalBands))}")
        doBands -= illegalBands
    return tuple(b for b in sortedBands if b in doBands)
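A short usage sketch; the band names are hypothetical and print stands in for a real error handler:

allBands = ("high", "low", "mid")   # hypothetical band names
parseBands("low,mid", allBands, print)    # -> ('low', 'mid')
parseBands(None, allBands, print)         # -> ('high', 'low', 'mid')
parseBands("low,bogus", allBands, print)  # warns via print, then -> ('low',)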
f502ddc4107d2440e914166866168c700fd5a6e2
646,901
def itemize(*items):
    """Restructured text formatted itemized list

    >>> print(itemize('alpha', 'beta'))
    * alpha
    * beta
    """
    return '\n'.join(['* ' + item for item in items])
41339defc4e665a54dcf3a072c9ab95ab619ecc4
286,406
def cube(x):
    """The cube of a number

    Calculates and returns the cube of any floating-point number;
    note that, as currently written, the function also works for
    arrays of floats, ints, arrays of ints, and more generally,
    any number or array of numbers.

    Parameters
    ----------
    x: float
        Number to cube

    Returns
    -------
    float
        Cube of x

    Raises
    ------
    No exceptions are raised.

    See Also
    --------
    exampy.square: Square of a number
    exampy.Pow: a number raised to an arbitrary power

    Notes
    -----
    Implements the standard cube function

    .. math:: f(x) = x^3

    History:
        2020-03-04: First implementation - Bovy (UofT)

    References
    ----------
    .. [1] A. Mathematician, "x to the p-th power: squares, cubes, and
       their general form," J. Basic Math., vol. 2, pp. 2-3, 1864.
    """
    return x**3.
13ffb5afb827cf8e0dc3addce1afe6d74f93e17f
543,604
import collections


def collapseExpansions(expansions, numDims):
    """Scans through the given list of expansions (each assumed to pertain
    to a single 3D image), and combines any which cover the same image
    area, and cover adjacent volumes.

    :args expansions: A list of expansion slices - see :func:`calcExpansions`.

    :args numDims: Number of dimensions covered by each expansion, not
                   including the volume dimension (i.e. 3 for a 4D image).

    :returns: A list of expansions, with equivalent expansions that cover
              adjacent images collapsed down.

    .. note:: For one expansion ``exp`` in the ``expansions`` list, this
              function assumes that the range at ``exp[numDims]`` contains
              the image to which ``exp`` pertains
              (i.e. ``exp[numDims] == (vol, vol + 1)``).
    """
    if len(expansions) == 0:
        return []

    commonExpansions = collections.OrderedDict()
    expansions = sorted(expansions)

    for exp in expansions:
        vol = exp[numDims][0]
        exp = tuple(exp[:numDims])
        commonExps = commonExpansions.get(exp, None)

        if commonExps is None:
            commonExps = []
            commonExpansions[exp] = commonExps

        for i, (vlo, vhi) in enumerate(commonExps):
            if vol >= vlo and vol < vhi:
                break
            elif vol == vlo - 1:
                commonExps[i] = vol, vhi
                break
            elif vol == vhi:
                commonExps[i] = vlo, vol + 1
                break
        else:
            commonExps.append((vol, vol + 1))

    collapsed = []
    for exp, volRanges in commonExpansions.items():
        for vlo, vhi in volRanges:
            newExp = list(exp) + [(vlo, vhi)]
            collapsed.append(newExp)

    return collapsed
073923a29d065ee21e26ef234135b4d358ecd288
696,730
def add_to_set(value, values):
    """ Add non-empty value to set of values """
    if value:
        values.add(value)
    return values
97d999fb829556e0659e416537945fcec62fe059
491,696
import torch


def sample_model_N_times(dVel0, dVel, v0, dv_l_const, v_max, n_layers,
                         dz, dx, ny, nz, N):
    """Sample N earth models according to the generative model defined in
    Roeth and Tarantola 1994

    Arguments:
        dVel0 {torch.distribution} -- Pytorch Distribution for the random
            velocity contribution of the initial layer velocity
        dVel {torch.distribution} -- Pytorch Distribution for the random
            velocity contribution of the next layer
        v0 {float} -- Initial Velocity
        dv_l_const {float} -- Constant velocity contribution per next layer
        v_max {float} -- Largest possible velocity in the model
        n_layers {int} -- Number of layers to generate
        dz {float} -- Depth increase [m]
        dx {float} -- Horizontal increase [m]
        ny {int} -- Number of offset gridblocks
        nz {int} -- Number of gridblocks in depth
        N {int} -- Number of earth models to generate

    Returns:
        models_th -- 2D Velocity Models
        velocities_th -- 1D Velocities per layer
    """
    models = []
    velocities = []
    for i in range(N):
        model_true = torch.ones(nz, ny)
        vl = v0 + dVel0.sample()
        vel_temp = []
        for i in range(0, n_layers):
            vel_temp.append(vl)
            model_true[dz*i:dz*(i+1), :] = vl
            vl = (vl + dv_l_const) + dVel.sample()
            if vl >= v_max:
                vl = torch.ones(1) * v_max
                vl = vl[0]
        velocities.append(torch.stack(vel_temp, 0))
        models.append(model_true)
    models_th = torch.stack(models, 0)
    velocities_th = torch.stack(velocities, 0)
    return models_th, velocities_th
9337f6f38194c91f52b88c8da13f87ab9ead9716
267,981
def my_add(argument1, argument2):
    """
    adds two input arguments.

    Parameters
    ----------
    argument1 : int, float, str
        input argument 1
    argument2 : int, float, str
        input argument 2

    Returns
    -------
    result : int, float or str
        the two added input arguments
    """
    result = argument1 + argument2
    return result
52c595ac6259f4fa59bb434317c5b89a1bdec428
137,503
def validate_sequence(sequence, sequence_type):
    """
    This method is used to evaluate the string passed to the Sequence constructor.
    I used this
    https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=BlastHelp
    as reference for which bases and aminoacids representation are valid in FASTA files

    Parameters
    -------------
    sequence : str
        The DNA/RNA/Protein sequence
    sequence_type : str
        The sequence type, it can be either 'DNA', 'RNA' or 'Protein'
    """
    legal_values = []
    if sequence_type == 'DNA':
        legal_values = ['A', 'G', 'C', 'W', 'S', 'M', 'K', 'R', 'Y', '-',
                        'B', 'V', 'D', 'H', 'N', 'T']  # list of possible bases
    elif sequence_type == 'RNA':
        legal_values = ['A', 'G', 'C', 'W', 'S', 'M', 'K', 'R', 'Y', '-',
                        'B', 'V', 'D', 'H', 'N', 'U']
    elif sequence_type == 'Protein':
        # again, this is based on the way in which aminoacids are
        # represented in FASTA files
        legal_values = [chr(i) for i in range(65, 91)]
        legal_values.remove('J')
        legal_values.append('-')
        legal_values.append('*')
    for base in sequence:
        if base not in legal_values:
            return False
    return True
5ac5edecd8209f9d2fa48bc348e45e67c225c615
450,197
def object_list_check_unique_attribute(object_list, attr_name):
    """
    check if all the objects in the list have the same attribute value.
    """
    unique = True
    attr0 = object_list[0].__getattribute__(attr_name)
    for obj in object_list[1:]:
        if obj.__getattribute__(attr_name) == attr0:
            pass
        else:
            unique = False
            break
    return unique
8f8e8f8d9163b9fc9d2aba08f1a5670b6c55fad2
176,716
from collections import Counter


def bag_of_ngrams(msg, n):
    """
    Extract a bag of word ngrams from a message, with fixed n

    :param msg: input string
    :param n: size of ngram
    :return: bag of features, as a Counter
    """
    if n == 0:
        raise ValueError('n must be a positive integer')
    words = msg.split()
    if n > len(words):
        return Counter()
    else:
        return Counter(('ngram', tuple(words[i:i+n]))
                       for i in range(len(words)-n+1))
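A quick usage sketch:

bag = bag_of_ngrams("the cat sat on the mat", 2)
print(bag[('ngram', ('the', 'cat'))])  # -> 1
print(sum(bag.values()))               # -> 5 bigrams in a 6-word message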
8b7867df2de8ce7026adcbbc865665759057e617
292,275
def norn(x):
    """ return the next or None of an iterator """
    try:
        return next(x)
    except StopIteration:
        return None
cb6cd9ff9de36e83fdb41458ccf39e3d566b7fc6
543,209
def get_optimal_value(capacity, items):
    """
    get_optimal_value function implements an algorithm for the fractional
    knapsack problem

    :param capacity: Number represents the total capacity of the bag
    :param items: list of list that contains values and weights of each item,
        where items[𝑖] = [value(𝑖), weight(𝑖)]
    :return: decimal number represents the maximal value of fractions of
        items that fit into the bag of weight capacity.
    """
    out = 0
    # sort all items by their price_per_unit
    items.sort(key=lambda x: x[0]/x[1], reverse=True)
    for v, w in items:
        can_fit = capacity - w
        # if the element can fit into the bag, take the whole item.
        if can_fit >= 0:
            out += v
            capacity = can_fit
        # otherwise, take as much of the item's weight as possible
        # (price_per_unit * capacity).
        else:
            out += (v/w) * capacity
            return out
    return out
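A worked sketch on a classic fractional-knapsack instance:

# Items as [value, weight], capacity 50: take the first two items whole
# and 20/30 of the last one -> 60 + 100 + (20/30) * 120 = 240.0
print(get_optimal_value(50, [[60, 10], [100, 20], [120, 30]]))  # -> 240.0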
6501672bec4c0860a0b47c36bd074dffb3a2826a
74,263
def mulrowcol(row, col, K):
    """
    Multiplies two lists representing row and column element-wise.

    Gotcha: Here the column is represented as a list contrary to the norm
    where it is represented as a list of one element lists. The reason is
    that the theoretically correct approach is too expensive. This problem
    is expected to be removed later as we have a good data structure to
    facilitate column operations.

    Examples
    ========

    >>> from sympy.matrices.densearith import mulrowcol
    >>> from sympy import ZZ
    >>> a = [ZZ(2), ZZ(4), ZZ(6)]
    >>> mulrowcol(a, a, ZZ)
    56
    """
    result = K.zero
    for i in range(len(row)):
        result += row[i]*col[i]
    return result
448aec2928b314cabe707f19f69bfd687b8fe31c
635,331
def pairwise_analogy_func(wrap, a1, b1, a2, b2, weight_direct, weight_transpose):
    """
    Rate the quality of the analogy a1 : b1 :: a2 : b2.
    """
    va1 = wrap.get_vector(a1)
    vb1 = wrap.get_vector(b1)
    va2 = wrap.get_vector(a2)
    vb2 = wrap.get_vector(b2)

    value = (
        weight_direct * (vb2 - va2).dot(vb1 - va1)
        + weight_transpose * (vb2 - vb1).dot(va2 - va1)
        + vb2.dot(vb1) + va2.dot(va1)
    )
    return value
6f96c26fb4c17d0006ab6888225dcc1f4bdc4391
67,833
def valid_cards_syntax(cards_str):
    """
    Confirm that only numeric values separated by periods were given as input
    """
    cards = cards_str.split('.')
    for c in cards:
        if not c.isnumeric():
            return 'Cards must contain only digits 0-9 separated by periods'
    return None
8e3811505075269d2b1a37751c14017e107ce69b
41,597
def find_last_break(times, last_time, break_time):
    """Return the last index in times after which there is a gap >= break_time.

    If the last entry in times is further than break_time from last_time,
    that last index in times is returned.
    Returns -1 if no break exists anywhere in times.
    """
    imax = len(times) - 1
    # Start from the end of the list, iterate backwards
    for _i in range(len(times)):
        i = imax - _i
        t = times[i]
        if t < last_time - break_time:
            return i
        else:
            last_time = t
    return -1
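A small usage sketch with illustrative timestamps:

times = [0, 1, 2, 10, 11]
# The gap between times[2] and times[3] is 8 >= 5, so index 2 is the
# last break.
print(find_last_break(times, last_time=11, break_time=5))  # -> 2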
01d883bc0c67022a32397f87703acf109f95bd55
671,665
def create_qualification(conn, name: str, keywords: str, description: str,
                         auto_granted: bool = True,
                         auto_granted_value: int = 100) -> str:
    """
    Creates a new qualification for a task.

    @returns the qualification type id.
    """
    response = conn.create_qualification_type(
        Name=name,
        Keywords=keywords,
        Description=description,
        QualificationTypeStatus='Active',
        AutoGranted=auto_granted,
        AutoGrantedValue=auto_granted_value,
    )
    return response['QualificationType']['QualificationTypeId']
69f07f96ad948c2f8d5b557f49a33f9f4c056889
142,498
def _boundaries_overflow(index: int, radius: int, length: int) -> bool:
    """Checks whether the boundaries defined by the index of the center and
    the radius are inside the sequence of given length.

    Returns True if boundaries are already outside the sequence,
    False otherwise.
    """
    return ((index + radius) >= length) or ((index - radius) < 0)
45123b2668fb77d48ce638f77e734582bd5ac6d6
196,920
def zone_target_temperature(zone):
    """ Get target temperature for this zone """
    return zone["targettemperature"]
380235b8fdbb9e50239862d3243857edb2d3258a
292,559
def breguet_propellant_winged_powered(R_cruise, v_cruise, lift_drag, I_sp_ab):
    """Recovery propellant factor P for winged vehicle with air-breathing propulsion.

    See Breguet range equation
    http://web.mit.edu/16.unified/www/FALL/thermodynamics/notes/node98.html

    Arguments:
        R_cruise (scalar): cruise range [units: meter].
        v_cruise (scalar): cruise speed [units: meter second**-1].
        lift_drag (scalar): Vehicle lift/drag ratio [units: dimensionless].
        I_sp_ab (scalar): Air-breathing propulsion specific impulse [units: second].

    Returns:
        scalar: propellant factor P. The recovery vehicle will burn `e**P - 1`
        times its inert mass in propellant during the recovery cruise.
    """
    return R_cruise / (v_cruise * lift_drag * I_sp_ab)
ab5e4cf2159bcd612a6c4cb5a1f062cb4c3910db
132,609
def spaces(num: int):
    """Utility function to easily indent strings."""
    return " " * num
7a346bde1344bdf66ac5084c4f018c33f0707c90
547,793
def read_predictions_from_file(filename):
    """
    Reader for the gold file and the template output file.
    Return values are four arrays with article ids, labels
    (or ? in the case of a template file), begin of a fragment,
    end of a fragment.
    """
    articles_id, span_starts, span_ends, gold_labels = ([], [], [], [])
    with open(filename, "r") as f:
        for row in f.readlines():
            article_id, gold_label, span_start, span_end = row.rstrip().split("\t")
            articles_id.append(article_id)
            gold_labels.append(gold_label)
            span_starts.append(span_start)
            span_ends.append(span_end)
    return articles_id, span_starts, span_ends, gold_labels
1b7663d11fa341d72a3f2f8733f3625b1d096324
612,376
import itertools


def nflatten(iterable, n=1):
    """
    Flattens an iterable n times.

    Args:
        iterable: The list to flatten.
        n: The number of times the list should be flattened.

    Returns:
        A generator yielding the flattened list.

    Note:
        Will also flatten strings.
    """
    for _ in range(n):
        iterable = itertools.chain.from_iterable(iterable)
    return iterable
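A quick usage sketch:

nested = [[[1, 2], [3]], [[4]]]
print(list(nflatten(nested, 2)))  # -> [1, 2, 3, 4]
print(list(nflatten(nested)))     # -> [[1, 2], [3], [4]]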
0f0b73ecdf34c997c9b2a237c074e55fe458981c
397,146
from datetime import datetime


def estimate_time_shift_from_server_to_user(hh, mm):
    """
    A negative result usually means the server is west of the user.

    :param hh: hours
    :param mm: minutes
    :return: minutes of shift (rounded to 30)
    """
    now = datetime.now()
    my_hh, my_mm = now.hour, now.minute
    diff = ((my_hh - hh) * 60 + my_mm - mm) / 30.0
    return int(round(diff) * 30.0)
2c7e9b98ca6053127ec98d676c6841639afde99a
320,606
def discount(rewards, impatience):
    """Sum the rewards, where far off rewards are devalued by impatience.

    For example, if rewards were [1, 1, 1, 1] and impatience was 0.5, returns:
        sum([1, 0.5, 0.25, 0.125])

    Args:
        rewards (list): list of rewards to accumulate
        impatience (float): factor to discount the rewards at
    """
    total, factor = 0, 1.0
    for r in rewards:
        total += r * factor
        factor *= impatience
    return total
bffd30a9133b2cb4b6a596fc060e01d7f0333efd
93,813
from typing import List


def mean(nums: List) -> float:
    """
    Find mean of a list of numbers.
    Wiki: https://en.wikipedia.org/wiki/Mean

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5, 10, 15, 20, 25, 30, 35])
    20.0
    >>> mean([1, 2, 3, 4, 5, 6, 7, 8])
    4.5
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
3c802b4967f646b6338e52b4ce12977274054c15
708,627
def calc_iou(box_a, box_b):
    """
    Calculate the Intersection Over Union of two boxes
    Each box specified by upper left corner and lower right corner:
    (x1, y1, x2, y2), where 1 denotes upper left corner, 2 denotes
    lower right corner.
    Returns IOU value
    """
    # Calculate intersection, i.e. area of overlap between the 2 boxes (could be 0)
    # http://math.stackexchange.com/a/99576
    x_overlap = max(0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
    y_overlap = max(0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
    intersection = x_overlap * y_overlap

    # Calculate union
    area_box_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_box_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_box_a + area_box_b - intersection

    iou = intersection / union
    return iou
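A worked sketch on two overlapping boxes:

# Two 2x2 boxes offset by one unit: overlap is 1x1 = 1, union is
# 4 + 4 - 1 = 7, so IOU = 1/7.
print(calc_iou((0, 0, 2, 2), (1, 1, 3, 3)))  # -> 0.14285714285714285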
c330af6b6b213c5086fa8df079228ee214407455
642,234
def jaccard(s1, s2):
    """
    Calculate the Jaccard *distance* between two sets (one minus the size
    of the intersection divided by the size of the union). If either or
    both sets are empty the distance is 1.

    Note that this returns a distance -- 0 means things are similar,
    1 means things are different

    :param s1: Set one
    :type s1: set
    :param s2: Set two
    :type s2: set
    :return: The Jaccard distance
    :rtype: float
    """
    if len(s1) == 0 or len(s2) == 0:
        return 1
    return 1 - 1.0 * len(s1.intersection(s2)) / len(s1.union(s2))
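A quick sanity-check sketch:

print(jaccard({1, 2, 3}, {2, 3, 4}))  # -> 0.5 (intersection 2, union 4)
print(jaccard({1, 2}, set()))         # -> 1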
989a5a4e26f20e501ec79c1fa6bad93c5941bf1b
209,469
def handle_sketch_name(msg):
    """Process an internal sketch name message."""
    if not msg.gateway.is_sensor(msg.node_id):
        return None
    msg.gateway.sensors[msg.node_id].sketch_name = msg.payload
    msg.gateway.alert(msg)
    return None
2faae0aca4255a6439ea40efef5d2ae8a4fd4522
417,228
from datetime import datetime


def _get_last_commits(repo, n):
    """ Returns list [time, commit_message] of last n commits to repo. """
    log = []
    log_ref = repo.head.reference.log()
    for x in range(1, n+1):
        if len(log_ref) < x:
            break
        log_entry = log_ref[-x]
        if log_entry:
            log.append([datetime.fromtimestamp(log_entry.time[0]),
                        log_entry.message])
    return log
4c0d1a6c509a5a0faf6d660a1e8bd52fc8a77372
358,248
def pad_packed_images(packed_images, pad_value=0., snap_size_to=None):
    """Assemble a padded tensor for a `PackedSequence` of images with different spatial sizes

    This method allows any standard convnet to operate on a `PackedSequence` of images as a batch

    Parameters
    ----------
    packed_images : PackedSequence
        A PackedSequence containing N tensors with different spatial sizes H_i, W_i.
        The tensors can be either 2D or 3D. If they are 3D, they must all have the
        same number of channels C.
    pad_value : float or int
        Value used to fill the padded areas
    snap_size_to : int or None
        If not None, chose the spatial sizes of the padded tensor to be multiples of this

    Returns
    -------
    padded_images : torch.Tensor
        A tensor with shape N x C x H x W or N x H x W, where `H = max_i H_i` and
        `W = max_i W_i` containing the images of the sequence aligned to the top left
        corner and padded with `pad_value`
    sizes : list of tuple of int
        A list with the original spatial sizes of the input images
    """
    if packed_images.all_none:
        raise ValueError("at least one image in packed_images should be non-None")

    reference_img = next(img for img in packed_images if img is not None)
    max_size = reference_img.shape[-2:]
    ndims = len(reference_img.shape)
    chn = reference_img.shape[0] if ndims == 3 else 0

    # Check the shapes and find maximum spatial size
    for img in packed_images:
        if img is not None:
            if len(img.shape) != 3 and len(img.shape) != 2:
                raise ValueError("The input sequence must contain 2D or 3D tensors")
            if len(img.shape) != ndims:
                raise ValueError("All tensors in the input sequence must have the same number of dimensions")
            if ndims == 3 and img.shape[0] != chn:
                raise ValueError("3D tensors must all have the same number of channels")
            max_size = [max(s1, s2) for s1, s2 in zip(max_size, img.shape[-2:])]

    # Optional size snapping
    if snap_size_to is not None:
        max_size = [(s + snap_size_to - 1) // snap_size_to * snap_size_to for s in max_size]

    if ndims == 3:
        padded_images = reference_img.new_full([len(packed_images), chn] + max_size, pad_value)
    else:
        padded_images = reference_img.new_full([len(packed_images)] + max_size, pad_value)

    sizes = []
    for i, tensor in enumerate(packed_images):
        if tensor is not None:
            if ndims == 3:
                padded_images[i, :, :tensor.shape[1], :tensor.shape[2]] = tensor
                sizes.append(tensor.shape[1:])
            else:
                padded_images[i, :tensor.shape[0], :tensor.shape[1]] = tensor
                sizes.append(tensor.shape)
        else:
            sizes.append((0, 0))

    return padded_images, sizes
dc382533060baf2ff70dcc460e8f2ad5223f0a47
236,277
def uproot_to_numpy(uproot_hist):
    """
    Convert an `uproot` histogram to a `numpy` histogram.

    Args:
        uproot_hist (hist): An uproot histogram

    Returns:
        Tuple of NumPy arrays: The converted `numpy` histogram
    """
    # return values, edges
    return uproot_hist.to_numpy()
cdc87136f4acf5ee399c6ce6091e055ff1a81511
302,508
import random


def pick_from_distribution(input_map):
    """
    Generates an item from a distribution map

    :param input_map: The dictionary of items to their probabilities
    :return: An item from the dictionary
    """
    rand = random.random()
    total = 0.0
    # dict.iteritems() is Python 2 only; use items() under Python 3.
    for item, prob in input_map.items():
        total += prob
        if total > rand:
            return item
    # shouldn't get here if probabilities sum to 1; random.choice needs
    # a sequence, so materialize the keys.
    return random.choice(list(input_map))
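A brief usage sketch; the distribution is illustrative:

from collections import Counter

dist = {"a": 0.7, "b": 0.3}
draws = Counter(pick_from_distribution(dist) for _ in range(10000))
print(draws)  # roughly Counter({'a': 7000, 'b': 3000})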
2ba62477bdc02c6f541d5f05100dee646d9b2f76
632,402
def urldecode(url):
    """Decode %7B/%7D to {}."""
    return url.replace('%7B', '{').replace('%7D', '}')
91e2f969e59bc68004e1696434b5b0329012342f
26,473
def choose_first_not_none(*args):
    """ Choose first non None alternative in args.

    :param args: alternative list
    :return: the first non None alternative.
    """
    for a in args:
        if a is not None:
            return a
    return None
fe3efba85251161cd0a6ecb50583cc443cd04dc0
1,103
def makeIntervals(evts):
    """From evts=[a,b,c], create a list of intervals: [(0,a), (b,c)]
    From evts=[a,b,c,d], create a list of intervals: [(0,a), (b,c), (d,1)].
    Assumes all values in evts are in (0,1)"""
    if evts == []:
        return [(0.0, 1.0)]
    evts.sort()
    left = 0.0
    intervals = []
    for e in evts:
        if left >= 0.0:
            intervals.append((left, e))
            left = -1.0
        else:
            left = e
    if left >= 0.0 and left < 1.0:
        intervals.append((left, 1.0))
    return intervals
093299058904966682e9df91d96c6cc7adc430f2
651,952
def generate_grid_coordinates(feature_length):
    """
    A list is created with each element corresponding to a coordinate of a
    feature_length x feature_length grid. The full coordinate list can then
    be broken up into roughly equal chunks so each chunk can be sent to a
    different core.

    Parameters
    ----------
    feature_length : int
        The length of the feature space you want to explore.

    Returns
    -------
    missing_feature_index : list
        Coordinate list.
    """
    missing_feature_index = []
    for i in range(feature_length):
        for j in range(feature_length):
            missing_feature_index += [[i, j]]
    return missing_feature_index
43fe20bd2f046e660b541a5fd7ff5a67b18595f5
599,618
def make_nbr_dic(nbr_list):
    """
    Make a dictionary that maps each atom to the indices of its neighbors.

    Args:
        nbr_list (torch.LongTensor): nbr list for a geometry
    Returns:
        nbr_dic (dict): dictionary described above
    """
    nbr_dic = {}
    for nbr in nbr_list:
        nbr_0 = nbr[0].item()
        if nbr_0 not in nbr_dic:
            nbr_dic[nbr_0] = []
        nbr_dic[nbr_0].append(nbr[1].item())
    return nbr_dic
44aab0b700ec797bfdfaa8cbd30869794cbd9133
249,520
def get_last_usable_skill(skill_dict):
    """Returns the last usable skill contained by the input dictionary.

    Args:
        skill_dict: an ordered dictionary with scox.value.Skill objects
            as values.

    Returns:
        the last usable skill in skill_dict.
    """
    usable = []
    for s in skill_dict.keys():
        if skill_dict[s].is_usable():
            usable.append(s)
    if len(usable) > 0:
        return usable[-1]
    else:
        return None
575973188601810720bda3787f3ed0d2dcd4e923
455,921
import ast


def filter_block(node_list):
    """
    Remove no-op code (``pass``), or any code after
    an unconditional jump (``return``, ``break``, ``continue``, ``raise``).
    """
    if len(node_list) == 1:
        return node_list

    new_list = []
    for node in node_list:
        if type(node) == ast.Pass:
            continue
        new_list.append(node)
        if type(node) in (ast.Return, ast.Break, ast.Continue, ast.Raise):
            break
    if len(new_list) == len(node_list):
        return node_list
    else:
        return new_list
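A short sketch applying the filter to a parsed function body:

src = "def f():\n    x = 1\n    pass\n    return x\n    x = 2\n"
body = ast.parse(src).body[0].body
filtered = filter_block(body)
print([type(n).__name__ for n in filtered])  # -> ['Assign', 'Return']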
b88d3e4966e162d3e23e56e622ff47c63165b7e6
45,551
def humanbytes(B):
    """Return the given bytes as a human friendly KB, MB, GB, or TB string"""
    B = float(B)
    KB = float(1024)
    MB = float(KB ** 2)  # 1,048,576
    GB = float(KB ** 3)  # 1,073,741,824
    TB = float(KB ** 4)  # 1,099,511,627,776

    if B < KB:
        # The original `0 == B > 1` chained comparison was always False,
        # so the label was stuck on 'Byte'; pluralize unless B is exactly 1.
        return '{0} {1}'.format(B, 'Byte' if B == 1 else 'Bytes')
    elif KB <= B < MB:
        return '{0:.2f} KB'.format(B/KB)
    elif MB <= B < GB:
        return '{0:.2f} MB'.format(B/MB)
    elif GB <= B < TB:
        return '{0:.2f} GB'.format(B/GB)
    elif TB <= B:
        return '{0:.2f} TB'.format(B/TB)
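A few sample conversions:

print(humanbytes(1))         # -> 1.0 Byte
print(humanbytes(512))       # -> 512.0 Bytes
print(humanbytes(1536))      # -> 1.50 KB
print(humanbytes(10 ** 10))  # -> 9.31 GB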
ac23fcb827f38e4f2e23ca5572e73a46f6d2f632
441,684
def obj_to_dict(obj):
    """
    Converts an :py:obj:`object` to a :py:obj:`dict` by taking the object's
    properties and variables and their values and putting them into a dict.
    Private and dunder (``__``) properties are ignored.

    The use case for this is to enable passing of an object's data across
    the app/task barrier in a format that is serializable by a JSON-based
    serializer.

    Args:
        obj: An opaque object

    Returns:
        dict: A :py:obj:`dict` that contains the attributes from ``obj``.
    """
    return {
        attr: getattr(obj, attr)
        for attr in dir(obj)
        if not attr.startswith('_')
    }
0c7a8d758357dcd7f33b0351004fe29354d0134e
22,881
def default_range(start, stop):
    """[start, start + 1, ..., stop - 1]"""
    return range(start, stop)
98a882164b916090f60fd8168b85b4aca3b031e8
250,782
def null(shape):
    """Return the shape as-is."""
    return shape
956c209a33312c235748a2b4a433955404e5f641
288,303
def bxor(inp, key):
    """ Performs the bitwise xor on an array of bytes. """
    return bytes([inp[i] ^ key[i % len(key)] for i in range(len(inp))])
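A quick round-trip sketch with a repeating key:

ct = bxor(b"attack at dawn", b"key")
print(bxor(ct, b"key"))  # XOR with the same key round-trips -> b'attack at dawn'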
e12078b24d52aa785afbd118ed48157bedc7926b
529,238
from typing import Any


def binary_search(L: list, v: Any) -> int:
    """Return the index of the first occurrence of value in L, or return
    -1 if value is not in L.

    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 1)
    0
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 4)
    2
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 5)
    4
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 10)
    7
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], -3)
    -1
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 11)
    -1
    >>> binary_search([1, 3, 4, 4, 5, 7, 9, 10], 2)
    -1
    >>> binary_search([], -3)
    -1
    >>> binary_search([1], 1)
    0
    """
    # Mark the left and right indices of the unknown section.
    i = 0
    j = len(L) - 1

    while i != j + 1:
        m = (i + j) // 2
        if L[m] < v:
            i = m + 1
        else:
            j = m - 1

    if 0 <= i < len(L) and L[i] == v:
        return i
    else:
        return -1
f844dc94aa3beab375beb051bb8a7a90baeb85fe
92,613
import logging

import requests


def request_playlist_html(playlist_url, headers):
    """Send a request to a YouTube playlist webpage.

    Parameters
    ----------
    playlist_url : str
        URL to the playlist webpage.
    headers : Dict[str, str]
        HTTP request headers.

    Returns
    -------
    str
        Response HTML
    """
    logging.info("Requesting YouTube playlist at %s", playlist_url)
    response = requests.get(playlist_url, headers=headers)
    return response.text
c6471d8330ea1988a0ad04d2f91d9a4ae8aefd07
372,871
def check_std(df, num_cols, threshold=0.01):
    """ Return column names, whose standard deviation is less than the threshold """
    stds = dict(df.std())
    cols = [k for k, v in stds.items() if v < threshold]
    return cols
fc5a554da7ea37ff5248ceeba2637e3c13ee7e94
488,965
def inherits_from(obj, parent):
    """
    Takes an object and tries to determine if it inherits at *any*
    distance from parent.

    Args:
        obj (any): Object to analyze. This may be either an instance
            or a class.
        parent (any): Can be either instance, class or python path to class.

    Returns:
        inherits_from (bool): If `parent` is a parent to `obj` or not.

    Notes:
        What differs this function from e.g. `isinstance()` is that `obj`
        may be both an instance and a class, and parent may be an instance,
        a class, or the python path to a class (counting from the evennia
        root directory).

    """
    if callable(obj):
        # this is a class
        obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.mro()]
    else:
        obj_paths = ["%s.%s" % (mod.__module__, mod.__name__) for mod in obj.__class__.mro()]

    if isinstance(parent, str):
        # a given string path, for direct matching
        parent_path = parent
    elif callable(parent):
        # this is a class
        parent_path = "%s.%s" % (parent.__module__, parent.__name__)
    else:
        parent_path = "%s.%s" % (parent.__class__.__module__, parent.__class__.__name__)
    return any(1 for obj_path in obj_paths if obj_path == parent_path)
9d7e0665b4e4fe2a3f7c136436a2502c8b72527c
706,396
def last_posted_user_name(ticket):
    """
    Get the username of the last post created

    :param ticket: The requested ticket to get last post for
    :return: username of last post
    """
    last_post = ticket.posts.all().order_by('created_at').last()
    return last_post.user.username
904f35f8923422f35c970a5efc452394a7deb12b
671,879
import functools


def fluent(func):
    """Decorator for class methods which forces return of self.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        func(self, *args, **kwargs)
        return self
    return wrapper
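A minimal sketch of the chaining this enables; the Query class is hypothetical:

class Query:
    def __init__(self):
        self.parts = []

    @fluent
    def where(self, clause):
        self.parts.append(clause)

# Chaining works because each decorated call returns self.
q = Query().where("a = 1").where("b = 2")
print(q.parts)  # -> ['a = 1', 'b = 2']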
15d702971d877f4a04609adf94019c9696827bb3
265,238
def _surf70(phi1, phi2, phi3, phi4):
    """Compute area of a south fire trapeze

    Parameters
    ----------
    phi1 : float
        Level-set at south west point
    phi2 : float
        Level-set at south east point
    phi3 : float
        Level-set at north east point
    phi4 : float
        Level-set at north west point

    Returns
    -------
    float
        SGBA
    """
    return 0.5 * ((0.5 - phi1) / (phi4 - phi1) + (0.5 - phi2) / (phi3 - phi2))
c4f166bccd0a290b37469f3f616bd568f90f1259
667,575
def make_text_from_orth(example_dict: dict) -> str:
    """
    Reconstructs the text based on ORTH and SPACY from an Example turned to dict
    """
    text = ""
    for orth, spacy in zip(
        example_dict["token_annotation"]["ORTH"],
        example_dict["token_annotation"]["SPACY"],
    ):
        text += orth
        if spacy:
            text += " "
    return text
73cd17f1cf9a0fcd9d7f29d731f8b737822afd82
529,871
def class_text_to_int(cls_name, label_map):
    """
    Get index of class name

    :param cls_name: name of class
    :param label_map: label map
    :return: index of class if found
    """
    if cls_name in label_map:
        return label_map[cls_name]
    raise ValueError('Invalid class')
ee5b15982369609173098d8ceaedc2bebe1425ec
461,439
def get_filename_in_second_line(cpp_txt):
    """
    From the second line of cpp_txt, get the filename

    Expected input argument::

        // ```
        // Begin file_name.cpp
        // ...

    Expected output argument in this case: file_name.cpp
    """
    result = ""
    for line in cpp_txt.splitlines():
        if line.strip().lower().startswith('//'):
            # ignore if too short
            if 2 < len(line.lower().split()):
                # If second word is 'begin'
                if 'begin' == line.lower().split()[1]:
                    result = line.lower().split()[-1]
                    break
    return result
b0e3e3efe7af0af667b20c74c1334c3d61f74ebf
578,152
from pathlib import Path

import imageio
import numpy as np


def read_image_scipy2(input_filename: Path) -> np.array:  # type: ignore
    """
    Read an image file with scipy and return a numpy array.

    :param input_filename: Source image file path.
    :return: numpy array of shape (H, W), (H, W, 3).
    """
    # np.float was removed from NumPy; plain float keeps the same semantics.
    numpy_array = imageio.imread(input_filename).astype(float)  # type: ignore
    return numpy_array
54d3aa7d8a3043e5a79e2668be1e971b543669d9
10,780
def _iget(key, lookup_dict):
    """
    Case-insensitive search for `key` within keys of `lookup_dict`.
    """
    for k, v in lookup_dict.items():
        if k.lower() == key.lower():
            return v
    return None
f53e1723a015e5ac0a1244ee0821d56f4a26c5d8
576,508
from typing import Tuple
from typing import Sequence
import math


def _conv2d_output_size(
    input_dimensions: Tuple[int, int],
    output_channels: Sequence[int],
    kernel_sizes: Sequence[int],
    stride_sizes: Sequence[int],
) -> int:
    r"""Calculates the output size of convolutional layers based on square
    kernel sizes and square stride sizes. This does not take into account
    padding or dilation.

    H_out = \floor((H_in - kernel_size) / stride_size + 1)
    W_out = \floor((W_in - kernel_size) / stride_size + 1)

    Args:
        input_dimensions (Tuple[int, int]): The input dimension in the form
            of (height, width).
        output_channels (Sequence[int]): Number of output channels for each
            layer of convolutions.
        kernel_sizes (Sequence[int]): The kernel size for each layer of
            convolutions.
        stride_sizes (Sequence[int]): The stride size for each layer of
            convolutions.

    Returns:
        int: The output size of the convolutional layers.
    """
    assert len(output_channels) == len(kernel_sizes) == len(stride_sizes)
    assert len(input_dimensions) == 2
    input_height = input_dimensions[0]
    input_width = input_dimensions[1]
    for kernel_size, stride_size in zip(kernel_sizes, stride_sizes):
        input_height = math.floor((input_height - kernel_size) / stride_size + 1)
        input_width = math.floor((input_width - kernel_size) / stride_size + 1)
    return input_height * input_width * output_channels[-1]
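A worked sketch with two assumed layers:

# On a 28x28 input: 28 -> (28-3)/1+1 = 26 -> floor((26-3)/2+1) = 12,
# so the flattened size is 12 * 12 * 8 = 1152.
print(_conv2d_output_size((28, 28), [4, 8], [3, 3], [1, 2]))  # -> 1152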
f22b6f8c4e8f8467960915f7a91a47f779ac67b9
453,365
def bmul(vec, mat, axis=0):
    """Expand vector for batchwise matrix multiplication.

    Parameters
    ----------
    vec : 2d tensor
        vector for multiplication
    mat : 3d tensor
        matrix for multiplication
    axis : int, optional
        batch axis, by default 0

    Returns
    -------
    3d tensor
        Product of matrix multiplication. (bs, n, m)
    """
    mat = mat.transpose(axis, -1)
    return (mat * vec.expand_as(mat)).transpose(axis, -1)
78fc47c98bd10792efeeea8df073e52921e4c9ac
95,527
import struct


def enc_float(val):
    """Encode a single float"""
    return struct.pack("!f", val)
f4d6d3fff683c3b64dcebc97c48b4ab8e3815f91
50,417
from typing import Any, Callable, Dict, Optional, Tuple, Union


def format_callback_to_string(
    callback: Union[str, Callable[..., Any]],
    args: Optional[Tuple[Any, ...]] = None,
    kwargs: Optional[Dict[str, Any]] = None,
) -> str:
    """
    Convert a callback, its arguments and keyword arguments to a string
    suitable for logging purposes.

    Arguments:
        callback: The callback function

    Keyword Arguments:
        args: The callback arguments
        kwargs: The callback keyword arguments

    Returns:
        str: The formatted callback string
    """
    callback_str = None

    if not isinstance(callback, str):
        try:
            callback_str = '{0}('.format(callback.__qualname__)
        except AttributeError:
            callback_str = '{0}('.format(callback.__name__)
    else:
        callback_str = '{0}('.format(callback)

    if args:
        callback_str += ', '.join([repr(arg) for arg in args])
    if kwargs:
        if args:
            callback_str += ', '
        callback_str += ', '.join(['{0}={1}'.format(k, v) for k, v in kwargs.items()])
    callback_str += ')'
    return callback_str
2e07074dac0d7035f63bd192369cf2aa0ce8b410
437,883
import re


def get_major_version(from_go_version: str) -> str:
    """
    Extracts the "major" version part of a full Go release version.
    ("major" to the Go project is the part of the version that most people
    think of as the major and minor versions - refer to examples).

    >>> get_major_version("1.23.45-6rc7")
    '1.23'

    >>> get_major_version("1.2.3")
    '1.2'

    >>> get_major_version("not a release version")
    ''
    """
    match = re.search(pattern=r'^\d+\.\d+', string=from_go_version)
    if match:
        return match.group(0)
    return ""
6f879cc4141f6de0a8902ae88996d65296aa6634
452,605
def doize(*, tock=0.0, **opts):
    """
    Returns decorator that makes decorated generator function Doist compatible.
    Imbues decorated generator function with attributes used by Doist.enter()
    or DoDoer.enter().
    Only one instance of decorated function with shared attributes is allowed.

    Usage:
        @doize()
        def f():
            pass

    Parameters:
        tock is default tock attribute of doized f
        opts is dictionary of remaining parameters that becomes .opts
            attribute of doized f
    """
    def decorator(f):
        f.done = None  # default done state
        f.tock = tock  # default tock attributes
        f.opts = dict(opts)  # default opts attribute
        return f
    return decorator
bbdbfb47a56826b815585cdd1d3b515dc3ad8f03
451,097
import re


def subs_text(text, lst):
    """Same as replaceChars but uses a list instead of a dictionary.

    Args:
        text: The text that will be substituted
        lst: A list that contains regexes for denoising text

    Returns:
        text: New text without garbage
    """
    for n in range(0, len(lst)):
        text = re.sub(lst[n][0], lst[n][1], text, flags=re.DOTALL)
    return text
2ccb16198fdfad5a7bb6d89c28e2511b7218de83
290,510
def compute_amnesty_flags(app_config, curr_date):
    """Helper function to determine whether the date falls within amnesty
    eval or amnesty period."""
    in_amnesty_eval_period = True if app_config.amnesty_config.amnesty_enabled and \
        curr_date <= app_config.amnesty_config.evaluation_period_end_date else False
    in_amnesty_period = True if app_config.amnesty_config.amnesty_enabled and \
        curr_date > app_config.amnesty_config.evaluation_period_end_date and \
        curr_date <= app_config.amnesty_config.amnesty_period_end_date else False
    return in_amnesty_eval_period, in_amnesty_period
da35675436c4aa0fd648354cb848b526282d112b
398,350
def _shape(ys):
    """
    Get the shape of a non-numpy python array. This assumes the first
    index of every dimension is indicative of the shape of the whole matrix.

    Examples:
        >>> _shape([1, 2, 3])
        [3]
        >>> _shape([[1, 2, 3], [4, 5]])
        [2, 3]
    """
    if hasattr(ys, '__len__'):
        return [len(ys)] + _shape(ys[0])
    else:
        return []
66264e78f61198bc2eff27aa54d2f1caed8e0b58
281,482
def notIn(i, subset):
    """Returns True if i-th bit in the subset is not set."""
    return (1 << i) & subset == 0
04f3fce32260123d8da947fca5159cc83852ed07
168,679
def mappings_intersect(a, b):
    """
    True if `a` and `b` share a mapping. Mapping is a list of
    ((key, value), (mapping1, mapping2,...)).

    >>> mappings_intersect([(('waterway', 'riverbank'), ('mapping_waterareas',))],
    ...                    [(('waterway', 'riverbank'), ('mapping_waterareas',))])
    True
    >>> mappings_intersect([(('waterway', 'riverbank'), ('mapping_waterareas',))],
    ...                    [(('place', 'island'), ('mapping_landusage',))])
    False
    >>> mappings_intersect([(('waterway', 'riverbank'), ('mapping_waterareas',))],
    ...                    [(('place', 'island'), ('mapping_landusage',)),
    ...                     (('waterway', 'riverbank'), ('mapping_waterareas',))])
    True
    """
    for a_key_val, a_mappings in a:
        for a_map in a_mappings:
            for b_key_val, b_mappings in b:
                for b_map in b_mappings:
                    if a_key_val == b_key_val and a_map == b_map:
                        return True
    return False
a8a5ebda37325d8dc8a074f26be922ed41f28505
399,462
def chunks(l, n):
    """
    returns list of list of length n.
    E.g. chunks([1, 2, 3, 4, 5], 2) returns [[1, 2], [3, 4], [5]]
    """
    return [l[i:i+n] for i in range(0, len(l), n)]
24e71aca504e1fe5c25c95026e4ac8ca8cb047df
150,871
def find_index_from_freq(freq, frequency_step):
    """
    Gives the index corresponding to a specific frequency (eg to find a
    frequency from a fourier transform list). Requires the frequency step
    of the list.

    Arguments:
    freq - frequency being looked for
    frequency_step - the step between consecutive indexes
    """
    index = int(round(freq/frequency_step))
    return index
b909914f649b9bdc9b70571e1c148e8216213bbd
444,223
def merge_group_answers_with_count(file_content):
    """
    Merges the group answers together and count the number of people in each group

    :param [str] file_content: Content from the input file
    :return: For each group, a long string of all the answers and the number of people
    :rtype: [(str, int)]
    """
    group_info = []
    merged_answer = ""
    people = 0

    for line in file_content:
        if line != "":
            merged_answer += f"{line}"
            people += 1
        else:
            group_info.append((merged_answer, people))
            merged_answer = ""
            people = 0
    group_info.append((merged_answer, people))  # Adding the last one

    return group_info
691a58dffe5e7f6f5f8ba79ce521c0ce23f29e86
489,082
def validate_encryptionoption(encryption_option):
    """
    Validate EncryptionOption for EncryptionConfiguration
    Property: EncryptionConfiguration.EncryptionOption
    """

    VALID_ENCRYPTIONCONFIGURATION_ENCRYPTIONOPTION = [
        "CSE_KMS",
        "SSE_KMS",
        "SSE_S3",
    ]

    if encryption_option not in VALID_ENCRYPTIONCONFIGURATION_ENCRYPTIONOPTION:
        raise ValueError(
            "EncryptionConfiguration EncryptionOption must be one of: %s"
            % ", ".join(VALID_ENCRYPTIONCONFIGURATION_ENCRYPTIONOPTION)  # NOQA
        )
    return encryption_option
2e6c7df84fa8434c6692fc58c95bbb9ecd46d05d
496,928
import re
from typing import Literal


def extract_text(
    pattern: re.Pattern[str] | str,
    source_text: str,
) -> str | Literal[False]:
    """Match the given pattern and extract the matched text as a string."""
    match = re.search(pattern, source_text)
    if not match:
        return False
    match_text = match.groups()[0] if match.groups() else match.group()
    return match_text
a6f762cfd26dd1231db4b6e88247e2566d186212
208
def rot_word(word):
    """Takes a 4-byte word and performs cyclic permutation.
    Aka one-byte left circular shift.

    [b0, b1, b2, b3] -> [b1, b2, b3, b0]
    """
    return word[[1, 2, 3, 0]]
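The fancy indexing implies a NumPy array input; a short sketch (the word value is illustrative):

import numpy as np

word = np.array([0x09, 0xcf, 0x4f, 0x3c], dtype=np.uint8)
print(rot_word(word))  # -> [207  79  60   9], i.e. 0xcf 0x4f 0x3c 0x09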
f89b3499009d29c7e3e9745e30f643b941c77f00
96,265
def positive_sum3(a: int, b: int, c: int = 0):
    """Normal exposed function

    Parameters
    ----------
    a: int
    b: int
    c: int, default 0
        All parameters are positive values. If negative, an exception is raised.

    Returns
    ----------
    int
    """
    if a < 0 or b < 0 or c < 0:
        raise Exception('Error: Negative argument')
    return a + b + c
008baaa83ef666181e83f56f167a3b4fa6ddd4e7
418,205
from pathlib import Path


def requirements_path() -> Path:
    """ Return the absolute Path to 'tests/requirements' """
    return Path(__file__).parent / Path('requirements')
d20bf1c50fe0881238435641f782a3975a82c1f7
366,614
from functools import reduce
from operator import mul

import torch


def flat_softmax(inp):
    """Compute the softmax with all but the first two tensor dimensions combined."""
    orig_size = inp.size()
    flat = inp.view(-1, reduce(mul, orig_size[2:]))
    flat = torch.nn.functional.softmax(flat, -1)
    return flat.view(*orig_size)
e3cbe2603daba7f044b49e28fb3505fb17f95fa6
474,805
import requests
import json


def get_metadata(metadata_url: str) -> dict:
    """Gets metadata from a jsonld published by AAFC

    Args:
        metadata_url (str): url to get metadata from.

    Returns:
        dict: AAFC Land Use Metadata.
    """
    if metadata_url.endswith(".jsonld"):
        if metadata_url.startswith("http"):
            metadata_response = requests.get(metadata_url)
            jsonld_response = metadata_response.json()
        else:
            with open(metadata_url) as f:
                jsonld_response = json.load(f)

        geom_obj = next(
            (x["locn:geometry"]
             for x in jsonld_response["@graph"]
             if "locn:geometry" in x.keys()),
            [],
        )
        geom_metadata = next(
            (json.loads(x["@value"])
             for x in geom_obj
             if x["@type"].startswith("http")),
            None,
        )
        if not geom_metadata:
            raise ValueError("Unable to parse geometry metadata from jsonld")

        description_metadata = [
            i for i in jsonld_response.get("@graph")
            if "dct:description" in i.keys()
        ][0]

        metadata = {
            "geom_metadata": geom_metadata,
            "description_metadata": description_metadata,
        }
        return metadata
    else:
        # only jsonld supported.
        raise NotImplementedError()
486cca39ea054ab36b9011591cd069a0835c608e
250,142
def qbytearray_to_str(qba):
    """Convert QByteArray object to str in a way compatible with Python 2/3"""
    return str(bytes(qba.toHex().data()).decode())
7b1a016d3b7469cfad287a646c3f2e0aa61c2552
88,235
def _floatFormat(value):
    """Format the floating number to make sure it gets the decimal point."""
    valueStr = "%.16G" % value
    if "." not in valueStr and "E" not in valueStr:
        valueStr += ".0"
    return valueStr
5c322d41909e76860d5aa1ddfe8fcebcec0da711
443,942
def fix_thread_string(tstr):
    """
    Takes a string with numbers separated by period and possibly with / at
    end, and outputs a string with 3 digit numbers separated by periods.
    """
    remove_slash = lambda s: s[:-1] if s[-1] == '/' else s
    three_digits = lambda s: "%03d" % int(s)
    return '.'.join(map(three_digits, map(remove_slash, tstr.split('.'))))
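A one-line usage sketch:

print(fix_thread_string("1.23.4/"))  # -> '001.023.004'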
fb0c4dd83a1446114835eb8758501b07e7392d4d
214,569
def lemmatizer_fun(lemmatizer, token, token_tag=None):
    """Lemmatize a token using its tag in the sentence; if the token has
    no tag, lemmatize it with the no-tag method.

    Arguments:
        lemmatizer {obj} -- instance of WordNetLemmatizer
        token {str} -- token

    Keyword Arguments:
        token_tag {str} -- tag of token in sentence (default: {None})

    Returns:
        str -- lemmatized token
    """
    if token_tag is None:
        return lemmatizer.lemmatize(token)
    return lemmatizer.lemmatize(token, pos=token_tag)
a64597b3a5e3e76b31bb3c3e8f5cf6d34f892e12
587,642
def convert_boolean(value: bool) -> str:
    """Convert from python bool to postgresql BOOLEAN"""
    if value is True:
        return 'TRUE'
    elif value is False:
        return 'FALSE'
    else:
        return 'NULL'
102c9b4ce2db0d74f3d0838f09810b14737772fe
528,648
from typing import AbstractSet


def add_to_set(set_: AbstractSet[str] | None, new: str) -> set[str]:
    """Add an entry to a set (or create it if doesn't exist).

    Args:
        set_: The (optional) set to add an element to.
        new: The string to add to the set.
    """
    return set(set_).union([new]) if set_ is not None else {new}
ec1f6e3ca51bc11ff0996a1aab00e84e2c23228e
690,677
def get_last_logline(logfile: str) -> str:
    """Get last line of logfile

    Args:
        logfile: path to logfile

    Returns:
        last line of logfile
    """
    line = ""
    with open(logfile) as file_handle:
        for line in file_handle:
            pass
    return line
92a97d380b3f6bad1a337da79350f57fc46ab556
350,449
def which(repository_ctx, cmd, default = None):
    """
    A wrapper around repository_ctx.which() to provide a fallback value.
    Doesn't %-escape the value!

    Args:
      repository_ctx: The repository context.
      cmd: name of the executable to resolve.
      default: Value to be returned when such executable couldn't be found.

    Returns:
      absolute path to the cmd or default when not found.
    """
    result = repository_ctx.which(cmd)
    return default if result == None else str(result)
369b00e3131831664cdd1c07e0bbbf66d28795a8
269,824
import pickle
import glob


def cluster_files_reader(files_pattern, trainer_count, trainer_id,
                         loader=pickle.load):
    """
    Create a reader that yields elements from the given files, selecting
    a file set according to trainer count and trainer_id.

    :param files_pattern: the files which are generated by split(...)
    :param trainer_count: total trainer count
    :param trainer_id: the trainer rank id
    :param loader: is a callable function that load object from file, this
        function will be called as loader(f) and f is a file object.
        Default is cPickle.load
    """
    def reader():
        if not callable(loader):
            raise TypeError("loader should be callable.")
        file_list = glob.glob(files_pattern)
        file_list.sort()
        my_file_list = []
        for idx, fn in enumerate(file_list):
            if idx % trainer_count == trainer_id:
                print("append file: %s" % fn)
                my_file_list.append(fn)
        for fn in my_file_list:
            # pickle.load requires a binary-mode file object.
            with open(fn, "rb") as f:
                lines = loader(f)
                for line in lines:
                    yield line

    return reader
abeeb739df94b21d0b94de485ba6cf01c6dae8e9
190,694
def get_backbone_atoms(structure):
    """
    Return the backbone atoms of the specified structure.

    Parameters
    ----------
    structure : Bio.PDB.Structure
        The structure.

    Returns
    -------
    list of Bio.PDB.Atom
        The backbone atoms.
    """
    atoms = []
    for model in structure:
        for chain in model:
            for residue in chain:
                if 'CA' in residue:
                    atoms.append(residue['CA'])
                if 'C' in residue:
                    atoms.append(residue['C'])
                if 'N' in residue:
                    atoms.append(residue['N'])
    return atoms
791c120b88329c81afb1578964e1e3b501abc47c
485,641
import re


def count_aspects(mask: list):
    """Counts the number of keywords (runs) in a given aspect mask

    Example:
        - input: [1,1,0,0,1,0]
        - output: 2

    Arguments:
        - mask: list of booleans (returned by function create_aspectmask)

    Return:
        Positive integer
    """
    mask_str = "".join([str(x) for x in mask])
    return len(re.findall(r"^1|01", mask_str))
ad1ebfc54bcc2dc6440e431bc2454dc64e2d69c4
333,680
from pathlib import Path
from typing import List
import re


def get_amici_base_sources(
        base_dir: Path,
        with_hdf5: bool = True
) -> List[str]:
    """Get list of source files for the amici base library

    Expects that we are inside $AMICI_ROOT/python/sdist

    Arguments:
        base_dir: AMICI base dir containing ``src/`` and ``include/``
        with_hdf5: compile with HDF5 support
    """
    amici_base_sources = (base_dir / 'src').glob('*.cpp')
    amici_base_sources = [
        str(src) for src in amici_base_sources
        if not re.search(r'(matlab)|(\.(ODE_)?template\.)', str(src))
    ]

    if not with_hdf5:
        hdf5_cpp = base_dir / 'src' / 'hdf5.cpp'
        try:
            # sometimes this fails for unknown reasons...
            amici_base_sources.remove(str(hdf5_cpp))
        except ValueError:
            print(f'Warning: could not find {hdf5_cpp} in '
                  f'{amici_base_sources}')

    return amici_base_sources
62f0f10dbffff57864a75725f79749636f70a68a
320,420
def _reset_time_for_date(date):
    """ Set time on a datetime to 00:00:00 """
    return date.replace(hour=0, minute=0, second=0, microsecond=0)
2333115d6aa427d3e0cf4a77668981234abbbd27
423,273
def reorder_columns(df, order):
    """Reorders dataframe columns, sorting any extra columns alphabetically."""
    extra_cols = set(df.columns) - set(order)
    return df[list(order) + sorted(extra_cols)]
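A quick usage sketch with pandas:

import pandas as pd

df = pd.DataFrame(columns=["c", "a", "id", "b"])
print(reorder_columns(df, ["id"]).columns.tolist())  # -> ['id', 'a', 'b', 'c']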
41e117de52a909925197738df066da2e278bb915
403,314
def parse_qualifiers(gb_feature):
    """Parse the GBFeature_quals block of the GBFeature entry and create a
    dictionary of qualifiers using the GBQualifier_name as a key.

    Args:
        gb_feature: Reference to GBFeature block from Entrez XML

    Kwargs:
        None

    Raises:
        None
    """
    qualifiers = {}
    # print("*******GBQualifier: {}\n".format(gb_feature['GBFeature_quals']))
    for qual in gb_feature['GBFeature_quals']:
        if qual['GBQualifier_name'] not in qualifiers:
            qualifiers[qual['GBQualifier_name']] = []
        qualifiers[qual['GBQualifier_name']].append(qual['GBQualifier_value'])
    return qualifiers
c76332a86a71b30d354800478eb7f2b4702a7efd
451,569
import threading


def makeThreadSafe(function, lock=None):
    """ Call with a function you want to make thread safe

    Call without lock to make the function thread safe using one lock per
    function. Call with existing lock object if you want to make several
    functions use same lock, e.g. all functions that change same data
    structure.

    @param function: function to make thread safe
    @param lock: threading.Lock instance or None
    @rtype: function
    @return: function decorated with locking
    """
    if lock is None:
        lock = threading.Lock()

    def decorated(*args, **kw):
        lock.acquire()
        try:
            return function(*args, **kw)
        finally:
            lock.release()

    return decorated
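A small usage sketch guarding a shared counter; the counter and bump function are illustrative:

import threading

counter = {"n": 0}

def bump():
    counter["n"] += 1

safe_bump = makeThreadSafe(bump)
threads = [threading.Thread(target=safe_bump) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter["n"])  # -> 100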
dde480d751b867e442fcef4263d6d5e5b9da1f37
72,102
def read_lines_from_text_file(file_path):
    """Read lines from a text file."""
    with open(file_path) as f:
        lines = [line.strip() for line in f.readlines()]
    return lines
95a1592a20d4e83a62def2f8aa8f20633e1024a6
23,223
def preprocess_user_data(user_id, ratings_df):
    """
    Build a Surprise test set with book ids of the books that
    have not been rated by a given user.
    """
    rated_books = set(ratings_df.loc[ratings_df['user_id'] == user_id, 'book_id'])
    all_books = set(ratings_df['book_id'])
    books_unknown_rating = sorted(all_books - rated_books)

    # Create a test set for the user so that it can be used to predict
    # unknown ratings
    user_testset = []
    for i in books_unknown_rating:
        # Some value should be passed as a true rating
        # Here, 3.92 is used - mean rating (this value will not affect
        # the prediction)
        user_testset.append((user_id, i, 3.92))
    return user_testset
868f4ebf43a3e1bade5d996c9409e9b77b834ada
242,933
def categorical_error(pred, label):
    """
    Compute categorical error given score vectors and labels as
    numpy.ndarray.
    """
    pred_label = pred.argmax(1)
    return (pred_label != label.flat).mean()
577df5b9dd7835334084eb65d9789118e6951e7d
497,941
def title_to_snake_case(text):
    """Converts "Column Title" to column_title"""
    return text.lower().replace(' ', '_').replace('-', '_')
72893c308cd772cc972f76199ee0e32ce5f7c92b
100,759