Columns: content — string (length 39 to 9.28k); sha1 — string (length 40); id — int64 (8 to 710k). Each record below is a Python source snippet (content) followed by its sha1 and id.
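A minimal sketch of how a record could be checked, assuming (the preview does not state it) that the sha1 column is the SHA-1 hex digest of the UTF-8 encoded content field; the helper name verify_record is hypothetical:

import hashlib

def verify_record(content: str, sha1: str) -> bool:
    # Assumption: sha1 is the SHA-1 hex digest of the raw content string.
    return hashlib.sha1(content.encode("utf-8")).hexdigest() == sha1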
import hashlib def calculate_variant_md5(chromosome, position, reference, alternate): """Calculate MD5 hash for a variant Args: chromosome (str): Chromosome position (int): Genomic position reference (str): Reference allele alternate (str): Alternate allele Returns: str: MD5 hash for a variant """ key = '|'.join(list(map(str, [chromosome, position, reference, alternate]))) return hashlib.md5(key.encode('utf-8')).hexdigest()
36a201f05a8e2a09c2e567acf02089d01462248c
15,769
from typing import Optional
from typing import Dict


def parse_encryption(bucket: str, encryption: Optional[Dict]) -> Optional[Dict]:
    """
    Parses the S3 default encryption object and returns a dict of the relevant data
    """
    # Encryption object JSON looks like:
    # {
    #     'ServerSideEncryptionConfiguration': {
    #         'Rules': [
    #             {
    #                 'ApplyServerSideEncryptionByDefault': {
    #                     'SSEAlgorithm': 'AES256'|'aws:kms',
    #                     'KMSMasterKeyID': 'string'
    #                 },
    #                 'BucketKeyEnabled': True|False
    #             },
    #         ]
    #     }
    # }
    if encryption is None:
        return None

    _ssec = encryption.get('ServerSideEncryptionConfiguration', {})

    # Rules is a list, but only one rule ever exists
    try:
        rule = _ssec.get('Rules', []).pop()
    except IndexError:
        return None

    algorithm = rule.get('ApplyServerSideEncryptionByDefault', {}).get('SSEAlgorithm')
    if not algorithm:
        return None

    return {
        "bucket": bucket,
        "default_encryption": True,
        "encryption_algorithm": algorithm,
        "encryption_key_id": rule.get("ApplyServerSideEncryptionByDefault", {}).get('KMSMasterKeyID'),
        "bucket_key_enabled": rule.get('BucketKeyEnabled'),
    }
d5d03426bb5125ca04b4ba4af12209e5a6ba7fe1
412,773
def get_confidence_outcome(tps, fps, fns, negative_ids):
    """
    Determines whether prediction with a given confidence is true or false
    based on the TPs and FPs lists

    Parameters
    ----------
    tps, fps : list of tuple
        A list of predicted TP(FP)s in format (slice_id, Adhesion, confidence)
    fns : list of tuple
        A list of predicted FNs in format (slice_id, confidence)
    negative_ids : list of str
        A list of negative ids

    Returns
    -------
    outcomes : list
        A list of tuple of confidence and whether its prediction is true
    outcomes_negative : list
        A list of tuple of confidence and whether its prediction is true
        for negative slices only
    """
    outcomes = []
    outcomes_negative = []

    for _, _, confidence in tps:
        outcomes.append((1, confidence))

    for slice_id, _, confidence in fps:
        outcomes.append((0, confidence))
        if slice_id in negative_ids:
            outcomes_negative.append((0, confidence))

    for _ in fns:
        outcomes.append((1, 0))

    return outcomes, outcomes_negative
8db305b0b9edabc1f8949570514c518d01cdc400
584,490
def mixin_enabled(plugin, key, *args, **kwargs): """ Return if the mixin is existant and configured in the plugin """ return plugin.mixin_enabled(key)
0d89dbbc381d875d2b5f401635d74e4403267269
684,833
def findall_name_value(xml, name, value): """Find all xml objects at any depth with the given 'name' and 'value'""" return xml.findall('.//*/[@name="'+str(name)+'"]/[@value="'+str(value)+'"]')
c812bb7591dbdd19be34a21924dce44292be3c35
234,172
def find_best_stock_profit(stock_prices): """Takes in a list of stock prices and returns the best possible profit possible from buying and selling""" if type(stock_prices) != list: raise TypeError( "The argument for find_best_stock_profit must be of type list.") elif len(stock_prices) < 1: return 0 purchase_price = stock_prices[0] highest_profit = 0 for i in range(1, len(stock_prices)): current_price = stock_prices[i] if current_price - purchase_price < 0: purchase_price = current_price elif current_price - purchase_price > highest_profit: highest_profit = current_price - purchase_price return highest_profit
3cc9fcbb160599f22cddaf07146f6ef9612af918
140,995
def dict_to_str(d): """ Given a dictionary d, return a string with each entry in the form 'key: value' and entries separated by newlines. """ vals = [] for k in d.keys(): vals.append('{}: {}'.format(k, d[k])) v = '\n'.join(vals) return v
a2c3c87715ccdacafb76dba57050ea2b942ce0d2
56,625
def __edge_exists__(i, j, g): """ Checks if the edge i --> j exists in the graph, g. :param i: Index of a node. :param j: Index of a node. :param g: Graph. :return: A boolean indicating if j is a successor of i. """ return j in list(g.successors(i))
00cb1fb0bb6f2fffb1f6359c9a8fdd2afd939652
34,987
def _node_to_name(node, event_graph): """Create a name string for a given node in the event graph. """ return "{}@{}".format(event_graph.property(node, "name", ""), event_graph.property(node, "tag", ""))
5d80a3566416f5cfa9df69f4f4ec1b03da172ff0
179,875
import math def to_window_length_for_2min_cadence(length_day): """Helper for LightCurve.flatten(). Return a `window_length` for the given number of days, assuming the data has 2-minute cadence.""" res = math.floor(720 * length_day) if res % 2 == 0: res += 1 # savgol_filter window length must be odd number return res
1fa982eb799465a9f77dec19103f67d41b6f5474
447,925
def get_command_eof(command: str, eof_prefix: str = "EOF") -> str: """ Determine a safe end-of-file keyword to use for a given command to wrap. """ index = 0 eof = eof_prefix lines = command.split("\n") while True: if eof in lines: index += 1 eof = eof_prefix + str(index) else: return eof
5e87b42c63baacaa7e281ace7ef05c3bd2229abb
437,534
def rgb_f2i(rgb): """Transforms the float 0.0-1.0 RGB color values to integer 0-255 RGB values. """ r, g, b = rgb ri = int(255.0 * r) gi = int(255.0 * g) bi = int(255.0 * b) return (ri, gi, bi)
7f742479bc0ed956946b3aeab76486dd73ef7f6d
160,313
from typing import List from pathlib import Path def filter_paths_to_existing(*iterables) -> List[str]: """ Filter paths to only existing. """ return [path for path in iterables if Path(path).exists()]
43e31638b0eba1705000e6cf7d657c761cb83d93
466,644
def no_dilution(cf_df):
    """Checks if the shares of investors were NOT diluted since previous year

    Explanation of Dilution: https://www.investopedia.com/terms/d/dilution.asp

    cf_df = Cashflow Statement of the specified company
    """
    try:
        # Earnings of the company through stock issuance
        issued_stock = cf_df.iloc[cf_df.index.get_loc("Issuance Of Stock"), 0]
    except:
        issued_stock = 0
    try:
        # Expenditures of the company through stock repurchases
        repurchased_stock = cf_df.iloc[cf_df.index.get_loc("Repurchase Of Stock"), 0]
    except:
        repurchased_stock = 0
    if (issued_stock + repurchased_stock <= 0):
        return True
    else:
        return False
8ee3b01f5c5c61c2d47ec15ba6e34a588d4ea1ac
558,959
import contextlib import socket def random_port(addr): """Return a randomly-chosen open port number for the given address.""" with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: s.bind((addr, 0)) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) return s.getsockname()[1]
e510fbd52d8e653016cadb2ca91df92079b8ca28
229,494
def get_percentile_dict(yhat_name, valid, id_):
    """ Returns the percentiles of a column, yhat_name, as the indices based on
    another column id_.

    :param yhat_name: Name of column in valid in which to find percentiles.
    :param valid: Pandas validation frame.
    :param id_: Validation Pandas frame containing yhat and id_.
    :return: Dictionary of percentile values and index column values.
    """
    # create a copy of frame and sort it by yhat
    sort_df = valid.copy(deep=True)
    sort_df.sort_values(yhat_name, inplace=True)
    sort_df.reset_index(inplace=True)

    # find top and bottom percentiles
    percentiles_dict = {0: sort_df.loc[0, id_],
                        99: sort_df.loc[sort_df.shape[0] - 1, id_]}

    # find 10th-90th percentiles
    inc = sort_df.shape[0] // 10
    for i in range(1, 10):
        percentiles_dict[i * 10] = sort_df.loc[i * inc, id_]

    return percentiles_dict
cde37a38be2238fa27032fbac27846dad596ee18
658,302
def load_nodes_from_file(file_path: str): """ Loads nodes from file. :param file_path: path to file :return: list of nodes """ nodes = [] with open(file_path) as f: for line in f: nodes.append(int(line)) return nodes
f6158b818ab732ac30d09d1464fcc50b2879836c
309,644
def clients_url(tenant_url): """Returns the clients API endpoint for a given tenant """ return '{0}/clients/v2'.format(tenant_url)
0e5562badedab88f88f7ec706f17413b155cb7a5
175,268
def _nodes_to_road_section(origin: str, destination: str) -> str: """Create a road section 'A->B' from two nodes 'A' and 'B'.""" return f"{origin}->{destination}"
bafdda719f0d09705471f8edac312c9577e926c4
603,189
def split_text(text, operand='+', plusified=''): """ Replaces whitespaces with the operand (default: +) Depending on the use case, may want to set operand='%2B' """ term_list = text.split() for term in term_list: plusified += term + operand return plusified[:-len(operand)]
73a6f7be744ed62c7370ac12087ce30e98bd5058
138,271
def popcount(v: int) -> int: """Count the active bits in a number""" return bin(v).count("1")
49c7ed7e878e5daeeecd93aee6c897f587bcd68e
290,914
import requests def get_call_api(url, payload, headers): """Does a GET API call for a given url along with provided payload & headers. Args: url (str): Url for GET API call payload (dict): Payload for GET API call headers (dict): Headers for GET API call Returns: request: Response of GET API call """ return requests.request("GET", url, headers=headers, data=payload)
5ccba0cfb21b1b03da8d83b44e24c669a0434641
102,461
def CG(seq): """CG content of a given sequence, 0<=CG<=1. Input string, returns float""" return float(seq.count('C')+seq.count('G'))/len(seq)
9f00f4eb15d578dbca7a99f0fda5913cfc0da010
505,696
def vo_from_fqan(fqan):
    """
    Get the VO from a full FQAN

    Args:
        fqan: A single FQAN (i.e. /dteam/cern/Role=lcgadmin)

    Returns:
        The vo + group (i.e. dteam/cern)
    """
    components = fqan.split('/')[1:]
    groups = []
    for c in components:
        if c.lower().startswith('role='):
            break
        groups.append(c)
    return '/'.join(groups)
eb0b8fed163fd4c49750d71a0a0b568b39525322
658,829
def bytes_to_hex(byteseq: bytes) -> str: """ Convert bytes into hexadecimal string. :param byteseq: The byte sequence to be converted to hex. :return: The hexadecimal string """ return byteseq.hex()
12d9eb66a592691ccb34dd797db449882b37e390
135,292
def gen_imagesrc(d, l):
    """
    Produces the HTML code for that set of images.
    d is a dict containing the name, date, location, directory, and list of pictures.
    l is 1 if this is a sub page, and 0 if this is home.
    """
    name = d['name']
    date = d['date']
    location = d['location']
    directory = d['directory']
    files = d['pics']
    if l == 1:
        p = '<a class="nav" href="index.html">Go Home</a>'
    else:
        p = '<a class="nav" href="' + directory + '.html">Go to page</a>'
    srcd = ('<div class="image_set"><div class="left"><h2 class="title">' + name +
            '</h2><p class="date">' + date + '</p><p class="location">' + location +
            '</p>' + p + '</div><div class="images">')
    for i in files:
        srcd = srcd + '<a href="' + directory + '/original/' + i + '"><img src="' + directory + '/final/' + i + '" class="img"/></a>'
    srcd = srcd + '</div></div>'
    return srcd
b867a6952edc94c7257cc3e874d0d9b463c79cfb
462,059
import torch def calc_accuracy(logits, labels): """ func to compute accuracy from input logits and labels, meant to be used with tensor inputs from Pytorch. This func essentially just implements np.sum(np.argmax(logits, axis=0) == labels)/len(labels). Make sure if logits is in cuda mode labels need to be in cuda mode. Inputs: logits tensor - logits of dim num classes x N labels tensor - associated labels vector size N Outputs: train_acc float - float of accuracy between [0, 1] Example Useage: >>> inputs, labels = data # get inputs and labels for current batch >>> logits = neural_network(inputs) # forward pass over inputs >>> calc_accuracy(logits, labels.cuda()) """ max_vals, max_indices = torch.max(logits, 1) train_acc = (max_indices == labels).sum().item()/max_indices.size()[0] return train_acc
304fff013ed5b8ef98fad986aef50fe6574dfee6
260,332
import colorsys def hsl_to_rgb(h, s, l): """ Converts HSL to RGB. Parameters ---------- h: :class:`int` The hue value in the range ``[0, 360]``. s: :class:`float` The saturation value in the range ``[0, 1]``. l: :class:`float` The lightness value in the range ``[0, 1]``. Returns ------- Tuple[:class:`int`, :class:`int`, :class:`int`] The RGB tuple. """ h /= 360 r, g, b = colorsys.hls_to_rgb(h, l, s) r = int(round(r * 255, 0)) g = int(round(g * 255, 0)) b = int(round(b * 255, 0)) return (r, g, b)
16d1d135744bf1b2c158f19a1981f1ee7fabfd97
679,948
def add(values, puzzle_input): """Adds the first two values and records them at the location of the third "value" in the puzzle input. Then returns the resulting puzzle input. """ result = values[0] + values[1] puzzle_input[values[2]] = result return puzzle_input
005ed1cb688c820b5017bb4abe2f5057139ea631
287,725
def find_spaces(string_to_check): """Returns a list of string indexes for each string this finds. Args: string_to_check; string: The string to scan. Returns: A list of string indexes. """ spaces = list() for index, character in enumerate(string_to_check): if character == ' ': spaces.append(index) return spaces
8bcd1d9911efab3c65e08524293b11afd449efa0
9,982
import torch


def spatial_grad(func):
    """
    Approximate derivatives of the functions func[b,c,:,:].

    dfdx, dfdy = spatial_grad(func)

    In:
    func: torch.FloatTensor
        of shape BxCxhxw with B >= 1 (batch size), C = 1 or C = 3 (color channels),
        h,w >= 3, and [type] is 'Float' or 'Double'.
        Contains the values of functions f_b: R^2 -> R^C, b=1,...,B,
        on the grid {0,...,h-1}x{0,...,w-1}.

    Out:
    dfdx: torch.FloatTensor
    dfdy: torch.FloatTensor
        of shape BxCxhxw contain the x and y derivatives of f_1, ..., f_B
        at the points on the grid, approximated by central differences
        (except on boundaries):
        For b=0,...,B-1, c=0,...,C, i=1,...,h-2, j=1,...,w-2
        dfdx[b,c,i,j] = (func[b,c,i,j+1] - func[b,c,i,j-1])/2
        dfdy[b,c,i,j] = (func[b,c,i+1,j] - func[b,c,i-1,j])/2

    positive x-direction is along rows from left to right.
    positive y-direction is along columns from above to below.
    """
    # Derivative in x direction (rows from left to right)
    dfdx = torch.zeros_like(func)
    # forward difference in first column
    dfdx[:, :, :, 0] = func[:, :, :, 1] - func[:, :, :, 0]
    # backwards difference in last column
    dfdx[:, :, :, -1] = func[:, :, :, -1] - func[:, :, :, -2]
    # central difference elsewhere
    dfdx[:, :, :, 1:-1] = 0.5 * (func[:, :, :, 2:] - func[:, :, :, :-2])

    # Derivative in y direction (columns from above to below)
    dfdy = torch.zeros_like(func)
    # forward difference in first row
    dfdy[:, :, 0, :] = func[:, :, 1, :] - func[:, :, 0, :]
    # backwards difference in last row
    dfdy[:, :, -1, :] = func[:, :, -1, :] - func[:, :, -2, :]
    # central difference elsewhere
    dfdy[:, :, 1:-1, :] = 0.5 * (func[:, :, 2:, :] - func[:, :, :-2, :])

    return dfdx.detach(), dfdy.detach()
d5140a3bff810637fb3fab7902027f135c99085a
514,705
def scf_mult(mult): """ Creates string for NWChem multiplicity in the SCF part of the deck. """ if mult == 1: return 'singlet' elif mult == 2: return 'doublet' elif mult == 3: return 'triplet'
c2dbf25f70ae6fc18661d6c7ba3ab31f8b428d96
181,962
def frequency_count(seg_list):
    """
    Count word frequencies.

    :param seg_list: list of segmented words
    :return: a dict mapping each word to the number of times it occurs
    """
    frequency = {}
    for item in seg_list:
        if item not in frequency:
            frequency[item] = 1
        else:
            frequency[item] += 1
    return frequency
17d60f2d6d7a96eaf41cd735a8c81b4928cb04e3
523,718
import re def regex_sub(value, pattern, substitute): """ Substitue pattern in the value args: value (str): value that need the substitution pattern (str): regex patten that need to be checked substitute (str): regex pattern that need to be substituted """ return re.sub(pattern, substitute, value)
228ff2f645a5275f34dc9480bff062b43784fe6c
156,316
def shortest_deg(src, dest): """Find shortest signed angle between dest and src.""" return (dest - src + 180) % 360 - 180
06369e81377e4a6b41f7ed3ae75eca8270b1f4ba
341,601
def readFastaGenome(fa_path):
    """
    Reads genome file in fasta format.

    :param fa_path: Path to fasta file
    :return: Genome dictionary
    """
    genome = {}
    genome_id = ""
    """
    genome = {
        "$GENOME_ID" : $GENOME_STRING
    }
    """
    # Open and read fasta file located in fa_path
    with open(fa_path) as raw_fa:
        tmp_str = ""
        # read every single line till EOF
        for line in raw_fa:
            # if line is a fasta header
            if line[0] == ">":
                # extract genome name (id)
                genome_id = line[1:].replace("\n", "")
            else:
                # append nucleotides' sequence
                tmp_str = tmp_str + (line.replace("\n", ""))
        genome[genome_id] = tmp_str
    return genome
e43c27ad7b2125c72e6b14f930381f8869e8df3c
231,049
import functools


def data_source_factory(name=None, **properties):
    """Decorator for applying to a data source defined as a factory.

    The decorator can be applied to a class or a function. The class
    constructor or function must accept arguments of 'settings', being
    configuration settings for the data source, and 'environ' being
    information about the context in which the data source is being used.
    The resulting object must be a callable which directly returns an
    iterable/generator with the metrics for each sample.
    """
    def _decorator(func):
        @functools.wraps(func)
        def _properties(settings):
            def _factory(environ):
                return func(settings, environ)
            d = dict(properties)
            d['name'] = name
            d['factory'] = _factory
            return d
        return _properties
    return _decorator
6c2f23f73905aebbacd807d8a3d3b3d8a55c973f
412,635
def _get_int(indexable_container, index, default): """try to get an int from an indexable container. If that fails return the default""" try: return int(indexable_container[index]) # exceptions separated to make case coverage clearer except (IndexError, KeyError): # item not found in the container return default except ValueError: # conversion to integer has failed return default
360683431a6c3bf8e98b7e1a3ae7918da1373fce
312,548
def binary_search_recursive(array, item, left=None, right=None):
    """Return index of item in sorted array or none if item not found."""
    # BASE CASE: left and right point to same index
    if left == right and left is not None:
        # not in array
        if left == len(array):
            return None
        # check if index is item
        if array[left] == item:
            return left
        else:
            return None
    # if left and right are none set to length arr
    if left is None:
        left = 0
    if right is None:
        right = len(array)
    # set the middle index
    mid = (left + right) // 2
    # check if mid index is item
    if array[mid] == item:
        return mid
    # check if index's item is greater
    elif array[mid] > item:
        # nothing left to search
        if left == mid:
            return None
        # change right to middle index - 1
        return binary_search_recursive(array, item, left, mid - 1)
    # check if item at index less than
    elif array[mid] < item:
        # nothing left to search
        if right == mid:
            return None
        # change left to middle index + 1
        return binary_search_recursive(array, item, mid + 1, right)
39fa4f85e789333d35dfcfcf817f4887ea4df9d0
109,320
import math def p_to_q(p): """ Turn error probability into Phred-scaled integer """ return int(round(-10.0 * math.log10(p)))
d60ee7bb434a599d641ca61156361779955e09bd
651,516
def prod(iterator): """Product of the values in this iterator.""" p = 1 for v in iterator: p *= v return p
6a27625e89dcf312cd51c879fb9fc451f4ce2843
373,224
from typing import List def max_array_sum(arr: List[int]) -> int: """ Return the maximum sum of an array subset of non-adjacent items. :time: O(n) :space: O(n) """ max_sum = [0] * (len(arr) + 2) for i, num in enumerate(arr, start=2): max_sum[i] = max(max_sum[i - 2] + num, max_sum[i - 1]) return max_sum[-1]
004aeed629bdd35d86e6677e07013a35d44cdf54
616,549
def simplify_postags(tagged_words):
    """
    Convert part-of-speech tags (Penn Treebank tagset) to the 4 tags
    {_N, _V, _J, _X} for nouns, verbs, adjectives/adverbs, and others.
    Beware that this method takes a list of tuples and returns a list of strings.

    :param tagged_words: [(str,str)] -- words and their associated POS-tags
    :return: [str] -- words ending with {_N, _V, _J, _X}
    """
    postags = {"N": ["NN", "NNS", "NNP", "NNPS"],
               "V": ["VB", "VBD", "VBG", "VBN", "VBZ", "VBP"],
               "J": ["JJ", "JJR", "JJS"]}
    simplified = []
    for w, t in tagged_words:
        if t in postags["N"]:
            simplified.append("_".join([w, "N"]))
        elif t in postags["V"]:
            simplified.append("_".join([w, "V"]))
        elif t in postags["J"]:
            simplified.append("_".join([w, "J"]))
        else:
            simplified.append("_".join([w, "X"]))
    return simplified
e0106ffab7b9c4d8bbc3cc1fb7fa0ccc816dfec6
422,346
def _actual_index(arg): """Turn a string in a integer or slice.""" if ':' in arg: idxs = arg.split(':') if len(idxs) > 3: raise ValueError(f'{arg} is an invalid slice') idxs[0] = int(idxs[0]) if idxs[0] else None idxs[1] = int(idxs[1]) if idxs[1] else None if len(idxs) == 3: idxs[2] = int(idxs[2]) if idxs[2] else None else: idxs = idxs[0:2] + [1] return slice(*idxs) return int(arg)
42980424950c744fa69831376ce9075948fad084
231,517
def make_ewm_features(cols_roll, ewm_alpha, df_input):
    """
    Make mean ewm features based on cols_roll, ewm_alpha.
    Make std ewm features for 'playMin'.
    Return list of column names containing all the ewm features created.

    Parameters:
    -----------
    cols_roll -- (list) a list of column names used to make ewm features
    ewm_alpha -- (list) a list of numbers used as the alpha for making ewm features
    df_input -- (pd DataFrame) the input dataframe which contains the features used for making ewm features

    Return:
    -----------
    cols_created_ewm -- (list) a list of column names of the created ewm features
    df_output -- (pd DataFrame) the output dataframe containing all the ewm features
    """
    cols_created_ewm = []
    df_output = df_input.copy()
    for col in cols_roll:
        for alpha in ewm_alpha:
            new_col_mean = col + '_ewm_0' + str(alpha - int(alpha))[2:]  # create ewm feature name
            cols_created_ewm.append(new_col_mean)
            df_output.loc[:, new_col_mean] = df_input[col].ewm(alpha=alpha, min_periods=1).mean()
            if col == 'playMin':
                new_col_std = col + '_ewm_std_0' + str(alpha - int(alpha))[2:]  # create ewm feature name
                df_output.loc[:, new_col_std] = df_input[col].ewm(alpha=alpha, min_periods=1).std()
                cols_created_ewm.append(new_col_std)
    return cols_created_ewm, df_output
f4ce82d5ab3e0ee04d004747a0fe6440f874241c
406,889
import re def changeAllAttributesInText(text, attribute, newValue, append): """ Changes the specified attribute in all tags in the provided text to the value provided in newValue. If append is 0, the value will be replaced. If append is anything else, the value will be appended to the end of the old attribute value. Returns a string containing the edited text. """ regExpression = re.compile(r'([ \n]*)' + attribute + r'= *"(.*)"') if append == 0: resultingString = regExpression.sub(r'\1' + attribute + r'="' + newValue + '"', text) else: resultingString = regExpression.sub(r'\1' + attribute + r'="\2' + newValue + '"', text) return resultingString
4da10f29eaa032c5d44ba40d9776d66fb1ae59f2
652,353
def get_hidden_layers(hidden_layers): """Get a list of sizes for hidden layers Arguments: hidden_layers -- Can be an int if we only want one layer or a list if we want more layers Returns: A list constructed from one passed int or the lsit that was passed """ if type(hidden_layers) is list: return hidden_layers else: return [hidden_layers, hidden_layers]
b055f451dc3fc69ba318eed83943655f839a56bd
190,623
def get_3d_ps(ps,p0,p1): """ Reorders XYZ coordinate pairs, rotating as necessary to display correctly. Parameters ---------- ps : numpy.ndarray A set of coordinates for each joint. p0 : int Index for the start joint. p1 : int Index for the end joint. Returns ------- ps_3d : list of list The start and end points for the XYZ coordinates. """ return [ps[p0][0],ps[p1][0]],[ps[p0][2],ps[p1][2]],[-ps[p0][1],-ps[p1][1]];
4d49375983bf17a0505ad251b66534cfbeea179e
244,010
import inspect def _num_required_args(func): """ Number of args for func >>> def foo(a, b, c=None): ... return a + b + c >>> _num_required_args(foo) 2 >>> def bar(*args): ... return sum(args) >>> print(_num_required_args(bar)) None borrowed from: https://github.com/pytoolz/toolz """ try: spec = inspect.getfullargspec(func) if spec.varargs: return None num_defaults = len(spec.defaults) if spec.defaults else 0 return len(spec.args) - num_defaults except TypeError: return None
dacc2ed0165ea8bc1e4be45bf2a9477778d2fe45
24,768
def convert_tensor_to_numpy(tensor): """ Convert from various forms of pytorch tensors to numpy arrays. Note: torch tensors can have both "detach" and "numpy" methods, but numpy() alone will fail if tensor.requires_grad is True. """ if hasattr(tensor, "detach"): # pytorch tensor with attached gradient tensor = tensor.detach() if hasattr(tensor, "numpy"): # pytorch tensor tensor = tensor.numpy() return tensor
3f3bf3af4385717e2495e8a733f61d4173dbaa33
288,362
def _check_1d_arrays(arrays): """Check if all arrays in an list of arrays are 1d casadi array of shape (n, 1)""" for a in arrays: if not (a.shape[1] == 1): return False return True
9ad89a14710b111694793bce459391a65ae96ce4
505,695
import re def parse_checksum_row(row): """ Args: row: a line of text from pt-table-checksum Returns: An array of elements, if the regex matches [ts, errors, diffs, rows, chunks, chunks_skipped, elapsed_time, db, tbl] Ex: [ '08-30T06:25:33', '0', '0', '28598', '60', '0', '0.547', 'pbdata04159', 'userstats' ] If the regex doesn't match, return nothing. """ p = re.compile(''.join("^(\d+-\d+T\d+:\d+:\d+)\s+(\d+)\s+(\d+)\s+" "(\d+)\s+(\d+)\s+(\d+)\s+(\d+\.\d+)\s+" "(.+?)\.(.+)$")) m = p.match(row) if m: return m.groups()
4f239ba582c07a7135d00e7078ec578dcd13de83
48,342
def is_valid(puzzle, guess, row, col) -> bool:
    """
    Determines whether the guess at the row/col is valid or not
    :param puzzle:
    :param guess:
    :param row:
    :param col:
    :return: bool
    """
    row_val = puzzle[row]
    if guess in row_val:
        return False
    col_val = [puzzle[i][col] for i in range(9)]
    if guess in col_val:
        return False
    # now we want to get where the 3x3 square starts
    # and iterate over the 3 values in the row/column
    row_start = (row // 3) * 3
    col_start = (col // 3) * 3
    for r in range(row_start, row_start + 3):
        for c in range(col_start, col_start + 3):
            if puzzle[r][c] == guess:
                return False
    # now if all the checks pass
    return True
d504c7934cc9c9a40588d6d09680cb7eb8ea04c3
215,779
def SignedBinaryEmitter(target, source, env): """Add the signing certificate (if any) to the source dependencies.""" if env.subst('$CERTIFICATE_PATH'): source.append(env.subst('$CERTIFICATE_PATH')) return target, source
110b99274539f06bc658eb0604a9f203be4341a7
274,981
from functools import reduce def geo_mean(returns): """ quickly calculate the geometric mean of a series :param returns: the data to calc the mean :return: the geometric mean """ return (reduce(lambda x, y: x * y, returns)) ** (1.0 / len(returns))
5a0a164bc244a1d36e56f551abdf8d929355b220
284,545
def is_valid_table_name(cur, table_name): """ Checks whether a name is for a table in the database. Note: Copied from utils.database for use in testing, to avoid a circular dependency between tests and implementation. Args: cur: sqlite3 database cursor object table_name (str): name to check Returns: True if valid, False otherwise """ query = """ SELECT 1 FROM sqlite_master WHERE type == 'table' AND name == ? """ res = cur.execute(query, (table_name,)) return res.fetchone() is not None
f1efc66220baa215a73f374da19842ab38c619be
707,261
def num_to_chrom(chrom): """Add leading 'chr' if it doesn't exist.""" return 'chr' + chrom if not chrom.startswith('chr') else chrom
020f8e7f76b97f6a53f53f6cc6a716194993f194
218,568
def process_rooms(df):
    """Takes a dataframe and isolates the number of rooms.

    Parameters
    ----------
    df : The dataframe to search.

    Returns
    -------
    The dataframe processed.
    """
    df['bedroom'] = df['bedroom'].str.split(' ').str[0]
    df['bathroom'] = df['bathroom'].str.split(' ').str[0]
    return df
78f6bbf597cc0920c5217d123a91eae7e71f2012
359,663
def dict_from_keys(adict, keys): """ Selects a subset of a dictionary with a list of keys. :param adict: A dictionary. :param keys: A list of keys. :returns: A subset of the input dictionary. >>> from dautils import collect >>> adict = {'a.latex': 1, 'b.latex': 2, 'c': 3} >>> collect.dict_from_keys(adict, ['b.latex', 'a.latex']) {'a.latex': 1, 'b.latex': 2} """ return {k: adict[k] for k in keys}
e2a2d85ba4a57f43d825d0e53a9d587d3ddcb3ba
337,069
import six def bitcast_to_bytes(s): """ Take a string and return a string(PY2) or a bytes(PY3) object. The returned object contains the exact same bytes as the input string. (latin1 <-> unicode transformation is an identity operation for the first 256 code points). """ return s if six.PY2 else s.encode("latin1")
b902550be03f447a286490653a2a1361257ac88c
701,748
from typing import Any def identity(value: Any) -> Any: """Returns its argument.""" return value
e80f1dc04e10cd9127b2db3cbf06c71554639dd7
493,185
from typing import List
from typing import Tuple
import re


def parse_molecular_formula(formula: str) -> List[Tuple[str, int]]:
    """
    Parse a molecular formula to get the element types and counts.

    Args:
        formula: molecular formula, f.i. "C8H3F3Br"

    Returns:
        A list of tuples containing element types and number of occurrences.
    """
    matches = re.findall(r'([A-Z][a-z]*)(\d*)', formula)
    # Convert matches to the required format
    results = []
    for match in matches:
        # convert count to an integer, and set it to 1 if the count is not visible in the molecular formula
        count = 1 if not match[1] else int(match[1])
        results.append((match[0], count))
    return results
135296d9c4381b4e6741eca297db39ace00621dc
298,175
def risk_reduction(model, data_treatment, data_control): """Compute predicted risk reduction for each row in data""" treatment_risk = model.predict_proba(data_treatment)[:, 1] control_risk = model.predict_proba(data_control)[:, 1] return control_risk - treatment_risk
8c581faef639abe65c7a67e6573290064f47e5dc
133,091
def reverse_url(context, name, **parts):
    """
    jinja2 filter for generating urls, see
    http://aiohttp.readthedocs.io/en/stable/web.html#reverse-url-constructing-using-named-resources

    Usage:
      {{ 'the-view-name'|url }} might become "/path/to/view"

    or with parts and a query:
      {{ 'item-details'|url(id=123, query={'active': 'true'}) }} might become "/items/1?active=true"

    see app/templates.index.jinja for usage.

    :param context: see http://jinja.pocoo.org/docs/dev/api/#jinja2.contextfilter
    :param name: the name of the route
    :param parts: url parts to be passed to route.url(), if parts includes "query" it's removed and passed separately
    :return: url as generated by app.route[<name>].url(parts=parts, query=query)
    """
    app = context['app']

    kwargs = {}
    if 'query' in parts:
        kwargs['query'] = parts.pop('query')
    if parts:
        kwargs['parts'] = parts
    return app.router[name].url(**kwargs)
963737f6fe4ee2fb3a79bb419051e815295253ef
636,421
import random
import click


def main(filename, n, max_words, min_word_length, max_word_length):
    """
    Generate an xkcd passphrase randomly selected from a list of words.
    """
    def get_words(filename):
        return filename.readlines()

    def get_candidates(words, min_length, max_length):
        return [x for x in words if min_length <= len(x) <= max_length]

    def get_random_words(words, num_words):
        return random.sample(words, num_words)

    def get_phrase(words):
        return ''.join([x.strip().lower() for x in words])

    words = get_words(filename)
    candidates = get_candidates(words, min_word_length, max_word_length)
    for _ in range(0, n):
        random_words = get_random_words(candidates, max_words)
        click.echo(get_phrase(random_words))
1fde702a6d5197213280aaddc90b10a0e70987e4
116,256
def data_dir() -> str: """The directory where result data is written to""" return '/tmp/facebook_ads'
f4be2a777ddf9b69c9a7e2cc478149e1b64739bc
424,430
from typing import Iterable def rgb_hexify(rgb: Iterable[int]) -> str: """Convert a list of RGB numbers to a hex format. """ return ''.join( list(map( lambda x: hex(abs(x))[2:].zfill(2), rgb ))[::-1] )
b3acc17d105de8190a4e386d7e2f71c834601ebe
665,097
def encode_str(string, encoding="utf-8", errors="strict"): """Return an encoded byte object of the input string. Parameters ---------- string : string encoding : string Default is `utf-8`. errors : string Specifies how encoding errors should be handled. Default is `strict`. """ return str(string).encode(encoding=encoding, errors=errors)
d4493b6b60631134ea85add879b2ffd473e00a9a
525,313
def hit(filenames, method, *args, **kwargs): """ Run the given accessor method with args & kwargs; if found remove the result path from filenames and return True, else return False. """ try: medium = method(*args, **kwargs) assert medium.exists except ValueError: return False except: print('Error while processing', method, args, kwargs) raise try: filenames.remove(medium.path) except KeyError: pass return True
dcd025fe3e299290cf43e661d934e845d5c325dd
278,964
def translator(source, target, phrase, version='0.0 test', charset='utf-8'):
    """
    Returns the url encoded string that will be pushed to the translation
    server for parsing.

    List of acceptable language codes for source and target languages
    can be found as a JSON file in the etc directory.

    Some source languages are limited in scope of the possible target
    languages that are available.

    .. code-block:: python

        >>> from translate import translator
        >>> translator('en', 'zh-TW', 'Hello World!')
        '你好世界!'

    :param source: Language code for translation source
    :type source: String

    :param target: Language code that source will be translate into
    :type target: String

    :param phrase: Text body string that will be url encoded and translated
    :type phrase: String

    :return: Request Interface
    :rtype: Dictionary
    """
    url = 'https://translate.google.com/translate_a/single'
    agent = 'User-Agent', 'py-translate v{}'.format(version)
    content = 'Content-Type', 'application/json; charset={}'.format(charset)
    params = {'client': 'a', 'ie': charset, 'oe': charset, 'dt': 't',
              'sl': source, 'tl': target, 'q': phrase}
    request = {'method': 'GET', 'url': url, 'params': params,
               'headers': dict([agent, content])}
    return request
a110c522031754eb8b38431697584b69e025ad60
162,169
def v(i, j, d): """ Return the number of the variable of cell i, j and digit d, which is an integer in the range of 1 to 729 (including). """ return 81 * (i - 1) + 9 * (j - 1) + d
6e6399e2715a73c687b88e1d03aad56c172cadc4
433,362
def remove_workflow_name(name): """ Remove the workflow name from the beginning of task, input and output names (if it's there). E.g. Task names {workflowName}.{taskName} => taskName Input names {workflowName}.{inputName} => inputName Output names {workflowName}.{taskName}.{outputName} => taskName.outputName """ partitioned = name.partition('.') name = partitioned[2] if partitioned[2] != '' else partitioned[0] return name
00efd3c6d900ca7e99178bd02021644789802fd5
58,826
def _get_frame_op_default_axis(name): """ Only DataFrame cares about default_axis, specifically: special methods have default_axis=None and flex methods have default_axis='columns'. Parameters ---------- name : str Returns ------- default_axis: str or None """ if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']: # bool methods return 'columns' elif name.startswith('__'): # __add__, __mul__, ... return None else: # add, mul, ... return 'columns'
c8470a5a7da830e8458a7211a8710a87553f4a9b
216,555
def wrap_space_around(text): """Wrap one additional space around text if it is not already present. Args: text (str): Text Returns: str: Text with extra spaces around it. """ if text[0] != " " and text[-1] != " ": return " " + text + " " elif text[0] != " ": return " " + text elif text[-1] != " ": return text + " " else: return text
cd5f61958db6e2b51952b25513ca7739d745f403
389,078
def _naics_level(code: str) -> int: """_naics_level is a helper that allows us to determine what level the employment sector is talking about Parameters ---------- code : str NAICS Sector as a string in the format xx-xxxxxx Returns ------- int: The number corresponding to the level of the NAICS sector """ codestr = code.split("-")[-1] # Get the part of the string we care about return ( len(codestr) - (codestr.count("0")) + 2 )
c6626753645f59fcfca03820848c94d0d237b1e1
336,546
def list_intersection(a_list, b_list, transform=lambda element : element): """Return a list of the intersection of two lists. Parameters ---------- a_list : sequence First list. The order of the returned list is determined by this list. b_list : sequence or set Second list. Returns ------- result : list The intersection of the input lists with the transformation performed on each element Other Parameters ---------------- transform : function(element) A function to perform on each element while the intersection is being executed. """ # Convert the second list to a set, if necessary b_set = b_list if not isinstance(b_set, set): b_set = set(b_list) # Do the intersection and return a list. result = [transform(value) for value in a_list if transform(value) in b_set] return result
56086cab72c76b524bc00e955272d81717163fdc
383,172
def load_params(params_filepath): """Load parameter dict from file (tab separated).""" params = {} with open(params_filepath, 'r', encoding='utf-8') as params_str: for line in params_str: line = line.strip() items = line.split('\t') if items[0] in ['shuffle', 'cuda', 'reverse']: params[items[0]] = items[1] == 'True' elif items[0] in ['max_seq_len', 'min_count']: params[items[0]] = int(items[1]) else: raise Exception('Unsupported dataset parameter: {}' .format(items[0])) return params
ebcb6abcbbf6c6749fa1f1f26455c283483beba3
273,612
from typing import List
from typing import Tuple


def _split_chunk_bounds(
    start: int,
    stop: int,
    multiple: int,
) -> List[Tuple[int, int]]:
    # pylint: disable=g-doc-args
    # pylint: disable=g-doc-return-or-yield
    """Calculate the size of divided chunks along a dimension.

    Example usage:

        >>> _split_chunk_bounds(0, 10, 3)
        [(0, 3), (3, 6), (6, 9), (9, 10)]
        >>> _split_chunk_bounds(5, 10, 3)
        [(5, 6), (6, 9), (9, 10)]
        >>> _split_chunk_bounds(10, 20, 12)
        [(10, 12), (12, 20)]
    """
    if multiple == -1:
        return [(start, stop)]
    assert start >= 0 and stop > start and multiple > 0, (start, stop, multiple)
    first_multiple = (start // multiple + 1) * multiple
    breaks = list(range(first_multiple, stop, multiple))
    return list(zip([start] + breaks, breaks + [stop]))
ebbc2ff86ca29f3eb72040944bdcc61cbab318ca
642,463
def stations_level_over_threshold(stations, tol):
    """returns a list of tuples, where each tuple holds (i) a station (object) at which
    the latest relative water level is over tol and (ii) the relative water level at the
    station. The returned list should be sorted by the relative level in descending order

    Parameters:
        stations = list of MonitoringStation objects with updated water levels
        tol = tolerance of relative water level shown in final list (float)

    Returns:
        list of [station (MonitoringStation), relative_water_level (float)] tuples"""
    # create empty list
    over_threshold_list = []

    # Loop over all station objects in stations list
    for station in stations:
        # Check if station has relative water level above tol
        if (station.relative_water_level != None) and (station.relative_water_level > tol):
            # Create tuple
            over_threshold_tuple = (station, station.relative_water_level)
            # Add tuple to list
            over_threshold_list.append(over_threshold_tuple)

    # sort list by the relative level in descending order
    over_threshold_list.sort(key=lambda x: x[1], reverse=True)

    return over_threshold_list
131c51b8085903b551eceb35432d2be07a0326e4
225,157
def with_services(services=[]):
    """
    Decorator function

    Checks if the specified services are available.
    If not then it throws an error.

    Examples
    --------
    Check that specific services exist

        @with_services(services=['read_pressure'])
        def myfunction(self):
            ...
            self.services.read_pressure()

    Parameters
    ----------
    services : list of str
        List of services that must be available
    """
    # Make sure inputs are lists
    if isinstance(services, str):
        services = [services]

    # Create the decorator function
    def decorator(func):
        """
        Decorator function to be returned

        Parameters
        ----------
        func : function reference
            The actual function to be decorated
        """
        # Wrapper function that does all the work
        def wrapper(self, *arg, **kwargs):
            # Check services
            # ==============================
            if not hasattr(self, 'services'):
                raise ValueError('This object has no "services" property')

            missing_services = [c for c in services if c not in self.services]
            if missing_services != []:
                raise ValueError(f'Test sequence is missing services: {missing_services}')

            # Run the actual function
            # ==============================
            res = func(self, *arg, **kwargs)
            return res

        # Documentation
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        return wrapper

    return decorator
ce451a68c6c76fda48ec05033a6efd6df1b70c8d
608,290
def is_same_shape(T1, T2):
    """
    Two partial latin squares T1, T2 have the same shape if T1[r, c] = 0
    if and only if T2[r, c] = 0.

    EXAMPLES::

        sage: from sage.combinat.matrices.latin import *
        sage: is_same_shape(elementary_abelian_2group(2), back_circulant(4))
        True
        sage: is_same_shape(LatinSquare(5), LatinSquare(5))
        True
        sage: is_same_shape(forward_circulant(5), LatinSquare(5))
        False
    """
    for i in range(T1.nrows()):
        for j in range(T1.ncols()):
            if T1[i, j] < 0 and T2[i, j] < 0:
                continue
            if T1[i, j] >= 0 and T2[i, j] >= 0:
                continue
            return False
    return True
4602f7cb2a093393445f7e23f2d9f539021d2f7a
29,921
import re def replace_tokens(text, values): """ Replace tokens as specified in a passed dictionary {k: [v1, v2, v]} where tokens v in the text will be replaced by token k. """ for k, v in values.items(): for i in v: rx = rf"(^|(?<=[^\-]))(\b({i})\b)((?=[^\-])|$)" text = re.sub(rx, k, text, flags=re.IGNORECASE) return text
40f7836d6f3df1d0be73a86a84b735921dd811b1
364,372
def b58encode(bytes): """ Base58 Encode bytes to string """ __b58chars = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ' __b58base = len(__b58chars) long_value = int(bytes.encode("hex_codec"), 16) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result return result
a1df07d75c6af99d7ee9de5e6f5465f49320efa4
372,666
def etaCalc(T, Tr = 296.15, S = 110.4, nr = 1.83245*10**-5): """ Calculates dynamic gas viscosity in kg*m-1*s-1 Parameters ---------- T : float Temperature (K) Tr : float Reference Temperature (K) S : float Sutherland constant (K) nr : float Reference dynamic viscosity Returns ------- eta : float Dynamic gas viscosity in kg*m-1*s-1 """ eta = nr * ( (Tr + S) / (T+S) )*(T/Tr)**(3/2) return eta
3f8182ea29fd558e86280477f2e435247d09798e
3,037
def rst_anchor(label): """Format a label as an Envoy API RST anchor.""" return f".. _{label}:\n\n"
8d69da3f6905d1783984d99fa3489e0ebe3ea95b
321,705
import re


def round_decimals_in_string(string, round_to=6):
    """
    Round all decimals in a string to a specified number of places.

    Usage
    =====

    >>> s = "pi is 3.141592653589793 and e is 2.71828182845904523536028747 and one is 1.000"
    >>> round_decimals_in_string(s)
    'pi is 3.141593 and e is 2.718282 and one is 1.000'

    Note that the final occurrence of 1.000 was not rounded.
    """
    pattern = r"([0-9]\.[0-9]{{{round_to}}}[0-9]+)".format(round_to=round_to)

    def replacer(match):
        number = float(match.group(1))
        formatter = "{{0:.{round_to}f}}".format(round_to=round_to)
        return formatter.format(number)

    return re.sub(pattern, replacer, string)
62b52f8e770e7c48f9c21fede4005d8584ca91ea
603,748
def to_snake_case(name): """Convert a name from camelCase to snake_case. Names that already are snake_case remain the same. """ name2 = "" for c in name: c2 = c.lower() if c2 != c and len(name2) > 0 and name2[-1] not in "_123": name2 += "_" name2 += c2 return name2
d247bfe28ce61835f5089830ad5c68674800376d
299,353
def dependency_list(e):
    """
    Get the set of config identifiers referred to by an expression.

    A set is returned instead of a list as we don't need duplicates,
    and order doesn't matter.
    """
    if e is None:
        return set()
    assert type(e) == tuple
    if e[0] in ['and', 'or', '=', '!=', '<', '<=', '>', '>=', '+', '-']:
        return dependency_list(e[1]) | dependency_list(e[2])
    elif e[0] == 'not':
        return dependency_list(e[1])
    elif e[0] in ['string', 'number', 'boolean']:
        # Quoted string, number or boolean
        return set()
    elif e[0] == 'identifier':
        return {e[1]}
    raise Exception("Unexpected depend list: " + str(e))
1ab8e6e008a61d7dd8917e40b6cca36c21a50d58
485,128
import re


def _sort_names(names):
    """
    Sort peeker names by index and alphabetically.

    For example, the peeker names would be sorted as a[0], b[0], a[1], b[1], ...
    """
    def index_key(lbl):
        """Index sorting."""
        m = re.match(".*\[(\d+)\]$", lbl)  # Get the bracketed index.
        if m:
            return int(m.group(1))  # Return the index as an integer.
        return -1  # No index found so it comes before everything else.

    def name_key(lbl):
        """Name sorting."""
        m = re.match("^([^\[]+)", lbl)  # Get name preceding bracketed index.
        if m:
            return m.group(1)  # Return name.
        return ""  # No name found.

    srt_names = sorted(names, key=name_key)
    srt_names = sorted(srt_names, key=index_key)

    return srt_names
0ebffe7c4676319597abf6c7e19f4732d9e0957d
388,487
def load_binary_list(path): """Loads reference binary classifier output. """ bits = [] with open(path, 'r') as fd: for line in fd: if (not line.strip()) or line.startswith('#'): continue bits.append(1 if line.startswith('y') else 0) return bits
cdc5a5703f4a8bbbc2c87499ae692d9054078b1f
252,038
def top_level_check(self, event): """ Top level check for menus with `.user_check` attribute. Parameters ---------- self : ``Menu`` The respective menu instance. event : ``InteractionEvent`` The received interaction event. Returns ------- should_process : `bool` Whether the menu should process the received interaction. """ check = self.user_check if check is None: should_process = True else: should_process = check(event) return should_process
3e587adbcd09b34b0e9cab9234278e6fb072d4b8
216,959
def find_divisors_v1(n): """Finds all divisors for a given number 'n' different than that number.""" if n == 1: return [1] return [x for x in range(1, n) if n%x == 0]
8ac88a66b90e9c85a6d8c341f25c03ae8fefed29
626,125
import pickle def pull_offset(fname): """Get progress from ``.status`` file.""" try: with open(fname + '.status', 'rb') as f: return pickle.load(f) except IOError: return 0
2a80f5b598c3334b5d9f0246c0e2f2074b3cf887
165,787
def _LJ_rminepsilon_to_ab(coeffs): """ Convert rmin/epsilon representation to AB representation of the LJ potential """ A = coeffs['epsilon'] * coeffs['Rmin']**12.0 B = 2 * coeffs['epsilon'] * coeffs['Rmin']**6.0 return {"A": A, "B": B}
0963c0e8b949d35842660a499ce80a388485773f
704,233
import colorsys def change_lightness_color(color, factor: float): """ Darken or lighten a RGB color according to factor. @Params: ------- color (Tuple[R,G,B]): a tuple of RGB color. factor (float): If factor < 1, then the lightened color is returned, otherwise the darkened color is returned. """ h, l, s = colorsys.rgb_to_hls(*color) return colorsys.hls_to_rgb(h, 1 - factor * (1 - l), s)
b09f24aedb4708069206701ed17e4874d8ccad6b
232,071
import math


def rank_with_ties(counter):
    """
    Given a Counter, build a dictionary mapping each item to its rank.

    e.g. {'ὁ': 1, 'καί': 2, 'αὐτός': 3, 'ἐγώ': 4, 'λέγω': 5, ...}

    Note that items occurring the same number of times will have the same rank
    and it will always be 1 more than the number of more frequent items so
    ranks are skipped if there are ties (you might get 1, 2, 2, 4 for example).
    """
    item_rank = {}
    prev_count = math.inf  # == no previous
    inc1 = 0  # this goes up with each item
    inc2 = 0  # this is set to inc1 whenever the next item has a lower count
    for count, item in sorted(((count, item) for item, count in counter.items()), reverse=True):
        inc1 += 1
        if count < prev_count:
            inc2 = inc1
            prev_count = count
        item_rank[item] = inc2
    return item_rank
ed1ce3cc4f6197a10730a0103940a19922dd7cf7
187,016
def point_box_relation(u, vbox):
    """
    Check where the point is located relative to the box.

    :param u: point to check (y, x)
    :param vbox: box to check point with (y0, x0, y1, x1)
    :return: code with the location of the point

        0   3   8
          -----
        2 | 4 | 7
          -----
        1   6   9
    """
    uy, ux = u
    vy0, vx0, vy1, vx1 = vbox

    if (ux < vx0 and uy <= vy0) or (ux == vx0 and uy == vy0):
        relation = 0  # 'left-above'
    elif vx0 <= ux < vx1 and uy <= vy0:
        relation = 3  # 'above'
    elif (vx1 <= ux and uy < vy0) or (ux == vx1 and uy == vy0):
        relation = 8  # 'right-above'
    elif vx1 <= ux and vy0 <= uy < vy1:
        relation = 7  # 'right-of'
    elif (vx1 < ux and vy1 <= uy) or (ux == vx1 and uy == vy1):
        relation = 9  # 'right-below'
    elif vx0 < ux <= vx1 and vy1 <= uy:
        relation = 6  # 'below'
    elif (ux <= vx0 and vy1 < uy) or (ux == vx0 and uy == vy1):
        relation = 1  # 'left-below'
    elif ux <= vx0 and vy0 < uy <= vy1:
        relation = 2  # 'left-of'
    elif vx0 < ux < vx1 and vy0 < uy < vy1:
        relation = 4  # 'inside'
    else:
        relation = None

    return relation
fb305ce83a142247b573f7055237d5fbff5a2219
83,893
def insert(new, target, n, pad=" "): """ insert new string to target as position n padded with pad characters """ if n==0: return new+target elif n>len(target): return target + pad*(n-len(target)) + new return target[0:n] + new + target[n:]
3a498e090807d5bdf65973c610db1fa3052f08eb
370,697
def value_counts_table(df, feature_name):
    """
    Creates a value counts dataframe.

    Args:
        df: Pandas DataFrame object.
        feature_name: Specified feature column name.

    Returns:
        Returns back a pandas Dataframe object of a feature's value counts
        with percentages.
    """
    # Value counts DataFrame
    value_count_df = df[feature_name].value_counts().rename_axis(
        'Unique Values').reset_index(name='Counts')

    total_count = sum(df[feature_name].dropna().value_counts().values)
    value_count_df["Percentage"] = ["{0:.4f}%".format(count / total_count * 100)
                                    for value, count in df[feature_name].value_counts().items()]
    value_count_df.set_index('Unique Values', inplace=True)

    return value_count_df
2077f073289638dd5fca639275edb6454f0512f9
172,896
def cast_to_str(labels, nested=False): """ Convert every label to str format. If nested is set to True, a flattened version of the input list is also returned. Args: labels: list Input labels nested: bool Indicate if the input list contains (or may contain) sublists. False by default. If True, a flattened version of the list is also returned. Results: labels_str: list Labels converted to str format labels_str_flat: list Flattened list of labels. Only returned if nested is set to True. """ if not nested: labels_str = [str(x) for x in labels] return labels_str else: labels_str = [] labels_str_flat = [] for x in labels: if isinstance(x, list): sublist = [] for xx in x: labels_str_flat.append(str(xx)) sublist.append(str(xx)) labels_str.append(sublist) else: labels_str_flat.append(str(x)) labels_str.append(str(x)) return labels_str, labels_str_flat
7e5b74a137ca3dfa06c50b1c3d680befd29a617d
680,385