Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import torch


def area(left_top: torch.Tensor, right_bottom: torch.Tensor):
    """Compute area of rectangles given two corners.

    :param left_top: (N, 2) left top corner
    :param right_bottom: (N, 2) right bottom corner
    :return: (N) area of the rectangle
    """
    hw = torch.clamp(right_bottom - left_top, min=0.0)
    return hw[..., 0] * hw[..., 1]
b5a2f531d294308c929c7aa944f7ddc74b619afa
256,850
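A quick usage sketch for the `area` helper above; the box coordinates are illustrative values, not from the source.

import torch

# Two axis-aligned boxes: (0,0)-(2,3) and a degenerate box (1,1)-(1,4) with zero width.
left_top = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
right_bottom = torch.tensor([[2.0, 3.0], [1.0, 4.0]])
print(area(left_top, right_bottom))  # tensor([6., 0.]) -- clamp keeps degenerate boxes at 0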
def percent_clear_days(clear_days, total_days): """Returns the percentage of clear days, given the number total days and clear days. Parameters ---------- clear_days : int Number of clear days. total_days : int Number of total days Returns ------- percent_clear : float Percentage of clear days. Example ------- >>> # Calculate percent clear >>> percent_clear = percent_clear_days(15, 31) >>> # Show percent clear >>> percent_clear 48.39 """ # Calculate percent clear percent_clear = round(100 * clear_days / total_days, 2) # Return result return percent_clear
e00f86c88384e2779f732d37e44cfe6081ce0929
180,292
def read_route(row): """ Reads in a single route. @param row: An iterable object containing the positions in string format. The specific format is "x_y". The connections are from the current object to the next object. @return: A tuple containing: 1) A dictionary of nodes and their corresponding positions 2) A list of node connections """ # Initialize the return values positions = {} connections = [] for i, pos in enumerate(row): x, y = [int(loc) for loc in pos.split('_')] connections.append((i, i + 1)) positions[i] = (x, y) connections.pop() return positions, connections
c57f1b02fce7b0483ad7cf0d8ccbbb983f00efb7
430,049
import re


def is_valid_orcid(orcid):
    """ Returns true if has correct syntax for an orcid. """
    # 0000-0003-1527-0030
    # raw string avoids invalid-escape warnings for \d on newer Python versions
    if re.match(r"\d\d\d\d-\d\d\d\d-\d\d\d\d-\d\d\d[0-9X]$", orcid):
        return True
    return False
c407be613cb3db84cfcb5e9d938d02314eb6a06a
511,165
def get_cookies_str(cookies):
    """Get cookies string from cookies dict."""
    l = [k + '=' + v for k, v in cookies.items()]
    return '; '.join(l)
fabe8dbb1eb314669835e100d113170ef1e2df8d
497,613
def generate_fake_oco_status(random_state, size):
    """
    Generate random OCO status, mostly assigned to 200 with some 300 status
    codes during a random range between 4 and 50 ticks
    """
    values = [200] * size
    picked_error_values_indexes = random_state.choice(
        size, round(0.001 * len(values)), replace=False
    )
    for index in picked_error_values_indexes:
        values[index] = 300
        _range = range(random_state.random_integers(0, 50))
        for n in _range:
            position = index + n
            if position < size:
                values[position] = 300
    return values
3dc056b2f08dd33de9ed2e5736fd2998fb3d14ff
661,115
from typing import Sequence def as_lines(trace) -> Sequence[str]: """Return the trace as a sequence of lines.""" return [' '.join(str(val) for val in pkt) for pkt in trace]
54f92f039c7eb5dba09a4fa6a3373f7b99121426
181,801
def ask(question: str, default: str = "") -> str:
    """A simple interface for asking questions to the user

    Three options are given:
        * question and no default -> this is plain input
        * question and a default value -> with no user input the default is returned
        * question and 'yes'/'no' default -> user can type n, y, yes, no
          case-insensitively and 'yes' or 'no' is returned in any case

    Arguments:
        question (str): the question for the user

    Keyword Arguments:
        default (str): a default value (default: {""})

    Returns:
        str -- the user answer or the default (can be an empty string)
    """
    if default == 'yes':
        appendix = " [Y/n] "
    elif default == 'no':
        appendix = " [y/N] "
    elif default:
        appendix = " [{}] ".format(default)
    else:
        appendix = " "
    try:
        answer = input(question + appendix)
    except EOFError:
        exit("Stdin was closed. Exiting...")
    return answer if answer else default
e0262592b32f893043b4bc48fd32e99e6b864732
18,961
from functools import reduce


def reverse_functional(value):
    """Reverse string in a functional way using the "reduce" function."""
    return reduce((lambda result, char: char + result), value, "")
a3cc92e777417fcfeb136e8a1d49ff57043da49e
203,946
def sequence_mask(seq_ids, valid_lengths): """ Args: seq_ids (Tensor): The whole sequence index, a tensor with a shape of [batch_size, sequence_length]. valid_lengths (Tensor): The valid length of every sequence, a tensor with a shape of [batch_size]. Returns: Tensor: Returns the output sequence mask `mask`. Its dtype is `bool` and has a shape of [batch_size, sequence_length]. """ lengths_exp = valid_lengths.unsqueeze(1) mask = seq_ids < lengths_exp return mask
567de98ad3bc0cb2b7ed0cc75be513e1ee9b36de
539,262
import base64


def decode(data: str) -> bytes:
    """Decode base64url (RFC 4648) encoded text

    :param data: Base64-encoded data
    :returns: Raw data
    :raises TypeError: if ``data`` is not a string
    :raises ValueError: if base64-decoding fails (e.g. if `data` contains
        non-base64 characters)

    >>> decode('dGVzdA')
    b'test'
    """
    if not isinstance(data, str):
        raise TypeError("Can only decode() strings.")
    # urlsafe_b64decode handles the '-'/'_' alphabet of base64url named in the
    # docstring; the appended "===" tolerates missing padding in the input.
    return base64.urlsafe_b64decode(data + "===")
bf0e4f5657de096483c6fdd4b3b95cc0663ba355
560,785
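A short usage sketch for `decode` above, exercising the doctest case plus one more unpadded input; the second value is illustrative only.

print(decode('dGVzdA'))   # b'test' -- padding is supplied by the appended "==="
print(decode('aGVsbG8'))  # b'hello'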
def GetListOfFeatureSizes(feature_sizes):
    """Extract the list of the dimensionality of each feature from string of comma
    separated values.

    Args:
      feature_sizes: string containing comma separated list of feature sizes

    Returns:
      List of the dimensionality of each feature. Elements in the list are integers.
    """
    # loop variable renamed so it no longer shadows the parameter
    list_of_feature_sizes = [
        int(feature_size) for feature_size in feature_sizes.split(',')]
    return list_of_feature_sizes
d5c724834d3667535bc001e9699e34b3033bb91b
589,186
def _get_element_type(tag): """This function extracts the type of tag specified in tag Args: tag (str or bytes): Full valid html tag from < to > Returns: str: type of HTML tag, e.g. div, p, meta, span, td, etc """ # decode tag parameter if its a bytes object tag = tag.decode() if isinstance(tag, bytes) else tag # clean up any leading or trailing spaces tag = tag.strip() # check that we open with an < if not tag.startswith("<"): raise ValueError("Parameter 'tag' does not start with '<'") # Start off the type variable after the open bracket tag_type = tag[1:] # Clean up leading spaces tag_type = tag_type.strip() tag_type_iterator_copy = tag_type # Find the first index of the next space or the closing brace cur_index = 0 for character in tag_type_iterator_copy: if character.isspace() or character == ">": tag_type = tag_type[:cur_index] break cur_index += 1 return tag_type
42548a9f33ceeaa1126eafdc128b496b435eb2fb
664,366
def fix2range(vals, minval, maxval): """ A helper function that sets the value of the array or number `vals` to fall within the range `minval` <= `vals` <= `maxval`. Values of `vals` that are greater than `maxval` are set to `maxval` (and similar for `minval`). Parameters ---------- vals : float or array_like The value(s) to 'fix'. minval : float The minimum value. maxval : float The maximum value. Returns ------- fixed_vals : float or array_like (matches `vals`) The fixed values. """ if not hasattr(vals, '__len__'): return max(min(vals, maxval), minval) vals[vals > maxval], vals[vals < minval] = maxval, minval return vals
d5df9ec213fdc48f6a30342b71571608a6bd1103
653,582
def notas(*num, sit=False):
    """
    -> Function to analyse the grades and situation of several students.
    :param num: one or more student grades (accepts several)
    :param sit: optional value indicating whether or not to add the situation
    :return: dictionary with several pieces of information about the class
    """
    print('-' * 75)
    resumo = {}
    totnum = maior = menor = soma = 0
    for c, n in enumerate(num):
        totnum += 1
        soma += n
        resumo['total'] = totnum
        if c == 0:
            maior = menor = n
            resumo['maior'] = n
            resumo['menor'] = n
        else:
            if n > maior:
                maior = n
                resumo['maior'] = maior
            elif n < menor:
                menor = n
                resumo['menor'] = menor
    resumo['média'] = soma / totnum
    if sit:
        if resumo['média'] >= 7:
            resumo['situação'] = 'BOA'
        elif resumo['média'] < 5:
            resumo['situação'] = 'RUIM'
        else:
            resumo['situação'] = 'RAZOÁVEL'
    return resumo
5243b6d858d372bd40ae2693db25134387242fae
121,645
def _plain_value_to_html(dict_, html=None): """Convert plain JSON-LD value to HTML.""" if html is None: html = [] html.append(dict_['@value']) return html
8579318b1ada26e1259550ff925d400bc5c5fa15
34,123
def zeros(n: int) -> int:
    """Return the number of trailing zeros in the binary representation of n.
    Return 0 if n == 0."""
    if n == 0:
        # guard: without this, the while loop below would never terminate
        return 0
    result = 0
    while n & 1 == 0:
        n = n // 2
        result += 1
    return result
b215f80dba4aa66ad6cf2baebb24bc6731acfd46
653,759
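A couple of illustrative calls for `zeros` above; the input values are arbitrary.

print(zeros(8))   # 3 -- 8 is 0b1000
print(zeros(12))  # 2 -- 12 is 0b1100
print(zeros(0))   # 0 -- guarded special case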
import hashlib


def get_md5_checksum(filename):
    """Return md5sum of a file."""
    with open(filename, "rb") as f:
        data = f.read()  # read file as bytes (renamed to avoid shadowing the builtin)
    return hashlib.md5(data).hexdigest()
a64eb5e63e013739b8d2c3b30aa4588cb34346e9
555,111
def chaincalls(callables, x): """ :param callables: callable objects to apply to x in this order :param x: Object to apply callables >>> chaincalls([lambda a: a + 1, lambda b: b + 2], 0) 3 """ for c in callables: assert callable(c), "%s is not callable object!" % str(c) x = c(x) return x
2da45724c945730b78876f39cf1487e2b2d33eb4
286,246
from typing import Any from typing import Callable def verify_operands_classes_and_forward_to_callable( x_operand: Any, y_operand: Any, operand_class: type, function: Callable[[Any, Any], Any], ) -> Any: """ verify_operands_classes_and_forward_to_callable verifies both operands are of type operand_class before calling function. :param x_operand: First operand. :type x_operand: Any :param y_operand: Second operand. :type y_operand: Any :param operand_class: type to compare against. :type operand_class: type :param function: callable to call. :type function: Callable[[Any, Any], Any] :raises TypeError: if either operand is not of operand_class. :return: The return value of the callable, in any. :rtype: Any """ if isinstance(x_operand, operand_class) and isinstance(y_operand, operand_class): return function(x_operand, y_operand) raise TypeError("x_operand and y_operand are not both ", operand_class)
8553b0ae796641226724cc9b0557b59dfe7a8fbf
37,161
def add_verbs(g, verb_list, vertex_shapes, vertex_names): """ Accepts a graph-tool graph object and a list of verbs to add as vertices in the graph. Updates the vertex_shapes vertex property map to make each verb a square Updates the vertex_name vertex property map to assign each verb string as a name for each vertex Creates a vertex_dict that can be used to lookup the vertex ID of a given verb string """ vertex_verb_dict = {} for vertex in verb_list: v = g.add_vertex() vertex_shapes[v] = 'square' vertex_names[v] = vertex vertex_verb_dict.update({vertex:int(v)}) return g, vertex_verb_dict
4c2a50b4667fca1502b443ee720ddae9e802b122
357,415
import torch def calculate_area(idx_sorted:torch.Tensor, vertices:torch.Tensor): """calculate area of intersection Args: idx_sorted (torch.Tensor): (B, N, 9) vertices (torch.Tensor): (B, N, 24, 2) return: area: (B, N), area of intersection selected: (B, N, 9, 2), vertices of polygon with zero padding """ idx_ext = idx_sorted.unsqueeze(-1).repeat([1,1,1,2]) selected = torch.gather(vertices, 2, idx_ext) total = selected[:, :, 0:-1, 0]*selected[:, :, 1:, 1] - selected[:, :, 0:-1, 1]*selected[:, :, 1:, 0] total = torch.sum(total, dim=2) area = torch.abs(total) / 2 return area, selected
ae4b31687c9249906f9934319a83c86f8c3bd056
544,723
from operator import neg def is_neg(t): """Whether t is of the form ~ A.""" return t.is_comb() and t.fun == neg
36fe19a9795dc6b47fc21c155d84ac49f919cb83
231,872
def neighbours(x, y, world):
    """
    Gets the number of neighbours of a given cell.

    Parameters:
        x (int) X coordinate
        y (int) Y coordinate
        world (list) 2D array with the current state.

    Returns:
        int Number of neighbours of the cell at coordinates [x;y]
    """
    # Number of neighbours
    count = 0
    # maximum coordinate on the x axis
    x_max = len(world[0]) - 1
    # maximum coordinate on the y axis
    y_max = len(world) - 1

    # Note: it helps to say that the coordinates have their origin in the top
    # left corner. The x coordinate grows to the right and the y coordinate
    # grows downwards.
    # To get the y-th row: world[y]
    # To get the x-th element of the y-th row: world[y][x]
    # The coordinate J represents the x coordinate and K the y coordinate.
    for j in range(x - 1, x + 2):
        for k in range(y - 1, y + 2):
            # A cell cannot be its own neighbour
            if j == x and k == y:
                continue
            # Trying to reach outside the playing field on the x axis
            if j < 0:
                # len(world[0]) returns the number of elements. Because j will
                # be -1 or less (-2), this always lands on the last
                # (second-to-last) index within the row
                j = len(world[0]) + j
            # Trying to reach outside the playing field on the x axis
            if j > x_max:
                # Wrap around back to the beginning
                j = 0
            if k < 0:
                k = len(world) + k
            if k > y_max:
                k = 0
            # If the previous step has a 1 at coordinates k,j it is added,
            # otherwise nothing happens :)
            count += world[k][j]
    return count
d780d79d31b99d1b753ed4a19bc3593e15a5af82
656,609
def is_pal(test): """ Returns bool of whether test is a palindrome. Does so iteratively """ # Reverse the test reverse_test = test[::-1] # Loop through to test test against reverse for i in range(len(test)): if test[i] != reverse_test[i]: return False return True
c709cef691717dc48d65885f5d3e3c00c2148f58
520,376
def shift_bytes(key: int, byte: int) -> int:
    """Subtract key from byte"""
    return byte - key
75b307efa34f4cda5119a53f93352d4f587d957b
103,496
def nt_to_tuple(nt): """Convert a namedtuple instance to a tuple. Even if the instance's __iter__ method has been changed. Useful for writing derived instances of typeclasses. :param nt: namedtuple instance. :returns: A tuple containing each of the items in nt """ return tuple(getattr(nt, f) for f in type(nt)._fields)
8642f02649611895791f8cf402d62a0774bcd44b
139,118
def check_msg(msg):
    """ Check if message contains Error or Warning for run_analysis functions.

    Args:
        msg (str): Message from server/client code

    Returns:
        err (bool): True if there is an error
        msg (str): Message informing the user whether the program is running
            properly or there is an error or warning.
    """
    err = False
    if 'Error' in msg:
        err = True
    elif "Warning" in msg:
        msg = 'Success! Warning: Image already exists. ' \
              'Processing ran on existing image'
    else:
        msg = 'Image saved successfully'
    return err, msg
5ca6214970dfb4ede4e3db6abe5db14e764cef55
438,804
def find_array_dim(dic, ddim): """ Find array dimension which corresponds to dictionary dimension. Parameters ---------- dic : dict Dictionary of RNMRTK parameters. ddim : int, non-negative Dimension in dictionary. Returns ------- dim : int Dimension in array which corresponds to dictionary dimension, ddim. """ dic_dims = [int(i[1]) for i in dic['layout'][1]] return dic_dims[ddim]
f8154853244935f92154711fdbae6f86b3d0ebde
148,901
def find_negative_temps(temps): """ Returns a new list with just the negative temperatures from temps. >>> find_negative_temps([-13, 45, -1, 0, 23]) [-13, -1] >>> find_negative_temps([]) [] >>> find_negative_temps([23, 36, 21]) [] >>> find_negative_temps([-30, -60, -10]) [-30, -60, -10] """ # Python allows function definitions inside other funcs def is_negative(temp): return temp < 0 return list(filter(is_negative, temps))
36c8d51b47edd38550128a4ae9b8913471ffd83f
441,954
import torch def create_pinhole(fx, fy, cx, cy, height, width, rx, ry, rz, tx, ty, tz): """Creates pinhole model encoded to a torch.Tensor. """ return torch.Tensor([ [fx, fy, cx, cy, height, width, rx, ry, rz, tx, ty, tz]])
2ffa87c9c7b243ff9769329d0e705a245cb52bef
643,944
def _formatted_table_to_dict(formatted_table): """Convert a single-row table with header to a dictionary""" headers = [ header.strip() for header in formatted_table[0].split(" ") if len(header) > 0 ] fields = [ field.strip() for field in formatted_table[1].split(" ") if len(field) > 0 ] return dict(zip(headers, fields))
0fafc769d3e9b74a605cd1af0516b8dea279b5f5
91,073
import re def number_aware_tokenizer(doc): """ Tokenizer that maps all numeric tokens to a placeholder. For many applications, tokens that begin with a number are not directly useful, but the fact that such a token exists can be relevant. By applying this form of dimensionality reduction, some methods may perform better. """ token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b') tokens = token_pattern.findall(doc) tokens = ["#NUMBER" if token[0] in "0123456789_" else token for token in tokens] return tokens
47048caaf9de5bf7fbcd166279cf095fffd6e31e
307,524
def summarize_lane(lane_lines, column_mapping): """Given a list of lines, summarize what they contain, returning a dict of { project: { pool: [ list of libs ] } } The caller is presumed to have filtered the lines by lane already. """ #Make a dict of dicts keyed on all the projects seen res = dict() for line in lane_lines: sample_project = line[column_mapping['sample_project']] sample_id = line[column_mapping['sample_id']] #Pool and library should be combined in the sample_id if '__' in sample_id: sample_pool, sample_lib = sample_id.split('__') else: sample_pool, sample_lib = '', sample_id # I used to set 'NoPool' to '' at this point but it turned out to be a bad idea. #Avoid use of defaultdict as it gums up YAML serialization. This is equivalent. res.setdefault(sample_project, dict()).setdefault(sample_pool, []).append(sample_lib) return res
a4100d0b65762a2859e0f775f48326b42fa78b68
605,471
def getDetailedChannelBoxAttrs(node): """ Return the list of attributes that are included when the 'detailed channel box' is enabled for a node. """ attrs = [ # rotate order 'ro', # rotate axis 'rax', 'ray', 'raz', # rotate pivot 'rpx', 'rpy', 'rpz', # scale pivot 'spx', 'spy', 'spz', # rotate pivot translate 'rptx', 'rpty', 'rptz', # scale pivot translate 'sptx', 'spty', 'sptz', ] if node.nodeType() == 'joint': attrs += [ # joint orient 'jox', 'joy', 'joz', ] return attrs
649eff52fcc43243891ce853732c2cf914ecc60a
23,889
def arithmetic_mean(values):
    """Zero-length-safe arithmetic mean."""
    if not values:
        return 0
    return sum(values) / len(values)
50c410ace6af28777d30cc6926bcf9652f6c639f
395,624
def train_test_split(data, target, train_ind, test_ind): """ Splits ML input and targets into training and test sets from the specified indices Parameters ---------- data : npy array of the input time series target : npy array of ML targets train_ind : list of training indices test_ind : list of test indicies Returns ---------- train : npy array of time series training set test : npy array of time series test set train_target : npy array of training targets test_target : npy array of test targets """ train = data[train_ind,:] train_target = target[train_ind] test = data[test_ind,:] test_target = target[test_ind] return train, test, train_target, test_target
0242f8d596f9d1efeee555cb2078fd3440fadd69
517,979
def read_table(filename, usecols=(0, 1), sep='\t', comment='#', encoding='utf-8', skip=0): """Parse data files from the data directory Parameters ---------- filename: string Full path to file usecols: list, default [0, 1] A list of two elements representing the columns to be parsed into a dictionary. The first element will be used as keys and the second as values. Defaults to the first two columns of `filename`. sep : string, default '\t' Field delimiter. comment : str, default '#' Indicates remainder of line should not be parsed. If found at the beginning of a line, the line will be ignored altogether. This parameter must be a single character. encoding : string, default 'utf-8' Encoding to use for UTF when reading/writing (ex. `utf-8`) skip: int, default 0 Number of lines to skip at the beginning of the file Returns ------- A dictionary with the same length as the number of lines in `filename` """ with open(filename, 'r') as f: # skip initial lines for _ in range(skip): next(f) # filter comment lines lines = (line for line in f if not line.startswith(comment)) d = dict() for line in lines: columns = line.split(sep) key = columns[usecols[0]].lower() value = columns[usecols[1]].rstrip('\n') d[key] = value return d
81e70a1db8530940d73cf8242b791c3cab473b9c
47,585
def calculate_depth_increases(depth_windows: list[int]) -> int: """Calculates the number of times the depth increases, compared to the previous depth band""" depth_increases: int = 0 for index, window in enumerate(depth_windows): if index == 0: continue else: if window > depth_windows[index - 1]: depth_increases += 1 return depth_increases
6dd55f1c408f45d98ed7cd05f3766a063d598a1d
147,792
def dict_get_element_by_index(dictionary: dict, index: int) -> str:
    """Get the key of a dictionary from an index.

    Args:
        dictionary (dict): The dictionary you want to find a key from.
        index (int): The key index you want to find.

    Returns:
        str: The key corresponding to the given index.
    """
    return list(dictionary.keys())[index]
72c952c9507f911d483d5768dad110777b515642
188,073
def first_and_last(a): """ Finds first and last elements in a list Args: a: the list Returns: The first and last elements in the list Raises: IndexError: if passed an empty list TypeError: if passed an object which is not a list """ if not isinstance(a, list): raise TypeError if len(a) < 2: raise IndexError return [a[0],a[-1]]
f79530f432de9d4775c033dd5dd8f841ff03b6fc
452,233
def exc_isinstance(exc_info, expected_exception, raise_not_implemented=False): """ Simple helper function as an alternative to calling `~.pytest.ExceptionInfo.errisinstance` which will take into account all the "causing" exceptions in an exception chain. Parameters ---------- exc_info : `pytest.ExceptionInfo` or `Exception` The exception info as returned by `pytest.raises`. expected_exception : `type` The expected exception class raise_not_implemented : bool, optional Whether to re-raise a `NotImplementedError` – necessary for tests that should be skipped with ``@skip_if_not_implemented``. Defaults to ``False``. Returns ------- correct_exception : bool Whether the exception itself or one of the causing exceptions is of the expected type. """ if exc_info is None: return False if hasattr(exc_info, 'value'): exc_info = exc_info.value if isinstance(exc_info, expected_exception): return True elif raise_not_implemented and isinstance(exc_info, NotImplementedError): raise exc_info return exc_isinstance(exc_info.__cause__, expected_exception, raise_not_implemented=raise_not_implemented)
7e53dd94b7326faea1fe5accdc60b1b3b003f0af
14,524
def get_return_losses(excitation_names, excitation_name_prefix=''):
    """Get the list of all the return losses from a list of excitations. If no
    excitation is provided it will provide a full list of return losses.

    Example: excitation_names ["1", "2"], is_touchstone_expression=False
    output ["S(1,1)", "S(2,2)"]

    Example: excitation_names ["S(1,1)", "S(1,2)", "S(2,2)"], is_touchstone_expression=True
    output ["S(1,1)", "S(2,2)"]

    Parameters
    ----------
    excitation_names :
        list of excitations to include
    excitation_name_prefix :
         (Default value = '')

    Returns
    -------
    type
        list of strings representing return losses of excitations
    """
    spar = []
    if excitation_name_prefix:
        excitation_names = [i for i in excitation_names if excitation_name_prefix.lower() in i.lower()]
    for i in excitation_names:
        spar.append("S({},{})".format(i, i))
    return spar
99a38fe453c14956dfad30cdc504973fe900b849
556,979
def extract_axi_res_from_hls_rpt(rpt_path): """Extract the resource usage for AXI modules from the HLS report in text format Parameters ---------- rpt_path: str The path of HLS report Returns ------- BRAM18K, FF, LUT """ with open(rpt_path) as f: lines = f.readlines() BRAM18K_total = 0 FF_total = 0 LUT_total = 0 for line in lines: if line.find("kernel0_gmem_") != -1: line = line.split("|") BRAM18K_total += float(line[3]) FF_total += float(line[5]) LUT_total += float(line[6]) return BRAM18K_total, FF_total, LUT_total
7973d73323aadc9354ca84c443e7a42b64bb5a42
383,812
def multif0_to_timefreq(times, freqs): """Unroll a multif0 annotation of the form (t, [f1, f2, f3]) to a list of (t, f) where t may be repeated. Parameters ---------- times : list Time stamps freqs : list of lists all frequencies for a given time stamp Returns ------- t_unrolled : list Unrolled time stamps f_unrolled : list Unrolled frequency values """ t_unrolled = [] f_unrolled = [] for t, f_list in zip(times, freqs): for f in f_list: if f == 0: continue t_unrolled.append(t) f_unrolled.append(f) return t_unrolled, f_unrolled
aa0266e14bd2323926ff38cfca1b74897d75c137
305,068
def order_keys(hex_sessions): """ Returns list of the hex sessions in (rough) time order. """ orderedKeys = [] for key in sorted(hex_sessions.keys(), key=lambda key: hex_sessions[key][1]): orderedKeys.append(key) return orderedKeys
a680f0ffbfc322303e70b517e88a22f6f7e4fec0
402,448
def word_tokenizer(texts: list, tokenizer_obj) -> list: """ :param texts: list of sents ex: ["first sent", "second sent"] :param tokenizer_obj: :return: list of tokenized words ex: [["first", "sent"], ["second", "sent"]] """ return [tokenizer_obj(text) for text in texts]
270004efff28e53a6d969983101407ac3ecd0364
479,930
def minutes_seconds(seconds):
    """Method for timer time

    Returns a tuple of minutes and seconds for the timer"""
    return int(seconds / 60), int(seconds % 60)
f2c7d0e47bb00bbc01c3b4412f1c0352a94af513
206,916
def roi_center(roi): """Return center point of an ``roi``.""" def slice_center(s): return (s.start + s.stop) * 0.5 if isinstance(roi, slice): return slice_center(roi) return tuple(slice_center(s) for s in roi)
ae6f9fc6af535ed1643481fb7b38f4dd8aa6de72
640,998
# OrderedDict comes from collections; typing.OrderedDict is meant for annotations only
from collections import OrderedDict


def list_drop_duplicates(li: list, keep: str = 'first') -> list:
    """
    Drop duplicates from a (ordered) list
    :param li: List to drop duplicates from
    :param keep: Keep first or last occurrence of the unique items
    """
    if keep == 'first':
        return list(OrderedDict((x, True) for x in li).keys())
    elif keep == 'last':
        li.reverse()
        li = list(OrderedDict((x, True) for x in li).keys())
        li.reverse()
        return li
    else:
        raise ValueError(f'Cannot parse {keep} as argument for keep. This should be either "first" or "last"')
b43b59a7d6ea266843266ee3eb8d5af5eaf7bb33
36,287
def clamp(min_v, max_v, value): """ Clamps a value between a min and max value Args: min_v: Minimum value max_v: Maximum value value: Value to be clamped Returns: Returns the clamped value """ return min_v if value < min_v else max_v if value > max_v else value
1a9aaf3790b233f535fb864215444b0426c17ad8
709,101
import re def regexPattern(prefix = None, suffix = None, groups = None): """ Generates a regexPattern from user input: a column of entries separated by newline. Parameters: prefix: str. default None. prefix to each entry suffix: str. default None. suffix to each entry groups: str. default None. 'capture' : creates capturing groups around each entry 'noncapture' : creates noncapturing groups around each entry Requires regex as re Returns compiled regex pattern and prints as string """ regexEntries = input('Paste column of entries:') regexEntriesList = regexEntries.split("\n") revRegexList = [] for entry in regexEntriesList: if prefix != None: entry = prefix + entry if suffix != None: entry = entry + suffix if groups == 'capture': entry = '('+entry+')' if groups == 'noncapture': entry = '(?:'+entry+')' revRegexList.append(entry) pattern = re.compile(r'|'.join(revRegexList)) print("*****************************************") print(r'|'.join(revRegexList)) return pattern
e472a88aad2605c1d79f7cc79a5ca6b9fec40488
152,105
def get_api_url(job_id):
    """ Return the WDL PersecData API endpoint

    The endpoint returned by this module is:
        https://api.welldatalabs.com/persecdata/<job_id>

    Parameters
    ----------
    job_id: str
        The job_id to search the PerSec API for

    Returns
    -------
    url: str
        The PersecData API endpoint
    """
    protocol = 'https'
    base_url = 'api.welldatalabs.com'
    endpoint = 'persecdata'
    url = f'{protocol}://{base_url}/{endpoint}/{job_id}'
    return url
20f6fd5951809be21ebabe4d3dd5c3dd315109d7
169,074
import re def find_next_nothing(body, current_nothing): """ Find the next nothing from the response body. """ m = re.search('and the next nothing is ([0-9]+)', body) if m is not None: return int(m.group(1)) else: if body == 'Yes. Divide by two and keep going.': # There is an exception to the rule where we are told to divide! return current_nothing / 2
713e2ef5b52cccf9b96de0965c489539e6a70b19
264,307
def letter_count(word, letter):
    """ Returns the number of instances of "letter" in "word" """
    return word.count(letter)
17121bb6820db0ece553cc0021ad4daba70df945
663,016
def make_cutout(image, start_i, start_j, h_monster, w_monster): """ Create cutout from final image from coordinates (start_i, start_j) and as size as monster image. """ new_image = "" image = image.split("\n") for i_row in range(start_i, start_i + h_monster): new_image += image[i_row][start_j: start_j + w_monster] + "\n" return new_image
ae0b2a9635ebf64c7fb31dd5e5e2c93d5b11c3e6
114,943
import re def build_key_pattern(genus): """Build a regex pattern for the genus key Parameters: genus - the genus of the file (a string) The pattern has one subgroup: the genus and species name """ # --- Species name from index line --- # # Relies on the assumption that index lines have the following format # # n. <genus> <species> [(in part)]\n # # Where n is an arbitrary natural, genus is specified, species is a # lowercase word and "(in part)" doesn't necessarily appear. # # The key pattern matches two subgroups: # 1. The number that orders how the species appears in the text # 2. The genus and species name key_pattern = re.compile(r'(\d+)\.[ ]*('+genus+r' (?:x\\)?[a-z\-]+)'+ r'(?: \(in part\))?\s*\n', flags=re.MULTILINE) return key_pattern
c87787dbf2b25ba33c3b5da5b97860d6fd1271ae
671,104
def rds_product_engine_match(product, engine): """ Check whether an RDS reservation 'product' matches a running instance 'engine' """ return (product, engine) in (('postgresql','postgres'), ('mysql','mysql'), # note: not sure if this is correct )
4373ba6a51e5a5d80aeb4a410f60064becf2ede1
43,369
def restrictGender(mp, category, gender): """ Selects the minimal pairs that have a given gender For example, allows one to only keep pairs with masculine entries """ dropouts_1 = list(mp.loc[(mp.cgram_1 == category) & (~mp.genre_1.str.contains(gender,na=True))].index) dropouts_2 = list(mp.loc[(mp.cgram_2 == category) & (~mp.genre_2.str.contains(gender,na=True))].index) pairs = mp.drop(list(set(dropouts_1 + dropouts_2))) return pairs
ab3254e6edfa8ce158c0ea7b89b6af9229385c50
605,632
def season_months(season): """ Return list of months (1-12) for the selected season. Valid input seasons are: ssn=['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'djf', 'mam', 'jja', 'son', 'mayjun', 'julaug', 'marapr', 'jjas', 'ond', 'ann'] """ ssn=['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec', 'djf', 'mam', 'jja', 'son', 'mayjun', 'julaug', 'marapr', 'jjas', 'ond', 'ann'] imon = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, [1,2,12], [3,4,5], [6,7,8], [9,10,11], [5,6], [7,8], [3,4], [6,7,8,9], [10,11,12], range(1,13)] try: ifind = ssn.index(season.lower()) except ValueError: raise ValueError('Season not found! Valid seasons: ' + ', '.join(ssn)) months = imon[ifind] # Make sure the output is a list if isinstance(months, int): months =[months] return months
5c25922a6fd777599138b6b84db736fb8ace48a7
105,884
def get_page(request, current_url, website): """Looks up the current page and its title in the page configuration.""" page = {} title = 'Untitled' if request.matched_route: rname = request.matched_route.name try: title, page = next( (t, p) for t, p in website['pages'].items() if rname == p.get('route') ) except StopIteration: # Yield the default page/title above pass return dict(page, title=title)
2a5b7d3e9ebd966824701dbb65dbd1767afc3607
327,913
import torch def loss_kl(z_mean, z_logvar): """ Args: z_mean of shape (batch_size, n_components): Means of the approximate distributions of the codes. z_logvar of shape (batch_size, n_components): Log-variance of the approximate distributions of the codes. Returns: loss (torch scalar): Kullback-Leibler divergence. """ loss = (0.5 * torch.sum(torch.exp(z_logvar) + z_mean**2 - 1.0 - z_logvar))/z_mean.shape[0] return loss
99f2ba72ee14a2ac5a7cff68ec4db3da6625d059
307,953
def _format_ranges(ranges): """ Create a list of ranges. Range: [h1, s1, v1] to [h2, s2, v2] Examples -------- input: [ [[0, 5], [175, 180]], # hue, two ranges [100, 200], # saturation, only one range [50, 75] # value, only one range ] Because Hue has two ranges then the result has two ranges. Satur. and val. have only one range and so their first (and now only one) is duplicated to each result range. result: [ [[0, 100, 50], [5, 200, 75]], # first range [[175, 100, 50], [180, 200, 75]] # second ] """ # Make all len 2 if any one is len 2 if max([len(x) for x in ranges]) == 2: hues = ranges[0] if len(hues) == 1: hues *= 2 saturs = ranges[1] if len(saturs) == 1: saturs *= 2 vals = ranges[2] if len(vals) == 1: vals *= 2 else: hues, saturs, vals = ranges[0], ranges[1], ranges[2] new_ranges = [] for i in range(len(hues)): start = [hues[i][0], saturs[i][0], vals[i][0]] end = [hues[i][1], saturs[i][1], vals[i][1]] new_ranges.append([start, end]) return new_ranges
572b0731a8e4912a045ef7ff82ae5567087eda77
244,142
def statistics(prediction, ground_truth, beta=1): """Computes performance statistics for classifiers. Parameters ---------- prediction : set Set of objects predicted to be labeled positive. ground_truth : set Set of objects actually labeled positive. beta : float, optional Sets the beta for an F-beta score. Defaults to 1. Returns ------- (float, float, float) Tuple representing (precision, recall, f_beta). """ true_positives = ground_truth & prediction false_positives = prediction - ground_truth if len(prediction) == 0: # to avoid division-by-zero errors precision = 0.0 else: precision = len(true_positives) / (len(true_positives) + len(false_positives)) recall = len(true_positives) / len(ground_truth) if precision == 0.0 and recall == 0.0: # to avoid division-by-zero errors f_beta = 0.0 else: f_beta = (1 + beta ** 2) * (precision * recall) / ((beta ** 2 * precision) + recall) return (precision, recall, f_beta)
4b937a98a76431b979f7be33e732de82a46320bf
638,143
def tarball_name(spec, ext): """ Return the name of the tarfile according to the convention <os>-<architecture>-<package>-<dag_hash><ext> """ return "%s-%s-%s-%s-%s%s" % (spec.architecture, str(spec.compiler).replace("@", "-"), spec.name, spec.version, spec.dag_hash(), ext)
9822c3977db2afe52fd1f7c74af7d293e964084b
137,617
import typing import random def sort(array: list) -> list: """Quick sort implementation with random pivot point. """ total_length: int = len(array) if total_length < 2: return array pivot: typing.Any = array[random.randint(0, total_length - 1)] less_pivot: list = [] more_pivot: list = [] equal_pivot: list = [] for item in array: if item < pivot: less_pivot.append(item) elif item > pivot: more_pivot.append(item) else: equal_pivot.append(item) return sort(less_pivot) + equal_pivot + sort(more_pivot)
466cf6b38479d8876bfe25bd7705caa669cfffaa
581,860
import json def load_image_infos(img_dir, image_file): """ Loads the image filenames from visual genome from the JSON file that contains them. This matches the preprocessing in scene-graph-TF-release/data_tools/vg_to_imdb.py. Parameters: image_file: JSON file. Elements contain the param "image_id". img_dir: directory where the VisualGenome images are located Return: List of filenames corresponding to the good images """ with open(image_file, 'r') as f: im_data = json.load(f) img_infos = {} for i, img in enumerate(im_data): # align with coco format img['id'] = img['image_id'] img['file_name'] = img['filename'] img_infos[img['image_id']] = img return img_infos
9d2944ff93d6efdc9ecf9abba22b571f01a029f7
555,342
def convert_to_bert_vocab(vocab, items): """ Converts a sequence of [tokens|ids] using the vocab. Tokens not in dictionary are skipped. :param vocab: dictionary :param items: list of tokens (strings) :return: """ output = [] for item in items: try: output.append(vocab[item]) except KeyError: continue return output
603937693724ca27f48e019e324087cf5d7561aa
544,831
def getColor(x): """Selects an html color based on 0 <= x <= 100 Parameters ---------- x : float percent of injection to color visually. Higher the percent the darker the color Returns ---------- html color name useful for classifying """ if x >= 75: return "red" elif x >= 50: return "orange" elif x >= 25: return "yellow" elif x >= 5: return "lime" else: return "white"
d69e768f5e664872d2c2a603f4137a52a45bccb6
95,522
def sample(dist, n=None):
    """Sample n instances from distribution dist"""
    if n is None:
        return dist.rsample()
    else:
        return dist.rsample((n,))
03bc346d475b52c42d0f27c618d7f247c9a27ecf
282,535
def does_layer_accept_1d_feature(layer): """ Check if 1D feature values are valid for the layer. Parameters ---------- layer : object Returns ------- bool """ return (layer.output_shape == (1,))
8af9b16c54c17d0f46ebcf5ddd8044351f7210a4
319,383
import json def _is_valid_work_order_json(work_order_id, worker_id, requester_id, work_order_request): """ Validate following fields in JSON request against the ones provided outside the JSON - workOrderId, workerId, requesterId """ json_request = json.loads(work_order_request) if (work_order_id == json_request.get("workOrderId") and worker_id == json_request.get("workerId") and requester_id == json_request.get("requesterId")): return True else: return False
3e9eb6acb659744450886314a875c5396ff2b2b0
262,053
def keep_line(line):
    """Returns true for lines that should be compared in the disassembly output."""
    return "file format" not in line
c8885ee67a8f884f60c913251c99c3eae42406c3
55,609
def _str_list(l):
    """Return a string representing list of strings."""
    return " ".join(sorted(l, key=str.lower)) if l else "[none]"
bcd5087a8001f0bf78094e84e5942b25b7efb611
529,336
def _calc_target_bounding_box(bbox, source_shape, target_shape): """Calculates the bounding of the cropped target image. ``bbox`` is relative to the shape of the source image. For the target image, the number of pixels on the left is equal to the absolute value of the negative start (if any), and the number of pixels on the right is equal to the number of pixels target size exceeding the source size. Args: bbox (tuple[slice]): The len==3 bounding box for the cropping. The start of the slice can be negative, meaning to pad zeros on the left; the stop of the slice can be greater than the size of the image along this direction, meaning to pad zeros on the right. source_shape (tuple[int]): The spatial shape of the image to crop. target_shape (tuple[int]): The spatial shape of the cropped image. Returns: tuple[slice]: The bounding box of the cropped image used to put the extracted data from the source image into the traget image. """ target_bbox = list() for bounding, ssize, tsize in zip(bbox, source_shape, target_shape): target_start = 0 - min(bounding.start, 0) target_stop = tsize - max(bounding.stop - ssize, 0) target_bbox.append(slice(target_start, target_stop, None)) return tuple(target_bbox)
7e6a18761cc4f8455c6e9d44bd51944d3b329e7b
548,059
from math import floor, log10


def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
    """
    Returns a string representation of the scientific notation of the given
    number formatted for use with LaTeX or Mathtext, with specified number of
    significant decimal digits and precision (number of decimal digits to
    show). The exponent to be used can also be specified explicitly.
    """
    if exponent is None:
        exponent = int(floor(log10(abs(num))))
    coeff = round(num / float(10**exponent), decimal_digits)
    if precision is None:
        precision = decimal_digits

    return r"${0:.{1}f}\times$".format(coeff, precision) \
        + "10" + r"$^{{{0:d}}}$".format(exponent)
4990887697d86a5ce5453f289c559991652c5269
695,723
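Two illustrative calls for `sci_notation` above; the input numbers are arbitrary examples.

print(sci_notation(1234))        # $1.2\times$10$^{3}$
print(sci_notation(0.00042, 2))  # $4.20\times$10$^{-4}$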
def sqrt(dimensioned_number): """Take the square root of a Dn() instance. All of the dimension exponents must be divisible by 2. math.sqrt() is called on the numeric part of the Dn(). :param dimensioned_number: A Dn() instance. :returns: A Dn() instance. """ return dimensioned_number.sqrt()
ae52f2ad95a55bbbe4bf2bb8b44ed3cdbfbaf894
469,649
def chunk_list(l, n):
    """ Chunks list into N sized chunks as list of list. """
    if n <= 0:
        raise ValueError('Chunk size of %s specified, which is invalid, must be positive int.' % n)
    results = []
    for i in range(0, len(l), n):
        results.append(l[i:i + n])
    return results
4f243e50341fffa6a2ad035591b0b7b74c9578dc
598,814
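A minimal usage example for `chunk_list` above; the list and chunk size are illustrative.

print(chunk_list([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]] -- last chunk may be shorter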
def bisect(v, midpoint):
    """Split ordered sequence *v* at the index *midpoint*."""
    return (v[:midpoint], v[midpoint:])
314ae15e9a69216dd311700935ed310c37717e85
218,903
from datetime import datetime def str2DT(dt_string): """ convert a datetime expressed in the GNOME format to a datetime object """ day, month, year, hour, minute = [int(i) for i in dt_string.split(',')] dt = datetime(year, month, day, hour, minute) return dt
ff7812a4ba17ea3ce9389f5c4859e8cfde55659a
129,590
def _escape_arg(arg): """Put quotes around the input string, escaping slashes and quotes within.""" # A little bit of a hack: don't escape arguments that splice in another # shell command "$(...)". if arg.startswith("$(") and arg.endswith(")"): return arg # Escape backslashes. However, in the pattern "\$", leave it as-is so that we can # emit "$" without the shell evaluating them. For example: "-rpath=\$EXEC_ORIGIN/..." return "\"" + arg.replace("\\", "\\\\").replace("\\\\$", "\\$").replace("\"", "\\\"") + "\""
4cbe763724714f224f66b32eef8bdc3acb136a67
553,118
def largest(die):
    """Return the largest value die can take on."""
    return max(die)
17ad51ecb0960d3c83c2c8c5e8e67465046d8745
23,484
def _CheckFileTimeoutMetaTags(f): """Checks if the given file has timeout meta tags.""" new_contents = f.NewContents() for line in new_contents: if 'name="timeout" content="long"' in line: return True return False
f6d6f2a972c579e8521a1bbcf2a98f1f98bd1e47
390,077
def random_floats(seed, min, max, n): """ Linear congruential random number generator to create n random float values in a range of [min,max] """ #Constants from Numerical Recipes #Modulus 2**32 m = 4294967296 a = 1664525 c = 1013904223 rand_max = m-1 rand_range = float(max-min) xi = seed%m xf = min+float(xi)/float(rand_max)*rand_range xl = [xf] for i in range(n-1): xi = (a*xi + c)%m xf = min+float(xi)/float(rand_max)*rand_range xl.append(xf) return xl
70403e8061c9aa860502f96a285f0e778858903a
173,183
def isoverlap(tup1, tup2): """ Determine if two intervals overlap. """ return (tup2[0] <= tup1[0] <= tup2[1]) or (tup1[0] <= tup2[0] <= tup1[1])
c0f9f7dcd0ece040bfa202e9ab572c9010088af8
576,223
def date_format_to_human(value): """ Convert Python format date to human. Example: >>> date_format_to_human('%Y-%m-%d') YYYY-MM-DD :param value: Date format example: %Y-%m """ maps = { '%y': 'YY', '%Y': 'YYYY', '%m': 'MM', '%d': 'DD', } for k, v in maps.items(): value = value.replace(k, v) return value
4f646763016db1e5b161334dbc39ab06ae76e5f7
617,668
def format_filesize(value): """ Return a human readable filesize for a given byte count """ if not value: return "" for x in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']: if value < 1024.0: return "%3.1f %s" % (value, x) value /= 1024.0 return value
17ed071f0c4cdf12cc66ed2da5396757e0292579
118,507
def create_edges(nodes, allow_partner=False): """ Given a list of Elf nodes, return a list of edges. allow_partner: if True, partners are also taken into account for possible gift making relationships. On default, partners are not allowed to be drawn as either elf or giftee of one another. """ edges = [ (node, neighbour) for node in nodes for neighbour in nodes if node != neighbour ] if not allow_partner: for node in nodes: for neighbour in nodes: if node.partner == neighbour.name: edges.remove((node, neighbour)) return edges
672315b290324678d16ccf94f8d116c632027966
604,735
def eq_12_dimensionless_hrr_line( Q_dot_l_kW_m: float, rho_0: float, c_p_0_kJ_kg_K: float, T_0: float, g: float, L_A: float, ) -> float: """Equation 12 in Section 8.3.2.2 PD 7974-1:2019 calculates dimensionless heat release rate for line fire source. Note dimension ratio should be less than 0.4 (i.e. L_A / L_B) to use line fire source correlation. :param Q_dot_l_kW_m: in kW/m, fire heat release rate per unit length along the line. :param rho_0: in kg/m^3, density of ambient air. :param c_p_0_kJ_kg_K: in kJ/kg/K, specific heat capacity of ambient air. :param T_0: in K, ambient air temperature. :param g: in m/s^2, acceleration due to gravity. :param L_A: in m, length of line shaped fire source. :return Q_dot_star_rect: dimensionless, dimensionless heat release rate """ # equation starts aa = Q_dot_l_kW_m bb = rho_0 * c_p_0_kJ_kg_K * T_0 * (g ** 0.5) * (L_A ** 1.5) Q_dot_star_line = aa / bb return Q_dot_star_line
b19ee1945596b5ef57a6899568dd89f16f7092cb
225,518
def text2caesar(text,shift = 3): """ Returns the encrypted text after encrypting the text with the given shift Parameters: text (str): The text that needs to be encrypted in Caesar's cipher shift (int): The shift that should be used to encrypt the text Returns: result (str): The encrypted text """ result = "" for i in range(len(text)): char = text[i] if char.isupper(): result += chr((ord(char) + shift-65) % 26 + 65) elif char.islower(): result += chr((ord(char) + shift - 97) % 26 + 97) else: result += char return result
84dc3307224c3773132b5aa33d4e9f31bd83dd64
67,201
from typing import Union def get_readable_size(num_bytes: Union[int, float]) -> str: """ Transform the bytes into readable value with different units (e.g. 1 KB, 20 MB, 30.1 GB). :param num_bytes: Number of bytes. :return: Human readable string representation. """ num_bytes = int(num_bytes) if num_bytes < 1024: return f'{num_bytes} Bytes' elif num_bytes < 1024 ** 2: return f'{num_bytes / 1024:.1f} KB' elif num_bytes < 1024 ** 3: return f'{num_bytes / (1024 ** 2):.1f} MB' else: return f'{num_bytes / (1024 ** 3):.1f} GB'
1dabd1ba1bd8e79c1e7b29774b54d7509dc5e040
369,841
def _m(*names: str) -> str:
    """Get module names"""
    return '.'.join(s for s in names if s)
228363e5103914cd5770e5d4d5638c0cbe54ff89
90,091
def newConstantFunction(val): """Returns a function which accepts any kind of parameter, does nothing and returns the value val.""" def constantFunction(*args, **kwargs): _ = args, kwargs return val return constantFunction
032b29945c3e0d325e12923e4c6f01b7b7fe171a
154,821
import xml.etree.cElementTree as ElementTree def process(data_stream): """ Process a diff file stream into a class with objects separated. Parameters ---------- data_stream : class A file-like class containing a decompressed diff file data stream. Returns ------- data_object : osc_decoder class A class containing attribute dictionaries for each OpenStreetMap object type, namely .nodes, .relations, and .ways. Relations that contain nodes not modified and therefore not included in the diff file are listed in .missingNds. """ def parse_diff(source, handle): for event, elem in ElementTree.iterparse(source, events=('start', 'end')): if event == 'start': handle.start_element(elem.tag, elem.attrib) elif event == 'end': handle.end_element(elem.tag) elem.clear() class osc_decoder(): def __init__(self): self.changes = {} self.nodes = {} self.ways = {} self.relations = {} self.action = "" self.primitive = {} self.missingNds = set() def start_element(self, name, attributes): if name in ('modify', 'delete', 'create'): self.action = name if name in ('node', 'way', 'relation'): self.primitive['id'] = int(attributes['id']) self.primitive['version'] = int(attributes['version']) self.primitive['changeset'] = int(attributes['changeset']) self.primitive['username'] = attributes['user'] self.primitive['uid'] = attributes['uid'] self.primitive['timestamp'] = attributes['timestamp'] self.primitive['tags'] = {} self.primitive['action'] = self.action if name == 'node': self.primitive['lat'] = float(attributes['lat']) self.primitive['lon'] = float(attributes['lon']) elif name == 'tag': key = attributes['k'] val = attributes['v'] self.primitive['tags'][key] = val elif name == 'way': self.primitive['nodes'] = [] elif name == 'relation': self.primitive['members'] = [] elif name == 'nd': ref = int(attributes['ref']) self.primitive['nodes'].append(ref) if ref not in self.nodes: self.missingNds.add(ref) elif name == 'member': self.primitive['members'].append({ 'type': attributes['type'], 'role': attributes['role'], 'ref': attributes['ref'] }) def end_element(self, name): if name == 'node': self.nodes[self.primitive['id']] = self.primitive elif name == 'way': self.ways[self.primitive['id']] = self.primitive elif name == 'relation': self.relations[self.primitive['id']] = self.primitive if name in ('node', 'way', 'relation'): self.primitive = {} data_object = osc_decoder() parse_diff(data_stream, data_object) return data_object
f7216b0ddecaec9b37018fa179a05f112893dba7
483,157
def nest_contains_list(nest): """Whether the nest contains list. Args: nest (nest): a nest structure Returns: bool: True if nest contains one or more list """ if isinstance(nest, list): return True elif isinstance(nest, tuple): for item in nest: if nest_contains_list(item): return True elif isinstance(nest, dict): for _, item in nest.items(): if nest_contains_list(item): return True return False
39459f822e0cb31a2fb1f28c225352b025c8f1c3
572,413
def task8(lancuch: str) -> bool:
    """
    Function that receives a sequence of 4 space-separated single digits
    (e.g. '1 2 3 4') as input and then checks whether, read as one number
    (1234), it is divisible by 5 or not.
    Input: string with 4 digits
    Output: Boolean value (True or False)
    """
    pierwsza = lancuch[0]  # "1"
    druga = lancuch[2]     # "2"
    trzecia = lancuch[4]
    czwarta = lancuch[6]
    # concatenate each digit exactly once so the result matches "1234"
    razem = pierwsza + druga + trzecia + czwarta
    razem_liczba = int(razem)
    if razem_liczba % 5 == 0:
        return True
    else:
        return False
3d87bab6805b72ced5aac8886aa113988cd0b1e3
142,107
def is_task_tagged(task, tags, filters): """ Determine if given task match tag query. :param task: Task to check. :param tags: List of tags that should belong to the task. :param filters: List of tags that should not belong to the task. :return: True if task matches the query. """ if all(tag in task.tags for tag in tags): if not filters or not any(tag in task.tags for tag in filters): return True return False
54bd18213ab861154349d7c5fe7b7168685beb7e
410,903
def _remove_keys(keys, keys_to_remove): """Remove given keys from list of keys. Args: keys: List of keys to subtract from. keys_to_remove: List of keys to remove. Returns: A list of keys after subtraction. """ return list(set(keys) - set(keys_to_remove))
f72e4d7a46a1dd1680e9d9e86e5d01d185b1e1f7
512,455
import re


def clean_page_skills(skills):
    """
    Takes a string of skills from a dice job posting, cleans it, and returns
    the cleaned list of skills.

    Parameters
    ----------
    skills: string
        string of skills section from dice job posting

    Returns
    -------
    skills: list
        cleaned-up list of skills, each is a string
    """
    skills = str.lower(skills.getText().strip('\n').strip('\t').strip('\n'))  # .encode('ascii', 'ignore')
    skills = skills.split(',')
    # they are unicode, so convert to ascii strings (Python2 only)
    skills = [s.strip() for s in skills if s != 'etc']
    # raw strings avoid invalid-escape warnings for \s on newer Python versions
    skills = [re.sub(r'\s*etc\s+', '', s) for s in skills]
    skills = [re.sub(r'\s+etc\s*', '', s) for s in skills]
    return skills
4ec45c6d8e9d6e0407307c1ceb9b9b64e6023434
506,165
def set_fizz_buzz(node_value): """ Summary of set_fizz_buzz function: Evaluates the value of node_value and returns either 'FizzBuzz' if value is divisible by 3 and 5. 'Fizz' if the value is divisible by 3. 'Buzz' if the value is divisible by 5. Parameters: node_value (Node) : the current node Returns: Either 'FizzBuzz', 'Fizz' or 'Buzz' """ if node_value % 5 == 0 and node_value % 3 == 0: return 'FizzBuzz' elif node_value % 3 == 0: return 'Fizz' elif node_value % 5 == 0: return 'Buzz' else: return node_value
6d9ef6c97fdfa830a5f09c7aa64d6b4616406692
174,624