content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
def parseUnit(formatString, undefined="NaN"): """Returns the unit of data item from MXElectric data message enclosed in []. Returns empty string as parse error. If the unit is not defined, string parameter undefined can be used to set it (defaults to NaN).""" opening = formatString.find('[') closing = formatString.find(']') if closing < 0 or opening < 0: # did not find both brackets, returning empty string return "" elif closing - opening <= 1: # brackets in the wrong order or no unit defined, returning undefined return undefined else: return formatString[opening+1:closing]
05d089d450b9de30fa31b808f787b8c581f3ec4f
346,022
def clean_shape(shape): """ Standardizes the shape. """ # Standardize shapes to lower case, merge the obvious ones. new_shape = shape.lower() if shape else None if new_shape == "triangular": new_shape = "triangle" if new_shape == "changed": new_shape = "changing" return new_shape
b942bf5320fb278840a5b7cedc0657027f998b83
315,246
import tkinter def resize_image(image_location: str, height: int, width: int) -> tkinter.PhotoImage: """Given file location, and height/width inputs, shrinks image by a factor of the input. Ex: Current = 120 and 90, input = 10,10 -> result = 12, 9. Returns resized image. """ image = tkinter.PhotoImage(file = image_location) w_scale = int(int(image.width())/width) h_scale = int(int(image.height())/height) new_image = image.subsample(w_scale,h_scale) return new_image
6a4d17d3d858b7c6ca8c7bd50432e84b7bd3bf9a
511,716
def node_name(node): """ Returns the name of the given node :param node: str :return: str """ return node
b3adfc290eb935bacdd714373debff648da943d3
342,498
def option(name, value = None): """Compose a command line option""" result = ' --' + name if value: result += '=' + str(value) return result
a131daa4c608bb02c44c2c4bb7caf0f504e5556a
508,163
import math def get_dimensions(rows, size): """ Count number of cols and rows for topology :param rows: number of rows :param size: number of process :return: number of rows and cols """ comm_cols = int(math.sqrt(rows)) while comm_cols > 1 and size % comm_cols: comm_cols -= 1 return int(size / comm_cols), comm_cols
f8e98c72cc43e5540c65821899ea617dcaeda1c4
314,795
import importlib def create_object(specs, **kwargs): """ Creates an object based on given specs. Specs include information about module and function necessary to create the object, as well as any additional input parameters for it. Args: specs (dict): dictionary containing the following keys: module_name: specifies from which module an object comes. function_name: specifies the name of the function used to create object. Returns: object: object of any type """ module_name = specs.pop("module_name") module = importlib.import_module(module_name) creator_name = specs.pop("function_name") creator = getattr(module, creator_name) created_object = creator(**specs, **kwargs) return created_object
e6ba87bec07c994835925ffc4439e7e73de15662
153,827
def make_dico_contig_lengths_from_bam_handler(bamh): """ create a dictionary of contig with their length as values :param bamh: bam handle from pysam bam/cram reader :return: dictionary of contigs:lengths """ return dict(zip(bamh.references, bamh.lengths))
edf755edf3fc418c895508e17a91d5432417ba73
244,430
import hashlib def get_sha512_hashdigest(file_path): """ Returns the SHA512 hex digest for the given file. :param file_path: Path to the file. :return: SHA512 hex digest. """ file_hash = hashlib.sha512() with open(file_path, 'rb') as f: while True: data = f.read(1024) if not data: break file_hash.update(data) return file_hash.hexdigest()
56cae3c6082a02dbba8f5218338cee99f3585f35
321,574
def serialize_biomass_v2(analysis, type): """Convert the output of the biomass_loss analysis to json""" return { 'id': None, 'type': type, 'attributes': { 'biomassLoss': analysis.get('biomassLoss', None), 'biomassLossByYear': analysis.get('biomassLossByYear', None), 'cLossByYear': analysis.get('cLossByYear', None), 'co2LossByYear': analysis.get('co2LossByYear', None), 'areaHa': analysis.get('area_ha', None) } }
caf1518d8f318415fb0305c7cbc74d9280e97217
584,236
def _error_matches_criteria(error, criteria): """ Check if an error matches a set of criteria. Args: error: The error to check. criteria: A list of key value pairs to check for in the error. Returns: A boolean indicating if the provided error matches the given criteria. """ for key, value in criteria: if error.get(key) != value: return False return True
8f52f7288fdefa496084b4faf689ed269360050a
9,220
def get_encoding_table(letters): """Defines the mapping between plain and cipher text""" if letters: return { '1': 'nine', '2': 'eight', '3': 'seven', '4': 'six', '5': 'zero', '6': 'four', '7': 'three', '8': 'two', '9': 'one', '0': 'five' } return { '1': '9', '2': '8', '3': '7', '4': '6', '5': '0', '6': '4', '7': '3', '8': '2', '9': '1', '0': '5' }
4c2bd38e3d25c4ac9bf9f91e39d0d7ae9a9d6bd3
581,590
import string def letter_part(n): # takes quotient and index """ Function to calculate the lettered part for a given number of parts n Eg. letter_part(1) = 'a', letter_part(26) = 'z', letter_part(27) = 'aa', letter_part(702) = 'zz' which is the max. Values for n > 702 return None :param n: number of parts :return: returns a string of length 1 or 2 ranging from a to zz. Returns None for values greater than zz """ q, i = divmod(n, 26) # divmod returns quotient and modulus if i == 0: # Fix special case for z, n=26 returns (1,0), but we want (0,25) q -= 1 i = 25 # convert i to index for az string. else: i -= 1 # convert i to index for az string. result = None # set default result to None az = string.ascii_lowercase # 'abcdefghijklmnopqrstuvwxyz' if q == 0: # quotient is 0, first time through a-z result = az[i] # one letter result, eg. 'a' elif 26 >= q >= 1: # quotient is 1 or more, beyond q=26 we would need three letters. result = az[q-1]+az[i] # two letter result, eg. 'aa' elif q > 26: pass # will return default None return result
42af53ade009abf86219f42849de9fdf7d56bc2d
646,130
def type_size(type_): """ Given a type object, return the size """ if type_ in ["void"]: return 0 if type_ in ["int", "float"]: return 4 if type_ in ["long", "double"]: return 8 return type_._size_()
88ee73a2379bf3048a7055db8606a461eb6d08d8
412,954
def str2data(str): """ Convert a string to some data bytes. An empty or None value is returned unchanged (helpful for testing), e.g.: 'WunderbarIR' -> '57 75 6e 64 65 72 62 61 72 49 52' '' -> '' """ if not str: return str data = ' '.join([hex(ord(c))[2:] for c in str]) return data
81605b120a71bd70120d6946eaa2d8c22dd04660
194,352
def set_batch(input_shape, batch): """Get the input shape with respect to a specified batch value and an original input shape. Parameters ---------- input_shape : tuple The input shape with batch axis unset. batch : int The batch value. Returns ------- ishape : tuple The input shape with the value of batch axis equal to batch. """ return [batch if s == -1 else s for s in input_shape]
cfa171443392daf496c53e4630f5e3302d970d26
147,669
def get_rs_energies(output_file_lines): """ Get the distances and energies from the 2D ORCA output file :param output_file_lines: (list(str)) :return: (list(tuple)), (list(float)) """ print('Extracting data') r1s, r2s, energies = [], [], [] energies_section = False for n, line in enumerate(reversed(output_file_lines)): if n > 2: if 'The Calculated Surface using the SCF energy' in output_file_lines[len(output_file_lines)-n+1]: energies_section = True if "The Calculated Surface using the 'Actual Energy'" in line: break if energies_section: r1, r2, energy = line.split() r1s.append(float(r1)) r2s.append(float(r2)) energies.append(float(energy)) rel_energies = [627.5*(e - min(energies)) for e in energies] return r1s, r2s, rel_energies
fe1e8e7189027eb26740bde175fbe9a017cb54d1
141,838
def h2_html(text: str) -> str: """Embed text in subheading tag.""" return "<h2>{}</h2>".format(text)
ca68a76c2fc040ce9a562eb9fb6815c2552a8891
294,809
import json def read_json(path): """Read data from JSON file.""" with open(path, 'r') as file: return json.load(file)
f4a991102e67c23b4d6811ebb12e77e461870dfd
618,546
import pickle def _read_python_plot_info(filename): """ Read the information required for a python plot for the given filename. The data for this are assumed to be in: ./data/results/filename_mse.p, ./data/results/filename_mse_test.p, ./data/results/filename_correct_results.p and ./data/results/filename_weights.p Afterwards, one can call _create_python_plot(mse, mse_test, weights, correct_weights) :param filename: The filename from which to obtain the mse, mse_test, correct weights and found weights. :type filename: str :return: The mse for each epoch, the mse of the test set for each epoch, the correct weights and the weights found in each epoch. :rtype: list[float], list[float], dict[Term,float], list[dict[Term, float]] """ mse_filename = './data/results/' + filename.replace('.pl', '') + '_mse.p' mse_test_filename = './data/results/' + filename.replace('.pl', '') + '_mse_test.p' correct_weights_filename = './data/results/' + filename.replace('.pl', '') + '_correct_results.p' found_weights_filename = './data/results/' + filename.replace('.pl', '') + '_weights.p' with open(mse_filename, 'rb') as f: mse = pickle.load(f) with open(mse_test_filename, 'rb') as f: mse_test = pickle.load(f) with open(correct_weights_filename, 'rb') as f: correct_weights = pickle.load(f) with open(found_weights_filename, 'rb') as f: found_weights = pickle.load(f) return mse, mse_test, correct_weights, found_weights
75d970385bc92ab4d874832b72f442608677f57f
75,595
def ubfx(value, lsb, width): """Unsigned Bitfield Extract""" return (value >> lsb) & ((1 << width) - 1)
bba3b92deea9105acc6554d235230711d1979c5f
672,818
import re def parse_metrics(log_file, metric_names): """ Parse the metric values from the log file. Note that we use the last matched value in the log file. For example, when metric is 'best_acc1', we will match the last line that look like - best_acc1 = 78.5 - best_acc1: 43 - best_acc1 100. :param log_file: (str) the name of the log file :param metric_names: (list(str)) the querying metric names :return: """ values = [] for metric_name in metric_names: metric_pattern = re.compile(metric_name + '[ =:]*([-+]?[0-9]*\.?[0-9]+)') value = "" with open(log_file, "r") as f: for line in reversed(f.readlines()): result = metric_pattern.search(line) if result: # find the latest value value = result.group(1) break values.append(value) return values
50111f9c5a3466caa804e0f64da49417aea1b899
539,850
import functools import pywintypes def use_worksheet(ws_name): """Decorator to wrap ExcelWorkbookTestCase tests. Before running the test body, the active worksheet is changed to that with the name specified.""" def _wrap_set_worksheet(f): @functools.wraps(f) def _wrapped(self, *args, **kwargs): xlwb = self.workbook.xlWorkbook try: xlSheet = xlwb.Sheets(ws_name) xlSheet.Activate() except pywintypes.com_error as e: raise ValueError("Failed to change active worksheet in @use_worksheet", ws_name, e) f(self, *args, **kwargs) return _wrapped return _wrap_set_worksheet
facc8d7a25c92cf19a8f401a4de0f139d6220a8e
560,461
def inspect_chain(chain): """Return whether a chain is 'GOOD' or 'BAD'.""" next_key = chain.pop('BEGIN') while True: try: next_key = chain.pop(next_key) if next_key == "END": break except KeyError: return "BAD" if len(chain) > 0: return "BAD" return "GOOD"
083886aa31fa81cc90d4c7bd30149c5575a5a675
21,613
def absolutelyNonDecreasing(buffer,item,attempts): """ Stops after the buffer has seen an absolute value larger than the one being searched for. The example halting condition given in the documentation. """ if abs(buffer._cache[-1])>abs(item): return True return False
c96f593b47b2d4c695959b7ca494ffd757bada63
643,501
from functools import reduce def find_longest_common_prefix_reduce(words:list): """ Find the lcp in a list of words, using 'reduce' functions. """ if not words: return '' def common_start(w1, w2): shorter = w1 if len(w1) < len(w2) else w2 for i in range(0, len(shorter)): if w1[i] != w2[i]: return shorter[:i] return shorter return reduce(common_start, words)
52ef4553bea70b879f8300e41f540cbe1069391b
93,186
def _compress_str(s, spaces_to_drop): """Remove `spaces_to_drop` spaces from `s`, alternating between left and right""" assert s.count(" ") >= spaces_to_drop from_left = True l = 0 r = len(s) drop = set() remaining_spaces = spaces_to_drop while remaining_spaces > 0: if from_left: l = s.find(" ", l) drop.add(l) l += 1 # since `s.find` is inclusive, but we need exclusive else: r = s.rfind(" ", 0, r) drop.add(r) from_left = not from_left remaining_spaces -= 1 assert len(drop) == spaces_to_drop return ''.join([l for (i, l) in enumerate(s) if i not in drop])
b7cf1e1e55c319dffe6579ac49a2db18431e9dfb
68,156
def qualifications(config): """Format participant qualifications""" qualifications = [] # Country of origin cfg = config['crowdsource']['filter'] if 'countries' in cfg: locales = [{'Country': country} for country in cfg['countries']] qualifications.append({ 'QualificationTypeId': '00000000000000000071', 'Comparator': 'In', 'LocaleValues': locales, 'RequiredToPreview': True}) # Number of approved tasks if 'approved_tasks' in cfg and cfg['approved_tasks'] > 0: qualifications.append({ 'QualificationTypeId': '00000000000000000040', 'Comparator': 'GreaterThan', 'IntegerValues': [cfg['approved_tasks']], 'RequiredToPreview': True}) # Approval rating if 'approval_rating' in cfg and cfg['approval_rating'] > 0: qualifications.append({ 'QualificationTypeId': '000000000000000000L0', 'Comparator': 'GreaterThanOrEqualTo', 'IntegerValues': [cfg['approval_rating']], 'RequiredToPreview': True}) return qualifications
aa47023851213dfe1e61c9e1e7fda5fa2c2439bd
305,153
def intersection(bbox1, bbox2): """ Return a bbox of the intersection of bbox1 and bbox2. """ llx = max(bbox1[0], bbox2[0]) lly = max(bbox1[1], bbox2[1]) urx = min(bbox1[2], bbox2[2]) ury = min(bbox1[3], bbox2[3]) return llx, lly, urx, ury
c1fccd93b7aab48bd8506e6ca4491184a9fe55f8
457,921
def compose_functions(outer, inner): """Compose two functions""" return lambda x: outer(inner(x))
9080f6a11256711bf5205f546282fb93c4b7c8d2
401,006
from typing import Dict import importlib def run_function_from_path(function_path: str, kwargs: Dict): """ Runs a function given its path ... Parameters ---------- function_path: str the path to the function to be executed kwargs: Dict kwargs passed to to the function to be executed """ module_name, function_name = function_path.rsplit('.', 1) module = importlib.import_module(module_name) task = getattr(module, function_name) return task(**kwargs)
04ea02efad69544369572d7782c4c0a321c8ff12
529,718
def format_for_IN(l): """ Converts input to string that can be used for IN database query """ if type(l) is tuple: l = list(l) if type(l) is str: l = [l] return "(" + ','.join(['"' + str(x) + '"' for x in l]) + ")"
a2a755041c49af612e13e11da8148344b64a7926
654,547
def complement_strand(sequence): """ Returns the string which will be the second strand of the DNA sequence given that Ts complement As, and Cs complement Gs. If given a bad input, the function returns "Sequencing Error" :param sequence: A DNA sequence :return: the complement string for the DNA sequence """ complement = "" # This can be used to "build" the complement letter_dictionary = {"A": "T", "C": "G", "T": "A", "G": "C"} for letter in sequence: if letter in letter_dictionary: complement += letter_dictionary[letter] else: return "Sequencing Error" return complement
3857c2669313d521be0d09a9c4d4d3c000d97c9d
113,930
def have_matching_types(a, b, type_or_types): """True if a and b are instances of the same type and that type is one of type_or_types. """ if not isinstance(a, type_or_types): return False return isinstance(b, type(a))
3a9388d4c55365c6513f34dbefa27d1b0700ceca
81,357
def _median(values): """Return the median of a list of values""" n = len(values) if n < 1: return 0.0 if n % 2 == 1: return sorted(values)[n//2] return sum(sorted(values)[n//2-1:n//2+1])/2.0
2d7d4def43370e5121076136dd1d074b5869ff74
371,010
import binascii def bytes2hex(bytes_data: bytes) -> str: """Convert binary (bytes) data to its hexadecimal string representation Args: bytes_data: byte stream, type: bytes Returns: the hexadecimal string corresponding to the bytes """ return binascii.b2a_hex(bytes_data).decode()
fb0d23b7dc032e3a8bde809c15c1de46d693c816
288,138
import json def getNodeGroups(session, url, details=False): """ Return a list of node group objects (by the index endpoint). Passing details=True will get all information for each node group. """ groups = [] page = 1 per_page = 100 done = False while not done: new_node_groups = session.get("{}/api/v2/node_groups.json".format(url), params={"page": page, "per_page": per_page}).json() groups += new_node_groups page += 1 done = True if len(new_node_groups) < per_page else False if details: detailed_groups = [] for group in groups: detailed_group = session.get("{}/api/v2/node_groups/{}.json".format(url, group["id"])).json() detailed_group["scan_options"] = json.loads(detailed_group["scan_options"]) detailed_groups.append(detailed_group) return detailed_groups return groups
d865f1e99cee94410b68bee5c4384bbcd4d4a52f
657,782
def trim_value(value): """ Trims the value so that abs(value) <= 20. Values bigger than 20 cause numerical instability when calculating logits, and the result doesn't change for 20 upwards :param value: :return: """ if value>20: return 20 if value<-20: return -20 return value
31122c498a77a864b882538bb30eab0476cfd448
473,326
def darken(colour, amount): """ Darken a colour by a given amount. The amount ranges from 0 to 1, with 0 being black and 1 being unchanged. """ r, g, b = colour return r*amount, g*amount, b*amount
84d6cadd2b1c6c0714910b3ab17de549c4a92d92
430,240
def params_to_string(num_params, units=None, precision=2): """Convert parameter number into a string. Args: num_params (float): Parameter number to be converted. units (str | None): Converted FLOPs units. Options are None, 'M', 'K' and ''. If set to None, it will automatically choose the most suitable unit for Parameter number. Default: None. precision (int): Digit number after the decimal point. Default: 2. Returns: str: The converted parameter number with units. Examples: >>> params_to_string(1e9) '1000.0 M' >>> params_to_string(2e5) '200.0 k' >>> params_to_string(3e-9) '3e-09' """ if units is None: if num_params // 10 ** 6 > 0: return str(round(num_params / 10 ** 6, precision)) + " M" elif num_params // 10 ** 3: return str(round(num_params / 10 ** 3, precision)) + " k" else: return str(num_params) else: if units == "M": return str(round(num_params / 10.0 ** 6, precision)) + " " + units elif units == "K": return str(round(num_params / 10.0 ** 3, precision)) + " " + units else: return str(num_params)
a181a142aa1886dfaaebf0665908244764d69cde
582,423
def generate_range(min: int, max: int, step: int) -> list: """ This function generates a range of integers from min to max, with the step. """ return [i for i in range(min, max + 1, step)]
e0d0871d52b6ae671bfdcefb15c7bf55bc71fcfa
57,012
def _which(repository_ctx, cmd, default = None): """A wrapper around repository_ctx.which() to provide a fallback value.""" result = repository_ctx.which(cmd) return default if result == None else str(result)
bd971599fbb77bf7eb504946ef2f901e877ed9b1
690,565
import hashlib def hash160(s: bytes) -> bytes: """ sha256 followed by ripemd160 :param s: data :return: hashed data """ return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()
7b18fcdf51db707a17d5408c7b364818a6c5ee0c
704,668
import typing def is_optional(field: typing.Any) -> bool: """Returns boolean describing if the provided `field` is optional.""" return typing.get_origin(field) is typing.Union and type(None) in typing.get_args( field )
579dff90ca7ef0a5cb19893e07414eb0a6172c90
511,584
def encode_binary(x, width): """Convert integer x to binary with at least width digits.""" assert isinstance(x, int) xb = bin(x)[2:] if width == 0: assert x == 0 return '' else: assert len(xb) <= width pad = width - len(xb) return '0' * pad + xb
61a5c1e933f495f4347e0d89490a02b6e9630f6e
675,828
def acnucseq_from_pdb(input_chain): """ Given an input DNA chain, returns a string with its nucleotide sequence. """ acnucseq = "" for res in input_chain: if len(res.get_resname()) > 1: acnucseq += str(res.get_resname().rstrip()[2:]) else: acnucseq += str(res.get_resname()).rstrip() return acnucseq
5c0cf59a735e693952ad030f24e8d4eb07adfc4c
136,107
import re def cleanPath(node): """Return the substring of a string matching chars approved for use in our URL paths.""" return re.sub(r'[^a-zA-Z0-9\-/,\.]', '', str(node), flags=re.DOTALL)
1dd59bb4dcc462930b25307869294ad7a025bd09
104,704
def find_unique(a, b): """ :param a: Iterable number 1. :type a: list, tuple :param b: Iterable number 2. :type b: list, tuple :return: List of unique objects from both ``a`` and ``b``. :rtype: list Example: -------------------------- .. code-block:: python >>> list_1 = [1, 2, 3] >>> list_2 = [1, 5, 2] >>> unique_items = find_unique(list_1, list_2) >>> print(unique_items) [3, 5] >>> type(unique_items) <class 'list'> """ set_a = set(a) set_b = set(b) unique = set_a - set_b unique |= set_b - set_a return list(unique)
4e5d28b1126b379b9e5978d85402f952b238e0ce
283,123
def str2dict(strdict): """Convert key1=value1,key2=value2,... string into dictionary. :param strdict: key1=value1,key2=value2 Note: This implementation overrides the original implementation in the neutronclient such that it is no longer required to append the key with a = to specify a corresponding empty value. For example, key1=value1,key2,key3=value3 key1 key1,key2 will also be supported and converted to a dictionary with empty values for the relevant keys. """ if not strdict: return {} return dict([kv.split('=', 1) if '=' in kv else [kv, ""] for kv in strdict.split(',')])
76fc5e3d4957713426af9121dac3e461964ec0f7
621,785
def create_column_features(features, window_size): """Create column names from list of features and window size""" columns = [] for i in list(range(window_size)) + ['y']: for f in features: columns.append(f+'_'+str(i)) return columns
62000c468f05ab3668fbd3f3305f48df0485949c
219,236
def advance_time_step(model, env, brain_name, states, actions, rewards, next_states, dones): """Advances the agents' model and the environment to the next time step, passing data between the two as needed. Params model (Maddpg): the MADDPG model that manages all agents env (UnityEnvironment): the environment object in which all action occurs brain_name (string): an index into the Unity data structure for this environment states (ndarray): array of current states of all agents and environment [n, x] actions (ndarray): array of actions by all agents [n, x] rewards (list): list of rewards from all agents [n] next_states (ndarray): array of next states (after action applied) [n, x] dones (list): list of done flags (int, 1=done, 0=in work) [n] where, in each param, n is the number of agents and x is the number of items per agent. Returns: tuple of (s, a, r, s', done) values """ # Predict the best actions for the current state and store them in a single ndarray actions = model.act(states) #returns ndarray, one row for each agent # get the new state & reward based on this action env_info = env.step(actions)[brain_name] next_states = env_info.vector_observations #returns ndarray, one row for each agent rewards = env_info.rewards #returns list of floats, one for each agent dones = env_info.local_done #returns list of bools, one for each agent # update the agents with this new info model.step(states, actions, rewards, next_states, dones) # roll over new state states = next_states return (states, actions, rewards, next_states, dones)
11b1e0249931618e807df6783224c07b8e405ca2
516,398
def generate_glob_by_extension(extension): """ Generates a glob that matches the given extension, case-insensitively. Example ------- For '.po' files, the generated glob is '*.[pP][oO]' """ extension = extension.lstrip(".") case_insensitive_char_list = ["[{0}{1}]".format(char, char.upper()) for char in extension] glob = "".join(case_insensitive_char_list) return "*.{0}".format(glob)
a65f618eb11e6d95d43fece54c7b33e306e298df
97,934
def find_opposite_axes(axes, ndim): """ Based on the total number of dimensions function finds all axes that are missed in the specified list ``axes``. Parameters ---------- axes : list or tuple Already known axes. ndim : int Total number of dimensions. Returns ------- list Examples -------- >>> from neupy.layers.normalization import find_opposite_axes >>> find_opposite_axes([0, 1], ndim=4) [2, 3] >>> >>> find_opposite_axes([], ndim=4) [0, 1, 2, 3] >>> >>> find_opposite_axes([0, 1, 2], ndim=3) [] """ if any(axis >= ndim for axis in axes): raise ValueError("Some axes have invalid values. Axis value " "should be between 0 and {}".format(ndim)) return [axis for axis in range(ndim) if axis not in axes]
491f340bbbb37da7d9cc58b522424ff85bc547cb
585,637
def _sleep_time(iter): """Return the time-to-sleep for the n'th iteration of a retry loop. This implementation increases exponentially. :param iter: iteration number :returns: number of seconds to sleep """ if iter <= 1: return 1 return iter ** 2
6abd614bbabc872758049ea35d9ee0ebafd0f2ba
29,102
def max_unit_id_by_plant(gens_df): """Identify the largest unit ID associated with each plant so we don't overlap. The PUDL Unit IDs are sequentially assigned integers. To assign a new ID, we need to know the largest existing Unit ID within a plant. This function calculates that largest existing ID, or uses zero, if no Unit IDs are set within the plant. Note that this calculation depends on having all of the pre-existing generators and units still available in the dataframe! Args: gens_df (pandas.DataFrame): A generators_eia860 dataframe containing at least the columns plant_id_eia and unit_id_pudl. Returns: pandas.DataFrame: Having two columns: plant_id_eia and max_unit_id_pudl in which each row should be unique. """ return ( gens_df[["plant_id_eia", "unit_id_pudl"]] .drop_duplicates() .groupby("plant_id_eia") .agg({"unit_id_pudl": max}) .fillna(0) .rename(columns={"unit_id_pudl": "max_unit_id_pudl"}) .reset_index() )
55bff1d221f23802f16b1cf2464d6fd494267aaf
579,221
def form_fastqc_cmd_list(fastqc_fp, fastq_fp, outdir): """Generate argument list to be given as input to the fastqc function call. Args: fastqc_fp(str): the string representing path to fastqc program fastq_fp(str): the string representing path to the fastq file to be evaluated outdir(str): the string representing the path to the output directory Return value: call_args(list): the list of call_args representing the options for the fastqc subprocess call Raises: ValueError is raised when either the fastqc path or the fastqc input files are empty """ # throw exceptions to prevent user from accidentally using interactive fastqc if fastqc_fp == '': raise ValueError('fastqc_fp name is empty') if fastq_fp == '': raise ValueError('fastq_fp file name is empty') # required arguments call_args_list = [fastqc_fp, fastq_fp] # direct output if outdir is not None: call_args_list.extend(["--outdir", outdir]) return call_args_list
ce0ed8eb7d35bdd2565f910bb982da710daa23c5
41,495
def get_start_end(sequence, skiplist=('-', '?')): """Return position of first and last character which is not in skiplist. Skiplist defaults to ['-','?']. """ length = len(sequence) if length == 0: return None, None end = length - 1 while end >= 0 and (sequence[end] in skiplist): end -= 1 start = 0 while start < length and (sequence[start] in skiplist): start += 1 if start == length and end == -1: # empty sequence return -1, -1 else: return start, end
b84fd32268cba2f0257428c92b426a899f819ba2
479,490
def make_dist_table(d1, d2): """ Makes a nicely formatted table showing the control and target servers' answer distribution side by side. Inputs: - d1 : dict, control server distribution - d2 : dict, target server distribution Returns: - side_by_side : str, nicely formatted table showing the distributions side by side """ a1 = set(d1) a2 = set(d2) answers = a1 | a2 side_by_side = ( "Answer".center(50, "_") + "|" + "Control".center(10, "_") + "|" + "Target".center(10, "_") + "|\n" ) for ans in answers: if len(str(ans)) > 45: ans_str = str(ans[:45]) else: ans_str = str(ans) side_by_side = ( side_by_side + ans_str.center(50, ".") + "|" + str(d1.get(ans, 0)).center(10, ".") + "|" + str(d2.get(ans, 0)).center(10, ".") + "|\n" ) return side_by_side
8eaff8fdfda7baa6e5dc45e499b26c7ea8ef3d5c
461,879
def _add_tag(tags, label: str) -> bool: """Adds the tag to the repeated field of tags. Args: tags: Repeated field of Tags. label: Label of the tag to add. Returns: True if the tag is added. """ for tag in tags: if tag.label == label: # Episode already has the tag. return False tags.add().label = label return True
932399e97ae823ef0922929dc5123a587c06b211
41,680
def simplify_rating(d): """ Removes some keys from a flattened rating dict """ keys_to_delete = [] for key in d.keys(): if key.endswith(".type") or key.endswith(".max_score"): keys_to_delete.append(key) for key in keys_to_delete: del d[key] return d
b61b898f01622902b09c1c457fbf20f10277de29
529,712
def merge_regions(regions): """Coalesce regions. Scans a sorted list of region starting and ending positions looking for the outer-most start and end positions to coalesce overlapping and contained regions into a smaller list of larger regions. Parameters ---------- regions : list of tuples List of (start, end) position integers. Returns ------- regions : list of tuples List of merged (start, end) position integers. Examples -------- >>> # Empty list >>> merge_regions([]) [] >>> # Only one region >>> merge_regions([(10,20)]) [(10, 20)] >>> # Discard contained region at left >>> merge_regions([(10,20), (10,15)]) [(10, 20)] >>> # Discard contained region at right >>> merge_regions([(10,20), (15,20)]) [(10, 20)] >>> # Discard contained region exact match >>> merge_regions([(10,20), (10,20)]) [(10, 20)] >>> # Discard contained region fully contained >>> merge_regions([(10,20), (11,19)]) [(10, 20)] >>> # Extend region by overlap right >>> merge_regions([(10,20), (15,25)]) [(10, 25)] >>> # Extend region by overlap left >>> merge_regions([(10,20), (5,15)]) [(5, 20)] >>> # Extend immediately adjacent region by extension >>> merge_regions([(10,20), (21,30)]) [(10, 30)] >>> # No overlap >>> merge_regions([(40,50), (25,30)]) [(25, 30), (40, 50)] >>> # Single position region : discard contained region >>> merge_regions([(40,50), (40,40)]) [(40, 50)] >>> # Single position region : discard contained region >>> merge_regions([(40,50), (50,50)]) [(40, 50)] >>> # Single position region : discard contained region >>> merge_regions([(40,50), (41,41)]) [(40, 50)] >>> # Single position region : discard contained region >>> merge_regions([(40,50), (49,49)]) [(40, 50)] >>> # Single position region : extend immediately adjacent region by extension >>> merge_regions([(10,10), (11,21)]) [(10, 21)] >>> # Single position region : extend immediately adjacent region by extension >>> merge_regions([(10,20), (21,21)]) [(10, 21)] >>> # Single position region : merge two immediately adjacent single-position regions >>> merge_regions([(20,20), (21,21)]) [(20, 21)] >>> # Single position region : no overlap >>> merge_regions([(40,50), (60,60)]) [(40, 50), (60, 60)] >>> # Single position region : no overlap >>> merge_regions([(40,40), (50,60)]) [(40, 40), (50, 60)] >>> # Single position region : no overlap >>> merge_regions([(40,40), (50,50)]) [(40, 40), (50, 50)] """ if len(regions) == 0: return regions regions = sorted(regions) merged_regions = list() merged_regions.append(regions[0]) for region in regions[1:]: last_merged_region = merged_regions[-1] last_merged_region_start, last_merged_region_end = last_merged_region region_start, region_end = region if region_start >= last_merged_region_start and region_end <= last_merged_region_end: pass # discard region contained in the last region elif region_start <= (last_merged_region_end + 1) and region_end > last_merged_region_end: merged_regions[-1] = (last_merged_region_start, region_end) # extend last region by overlapping or adjacent region else: merged_regions.append(region) # add non-overlapping region to sorted list return merged_regions
b3696271ee6d73956a1a76a9594d049fc53f9bef
195,141
import math def paste_image(image, canvas, position): """ Pastes the given image on the canvas at the given position. The position denotes the center of the pasted image. """ x_offset = int(math.floor(position[0] - (image.shape[1] / 2))) y_offset = int(math.floor(position[1] - (image.shape[0] / 2))) pasted_part_start_x = max(0, x_offset * -1) pasted_part_start_y = max(0, y_offset * -1) pasted_part_end_x = min(image.shape[1], canvas.shape[1] - x_offset) pasted_part_end_y = min(image.shape[0], canvas.shape[0] - y_offset) pasted_part = image[pasted_part_start_y:pasted_part_end_y, pasted_part_start_x:pasted_part_end_x] b_start_x = max(0, x_offset) b_start_y = max(0, y_offset) canvas[b_start_y:b_start_y+pasted_part.shape[0], b_start_x:b_start_x+pasted_part.shape[1]] = pasted_part return canvas
b0dafee15f98d62b32cac61a712b94a3daab33d1
311,213
import json from datetime import datetime def mcerebrum_data_parser(line: str) -> list: """ parse each row of data file into list of values (timestamp, localtime, val1, val2....) Args: line (str): Returns: list: (timestamp, localtime, val1, val2....) """ data = [] ts, offset, sample = line.split(',', 2) try: ts = int(ts) offset = int(offset) except: raise Exception("cannot convert timestamp/offsets into int") try: vals = json.loads(sample) except: vals = sample.split(",") timestamp = datetime.utcfromtimestamp(ts / 1000) localtime = datetime.utcfromtimestamp((ts + offset) / 1000) data.append(timestamp) data.append(localtime) if isinstance(vals, list): data.extend(vals) else: data.append(vals) return data
f63881e3f7ff2995e33c0dd47f4f06393e18b99c
275,488
from typing import Optional def get_intersection(box1: list, box2: list) -> Optional[tuple]: """ Get intersection of the two boxes. :param box1: First box. :param box2: Second box. :return: Intersection box or None if no intersection. """ # Unpack coordinates x1, y1, x2, y2 = box1 x3, y3, x4, y4 = box2 # Get "new" coordinates x2 += x1 y2 += y1 x4 += x3 y4 += y3 x5 = max(x1, x3) y5 = max(y1, y3) x6 = min(x2, x4) y6 = min(y2, y4) if x5 > x6 or y5 > y6: return return x5, y5, x6 - x5, y6 - y5
3d63ed3fec62fb241839a1ab03c5416abb5e2dbf
589,569
def increment_letter(letter): """Return the character after `letter` in a restricted circular alphabet. This increments a single letter at a time: a becomes b, z becomes a and so on. i, o and l are excluded from the alphabet used as they are not allowed to appear in valid passwords according to the problem description. It is, however, safe to increment those restricted letters using this function as a special case is made for them. """ restricted_dict = {"i": "j", "l": "m", "o": "p"} if letter in restricted_dict: return restricted_dict[letter] ok_letters = "abcdefghjkmnpqrstuvwxyz" current_index = ok_letters.index(letter) is_final_index = current_index == len(ok_letters) - 1 new_index = 0 if is_final_index else current_index + 1 return ok_letters[new_index]
3d10eb51762ba4399a6f9d3d7c91bb25f2e08638
561,658
def _get_cindex(circ, name, index): """ Find the classical bit index. Args: circ: The Qiskit QuantumCircuit in question name: The name of the classical register index: The qubit's relative index inside the register Returns: The classical bit's absolute index if all registers are concatenated. """ ret = 0 for reg in circ.cregs: if name != reg.name: ret += reg.size else: return ret + index return ret + index
340105a2ddfe5fb2527171a7592390c9dd2937e5
705,708
import json from typing import OrderedDict def load(file_path, ordered=False): """Load a JSON file from disk. Args: - file_path (FilePath): The fully qualified file path. Returns: - dict: The JSON data """ fh = open(file_path, mode='r') data = None if ordered: data = json.load(fh, object_pairs_hook=OrderedDict) else: data = json.load(fh) fh.close() return data
095a286b47fdaa8e2a1194078c987f85bf1f107a
209,048
def zimmermann(x): """ Zimmermann function: a non-continuous function, Equation (24-26) of [2] minimum is f(x)=0.0 at x=(7.0,2.0) """ x0, x1 = x #must provide 2 values (x0,y0) f8 = 9 - x0 - x1 c0,c1,c2,c3 = 0,0,0,0 if x0 < 0: c0 = -100 * x0 if x1 < 0: c1 = -100 * x1 xx = (x0-3.)*(x0-3) + (x1-2.)*(x1-2) if xx > 16: c2 = 100 * (xx-16) if x0 * x1 > 14: c3 = 100 * (x0*x1-14.) return max(f8,c0,c1,c2,c3)
2cf51e553075c74626cdb1b92ed29a9455a02733
312,279
import json def failed_validation(*messages, **kwargs): """Return a validation object that looks like the add-on validator.""" upload = kwargs.pop('upload', None) if upload is None or not upload.validation: msgs = [] else: msgs = json.loads(upload.validation)['messages'] for msg in messages: msgs.append({'type': 'error', 'message': msg, 'tier': 1}) return json.dumps({'errors': sum(1 for m in msgs if m['type'] == 'error'), 'success': False, 'messages': msgs, 'prelim': True})
fc9b54d5ef480ccaf0943f75042b3619a56a0924
45,781
def uri(request): """Gets the URI for the application.""" if request.app['https']: return 'https://' + request.headers['Host'] else: return 'http://' + request.headers['Host']
36b0b2f77461272112fe0007142bb4a35a3e3eec
144,008
import mpmath def pdf(x, mu=0, sigma=1): """ Normal distribution probability density function. """ # Defined here for consistency, but this is just mpmath.npdf return mpmath.npdf(x, mu, sigma)
d1ebc4e29437b3171ad928f702b8be97fcfb7bd4
90,364
def make_new_get_user_response(row): """ Returns an object containing only what needs to be sent back to the user. """ return { 'userName': row['userName'], 'categories': row['categories'], 'imageName': row['imageName'], 'refToImage': row['refToImage'], 'imgDictByTag': row['imgDictByTag'], 'canView': row['canView'], 'imgDictByImage': row['imgDictByImage'] }
a898289e5ba8d7e2141b0bf4efa6831f5cf85bc7
394,757
import collections import itertools def part2(lines): """ Analyzing all the possible numbers we get: a b c d e f g ----------------------------------- 0: x x x x x x 1: x x 2: x x x x x 3: x x x x x 4: x x x x 5: x x x x x 6: x x x x x x 7: x x x 8: x x x x x x x 9: x x x x x x ----------------------------------- r: 8 6 8 7 4 9 7 <- how many times each character is present If we assign each character to the value of r above, we find that each number can be uniquely represented by the sum of those values: 0: abcefg = 8+6+8+4+9+7 = 42 1: cf = 8+9 = 17 ... and so on """ def decode(wires, output): counter = collections.Counter(itertools.chain(*wires)) number = 0 for digits in output: number = number * 10 + mapping[sum(counter[c] for c in digits)] return number correct = { 0: "abcefg", 1: "cf", 2: "acdeg", 3: "acdfg", 4: "bcdf", 5: "abdfg", 6: "abdefg", 7: "acf", 8: "abcdefg", 9: "abcdfg", } counter = collections.Counter(itertools.chain(*correct.values())) mapping = {sum(counter[d] for d in digits): num for num, digits in correct.items()} return sum(decode(*l) for l in lines)
7eb8a5f67558ebf9514bb9909be67ead5b63f5a5
81,757
def rgb_to_megadrive_vdp(red, green, blue): """Convert a 24bit RGB value into the 12 bit long 3bit/colour format used by the megadrive VDP in the format of: BBB0 GGG0 RRR0 Args: red, green, blue: 8bit integer values used to represent a colour between 0-255 Returns: A 12bit integer formatted for use with the megadrive VDP Raises: ValueError: If red, green or blue is not 0-255 """ if red > 255 or green > 255 or blue > 255: raise ValueError if red < 0 or green < 0 or blue < 0: raise ValueError # Shift the colour to strip to 3bits then shift into the required format blue_formatted = (blue >> 5) << 9 # 0bBBB000000000 green_formatted = (green >> 5) << 5 # 0bGGG00000 red_formatted = (red >> 5) << 1 # 0bRRR0 # Mask colours together to give the formatted colour return blue_formatted | green_formatted | red_formatted
b47cb752c91fc324bd29037d8736d5302db6795c
464,654
import re def number_format(number_string, fill=2): """ add padding zeros to make aligned numbers ex. >>> number_format('2') '02' >>> number_format('1-2') '01-02' """ output = [] digits_spliter = r'(?P<digit>\d+)|(?P<nondigit>.)' for token in [m.groups() for m in re.finditer(digits_spliter, number_string)]: if token[0] is None: output.append(token[1]) else: output.append(token[0].zfill(fill)) return ''.join(output)
ee44167b4597fbe7c9f01fa5b26e02d7608c3677
709,103
import socket def _check_usage(host: str, port: int) -> bool: """ Checks to see whether or not the specified port is utilized and returns a boolean indicating whether it is or not. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) return not bool(sock.connect_ex((host, port)))
523f4193c3d8bdb755f0124cf9860c8c710ad8c7
210,751
def get_cleaned_sentences_as_strings(cleaned_title, cleaned_description): """ Combines the title with the description sentences and makes every sentence into a string """ sentences_as_tokens = [cleaned_title] + cleaned_description sentences_as_strings = [" ".join(sentence_tokens) for sentence_tokens in sentences_as_tokens] return sentences_as_strings
82a3d58adc5341f4960ca79b38e9e9d51edc314a
177,182
def u2q(u1, u2, warnings=True): """ Take the linear and quadratic terms of the quadratic limb-darkening parameterization -- called `u_1` and `u_2` in Kipping 2013 or `a` and `b` in Claret et al. 2013 -- and convert them to `q_1` and `q_2` as described in Kipping 2013: http://adsabs.harvard.edu/abs/2013MNRAS.435.2152K Parameters ---------- u1 : float Linear component of quadratic limb-darkening u2 : float Quadratic component of quadratic limb-darkening Returns ------- (q1, q2) : tuple of floats Kipping (2013) style quadratic limb-darkening parameters """ q1 = (u1 + u2)**2 q2 = 0.5*u1/(u1+u2) if warnings and (u1 < 0 or u2 < 0): print("WARNING: The quadratic limb-darkening parameters " + "u1={0:.3f} or u2={1:.3f} violate Kipping's ".format(u1, u2) + "conditions for a monotonically increasing or everywhere-" + "positive intensity profile. Returning them as is.") return q1, q2
baa934c792be8e0b72a9ede9a1431f356f9496fa
691,759
def get_loggers_config(config_data): """Extracts loggers configuration from configuration data :param config_data: A configuration file data :return: A dictionary of the loggers configuration """ loggers = {k: config_data[k] for k in ('loggers',)} loggers = loggers['loggers'] return loggers
0d095879ce8c51fb0ee8c9bdfb7fb5286c1ee67b
369,095
import re def get_latest_hub_per_task(hub_module_paths): """Get latest hub module for each task. The hub module path should match format ".*/hub/[0-9]*/module/.*". Example usage: get_latest_hub_per_task(expand_glob(["/cns/el-d/home/dune/representation/" "xzhai/1899361/*/export/hub/*/module/"])) returns the 4 latest hub modules from 4 tasks respectively. Args: hub_module_paths: a list of hub module paths. Returns: A list of latest hub modules for each task. """ task_to_path = {} for path in hub_module_paths: task_name, module_name = path.split("/hub/") timestamp = int(re.findall(r"([0-9]*)/module", module_name)[0]) current_path = task_to_path.get(task_name, "0/module") current_timestamp = int(re.findall(r"([0-9]*)/module", current_path)[0]) if current_timestamp < timestamp: task_to_path[task_name] = path return sorted(task_to_path.values())
17d5f763ca3837dd6cf1fe1dff5b265a64dfa976
671,915
import torch def get_dihedral_torch(c1, c2, c3, c4, c5): """ Returns the dihedral angle in radians. Will use atan2 formula from: https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics """ u1 = c2 - c1 u2 = c3 - c2 u3 = c4 - c3 u4 = c5 - c4 return torch.atan2( torch.dot( torch.norm(u2) * u1, torch.cross(u3,u4) ), torch.dot( torch.cross(u1,u2), torch.cross(u3, u4) ) )
7affbd90734b35d22ec2526b75cb6143ec9ab87d
129,030
def _internal(func): """ Mark this function as internal. """ func.internal = True return func
bede1d38d846ecca46a78ae17b5031aa3080a14a
287,512
def _get_tx_params(w3, address, value=None, gas=None): """Get generic transaction parameters.""" params = { "from": address, "nonce": w3.eth.getTransactionCount(address), } if value: params["value"] = value if gas: params["gas"] = gas return params
39d265ae8a5b8ce476920b02654c012164f80763
354,454
def truncate_roi(orig_roi, src_image_size): """ Returns truncated ROI for source and destination images. Crops ROI so that image edges are handled correctly. """ # Set x position of ROI if orig_roi[0] < 0: src_x = 0 dst_x = -orig_roi[0] w = orig_roi[2] + orig_roi[0] else: src_x = orig_roi[0] dst_x = 0 w = orig_roi[2] # Set y position of ROI if orig_roi[1] < 0: src_y = 0 dst_y = -orig_roi[1] h = orig_roi[3] + orig_roi[1] else: src_y = orig_roi[1] dst_y = 0 h = orig_roi[3] # Set width of ROI if (src_x + w) >= src_image_size[0]: w = src_image_size[0] - src_x - 1 # Set height of ROI if (src_y + h) >= src_image_size[1]: h = src_image_size[1] - src_y - 1 # Create source and destination image ROIs src_roi = src_x, src_y, w, h dst_roi = dst_x, dst_y, w, h return src_roi, dst_roi
ce5699a8771585ebffa7470287143e89771c5b25
72,459
import math def y_values_between_points(average, left_pt, right_pt): """Return the list of values between left_pt and right_pt.""" start = int(math.floor(left_pt.x)) end = int(math.ceil(right_pt.x)) + 1 return [average[i] for i in range(start, end)]
cbfcd56137f5bc670e8fa61e8a731ca26dba8e25
106,180
def camel_case(snake_str): """ Returns a camel-cased version of a string. :param snake_str: any :class:`str` object. Usage: >>> camel_case('foo_bar') "fooBar" """ components = snake_str.split('_') # We capitalize the first letter of each component except the first one # with the 'title' method and join them together. return components[0] + "".join(x.title() for x in components[1:])
4e202d63f8e8c971597e99ee52af73c03cecb632
112,199
def reduce_max(x, axis, keepdims): """Reduces input_tensor along the dimensions given in axis Parameters ---------- x: tensor to reduce axis: dimensions to reduce, python list keepdims: if true, retains reduced dimensions with length 1 Returns ------- x_red: reduced tensor """ x_red = x for n in axis: x_red = x_red.max(dim=n, keepdim=keepdims).values return x_red
79d65571ecb733cfe24095d225f84b5a2cff91d5
206,157
def _int(value): """ Converts integer string values to integer >>> _int('500K') 500000 :param value: string :return: integer """ value = value.replace(",", "") num_map = {"K": 1000, "M": 1000000, "B": 1000000000} if value.isdigit(): value = int(value) else: if len(value) > 1: value = value.strip() value = float(value[:-1]) * num_map.get(value[-1].upper(), 1) return int(value)
0b71dd22fa39e9b6450050299046c4c29b21c27e
469,513
def omit_empty(dictionary): """Omit key in dictionary if value is not truthy""" return {key: value for key, value in dictionary.items() if value}
c83eacdce582a82b7f24a602f27a766c08e528a6
143,879
def _hostname_matches(cert_pattern, actual_hostname): """ :type cert_pattern: `bytes` :type actual_hostname: `bytes` :return: `True` if *cert_pattern* matches *actual_hostname*, else `False`. :rtype: `bool` """ if b"*" in cert_pattern: cert_head, cert_tail = cert_pattern.split(b".", 1) actual_head, actual_tail = actual_hostname.split(b".", 1) if cert_tail != actual_tail: return False # No patterns for IDNA if actual_head.startswith(b"xn--"): return False return cert_head == b"*" or cert_head == actual_head else: return cert_pattern == actual_hostname
8974f65a656b4e08da605389621f96cf30979ccf
478,847
def rightrotate_numba(x, c): """ Right rotate the number x by c bytes.""" x &= 0xFFFFFFFF return ((x >> c) | (x << (32 - c))) & 0xFFFFFFFF
635ea6bbd2d56b3f740f7929a40bb13d97ad7984
564,145
def are_datasets_compatible(labeled_dataset_name, unlabeled_dataset_name): """Check if a pair of datasets are compatible for semi-supervised learning. Args: labeled_dataset_name (str): a string identifier. unlabeled_dataset_name (str): a string identifier. Returns: Boolean """ valid_combos = [ ("cifar_unnormalized", "svhn"), ("svhn", "cifar_unnormalized"), ("svhn", "svhn_extra"), ] return (labeled_dataset_name == unlabeled_dataset_name) or ( labeled_dataset_name, unlabeled_dataset_name, ) in valid_combos
1d6b2c6ae229ad07c83a9adb55dee59f2f300105
527,973
def pad_batch(batch): """ pad sequences in batch with 0s to obtain sequences of identical length """ seq_len = list(map(len, batch)) max_len = max(seq_len) padded_batch = [seq + [0]*(max_len-len(seq)) for seq in batch] return padded_batch, seq_len
251a62bc4c89df9c14e51d7ca53d1130901542ed
596,789
def is_empty(s): """ True if None or string with whitespaces >>> is_empty(None) True >>> is_empty("hello") False >>> is_empty(" \t ") True """ return s is None or len(s) == 0 or s.isspace()
2ee44247416f093ef4bdfa5f9b4cd9a21b556d6e
202,782
def get_study(assc, size=5): """ Return most annotated genes from association dict """ most_annotated = sorted(assc.keys(), key=lambda i: len(assc[i]), reverse=True) study = most_annotated[:size] study = frozenset(study) print(f"### Using the {size} most annotated genes as study: {','.join(study)} ") return study
0e37c6220c0d5e5db0904a673ffe6b4d5a42fd5c
43,693
def coordinate2inx(coordinate, row=8, col=16, im_shape=[300, 600]): """Convert coordinate of top-left corner of bbox into index. Index on solar module looks like: [[0, 1, 2] [3, 4, 5]] Parameters ---------- coordinate: list [x, y] of top-left corner of bbox row, col: int number of rows and columns of solar module im_shape: list Shape of the module image in the form of [height, width] Returns ------- inx: int Index of the bbox """ inx = col * round(coordinate[1] / (im_shape[0] / row)) + round(coordinate[0] / (im_shape[1] / col)) return inx
b30b0e6cd517ae2a8b06e2921a155fdd1a4958bf
529,446
def vec2id(x, limits): """ :param x: A discrete (multidimensional) quantity (often the state vector) :param limits: The limits of the discrete quantity (often statespace_limits) Returns a unique id by determining the number of possible values of ``x`` that lie within ``limits``, and then seeing where this particular value of ``x` falls in that spectrum. .. note:: See :py:meth:`~rlpy.tools.general_tools.id2vec`, the inverse function. .. warning:: This function assumes that (elements of) ``x`` takes integer values, and that ``limits`` are the lower and upper bounds on ``x``. """ if isinstance(x, int): return x _id = 0 for d in range(len(x) - 1, -1, -1): _id *= limits[d] _id += x[d] return _id
acbda772fb19df2e782239608e1c117bf3ab7051
288,601
def contains_three_consecutive_letters(password: str) -> bool: """ Return True if the password has at least one occurrence of three consecutive letters, e.g. abc or xyz, and False if it has no such ocurrences. """ characters = [ord(char) for char in password] return any( (a + 1) == b and (b + 1) == c for a, b, c in [characters[x : x + 3] for x in range(len(password) - 2)] )
519d4075fa85dd36a25a561ab21a76dd75539de3
560,902
import requests def createPost(url, data): """ Internal call for the API Parameters ---------- url : str the url of the api endpoint data : dict the json data as a dictionary object """ resp = requests.post(url, json = data) if resp.status_code != 201: print(resp.text) return resp
82bbaf9de95f298299547e7d4fe71d1911e9feb3
265,487
def k_to_f(tempe): """Receives a temperature in Kelvin and returns it in Fahrenheit""" return (tempe - 273.15) * 9 / 5 + 32
32bd04835584707f2325ff9fe2ec675c315842f3
396,681