content : string (39 to 9.28k characters)
sha1 : string (40 characters)
id : int64 (8 to 710k)
def coef_to_str(c, prec=3):
    """Converts a float coefficient (c) into a string, with precision given by prec."""
    return "{:.{precision}f}".format(c, precision=prec)
4b01e8c10a6d9c443e5d411abb379c8be0bef87c
481,658
def lists2dict(listA, listB):
    """Given two lists of the same length, merge them in one dictionary."""
    return dict(zip(listA, listB))
cbd0068782343b006e5bfa93b6d2030de6ba8ca4
166,380
def get_pyramid_levels(conn, image_id, across_groups=True):
    """Get number of pyramid levels associated with an Image

    Parameters
    ----------
    conn : ``omero.gateway.BlitzGateway`` object
        OMERO connection.
    image_id : int
        ID of ``Image``.
    across_groups : bool, optional
        Defines cross-group behavior of function - set to ``False`` to disable it.

    Returns
    -------
    levels : list of tuples
        Pyramidal levels available for this image, with number of pixels for X and Y axes.

    Examples
    --------
    # Return pyramid levels associated to an image:
    >>> lvls = get_pyramid_levels(conn, 42)
    [(2048, 1600), (1024, 800), (512, 400), (256, 200)]
    """
    image = conn.getObject("image", image_id)
    pix = image._conn.c.sf.createRawPixelsStore()
    pid = image.getPixelsId()
    pix.setPixelsId(pid, False)
    levels = [(r.sizeX, r.sizeY) for r in pix.getResolutionDescriptions()]
    pix.close()
    return levels
aa927588b741caa1d6c84aeffa5c41e7150aa32c
269,658
def as_uppercase(tokens):
    """Converts the token to uppercase if possible."""
    return ''.join(tokens).upper() if tokens else None
c74596acf75b22c38535ccce928cbe3483cc83ed
228,340
def get_height(clust):
    """Return height for given cluster"""
    # Is this an endpoint? Then the height is just 1
    if not clust.left and not clust.right:
        return 1
    # Otherwise the height is the sum of the heights of each branch
    return get_height(clust.left) + get_height(clust.right)
f2b32a27cda7858dced6c40c931c5fe56de23ee0
606,479
def add_line_breaks_to_sequence(sequence, line_length):
    """Wraps sequences to the defined length. All resulting sequences end in a line break."""
    if not sequence:
        return '\n'
    seq_with_breaks = ''
    pos = 0
    while pos < len(sequence):
        seq_with_breaks += sequence[pos:pos + line_length] + '\n'
        pos += line_length
    return seq_with_breaks
6cb9101690b173604c0dac5f0b9300f3a6db4e1a
63,414
def process(workflow_instance, **kwargs):
    """Process an instance of a workflow using the runtime parameters given by ``kwargs``."""
    instance = workflow_instance(**kwargs)
    instance.process()
    out = instance.get_outputs()
    instance.serialize_state()
    return out
4417b2d9dac61eec1c905586bf9303d041ec4ccb
120,647
def is_read_variable_op(op):
    """Is variable read operation."""
    return op.type in ["Identity"] and \
        op.inputs[0].op.type in ["Variable", "VariableV2"]
fa8507e86ba5582febad6505d1ee0fb5b952e601
527,640
import socket
import ipaddress

def _verify_hostname(host):
    """Verify a hostname is resolvable."""
    try:
        resolved = socket.getaddrinfo(host, None)[0][4][0]
        ip = ipaddress.ip_address(resolved)
        return ip
    except (socket.gaierror, ValueError):
        return False
d0b74fdcf7fabec0083cd5de295199077ad6e676
45,749
def left_remove(text, to_remove):
    """Removes a part of a string, if it starts with it.

    Similar to str.lstrip, see note below on right_remove
    """
    if text.startswith(to_remove):
        return text.replace(to_remove, '', 1)
    else:
        return text
f8b9fa83a78eab45c634e3fcc77e63ad95f3a9cc
595,029
def rounder(num, digits=None):
    """Round a floating point number to given number of digits after the decimal point.

    Parameters
    ----------
    num : float
        Number to round.
    digits : int, optional
        Digits after the decimal point.

    Returns
    -------
    int or float
        Rounded number.

    See Also
    --------
    intize

    Notes
    -----
    Variant of `intize`, allowing rounding to custom decimal precision.
    """
    near = round(num * 2, digits) / 2
    if abs(num - near) <= (1e-7 / 10 ** digits if digits else 1e-7):
        return round(near, digits)
    else:
        return round(num, digits)
110aeb6a6e0e54ba4fb9c1aefa8fbd43b2987130
79,560
def to_int_tuple(a):
    """Convert an array of strings to a tuple of ints.

    :param [str] a: array of strings to convert
    """
    return tuple([int(x.strip()) for x in a])
8812932813822f40be727352cc0ee561736fe5b0
332,495
def from_aws_tags(tags):
    """Convert tags from AWS format [{'Key': key, 'Value': value}] to a dictionary.

    :param tags: tags in AWS format
    :return: dictionary mapping tag keys to values
    """
    return {tag['Key']: tag['Value'] for tag in tags}
a58931a29302154cc01656ece403d1468db1a6ab
25,374
def clean_response(result):
    """Clean a response by removing unnecessary fields"""
    result = result["PET"]
    try:
        del result["BLOQ"]
    except (KeyError, TypeError):
        pass
    return result
3c658d4fea334f3a843de6c3af3e06bd669bfde9
592,279
def efficientnet_params(model_name):
    """Map EfficientNet model name to parameter coefficients."""
    params_dict = {
        # (width_coefficient, depth_coefficient, image_size, dropout_rate)
        'efficientnet_lite0': (1.0, 1.0, 224, 0.2),
        'efficientnet_lite1': (1.0, 1.1, 240, 0.2),
        'efficientnet_lite2': (1.1, 1.2, 260, 0.3),
        'efficientnet_lite3': (1.2, 1.4, 280, 0.3),
        'efficientnet_lite4': (1.4, 1.8, 300, 0.3),
    }
    return params_dict[model_name]
1757386df903c8f04a2bc2cf1e29ce3a1feb9606
257,578
def to_snake(m):
    """Convert to snake case."""
    return '_' + m.group(0).lower()
8ea8a1fc7ada2b3a280442cc212aacfa6eae25b4
460,367
def _get_attention_weights(cell, is_training):
    """Obtain the attention weights if needed."""
    weights = None
    if is_training:
        weights = cell._cells[-1]._cell.attention_weights  # pylint: disable=W0212
    else:
        weights = cell._cells[-1].attention_weights  # pylint: disable=W0212
    return weights
72ebafd09e43a4ac0d732c395a9e69aade70fad3
435,560
import torch

def compute_scores(users_embeds: torch.Tensor, items_embeds: torch.Tensor,
                   items_bias: torch.Tensor) -> torch.Tensor:
    """
    Args:
        users_embeds(torch.Tensor): shape (batch_size, items_total, embed_dim)
        items_embeds(torch.Tensor): shape (items_total, embed_dim)
        items_bias(torch.Tensor): shape (items_total)

    Returns:
        scores(torch.Tensor): shape (batch_size, items_total)
    """
    scores = (users_embeds * items_embeds).sum(-1) + items_bias
    return scores
8528963a23efef270b467ec6f039b5a8733d3b4f
12,176
def GetRunKeyName(name):
    """Returns a str used to uniquely identify a run."""
    return 'Run_%s' % name
113933373ae1a38408912453a1e84586ba78dca7
629,076
def sanitize(name):
    """Sanitize the specified ``name`` for use with breathe directives.

    **Parameters**
        ``name`` (:class:`python:str`)
            The name to be sanitized.

    **Return**
        :class:`python:str`
            The input ``name`` sanitized to use with breathe directives
            (primarily for use with ``.. doxygenfunction::``).  Replacements
            such as ``"&lt;" -> "<"`` are performed, and stray spaces such as
            ``"< " -> "<"`` are removed.  Breathe is particularly sensitive
            with respect to whitespace.
    """
    return name.replace(
        "&lt;", "<"
    ).replace(
        "&gt;", ">"
    ).replace(
        "&amp;", "&"
    ).replace(
        "< ", "<"
    ).replace(
        " >", ">"
    ).replace(
        " &", "&"
    ).replace(
        "& ", "&"
    )
3585902680c05bb681df36ba4083525b808e71de
658,489
def listify_multiline_string(string):
    """
    Return a list constructed by splitting the given multiline string,
    stripping whitespace, and filtering out empty values.

    :param string: The multiline string to convert into a list.
    :return: The resulting list.
    """
    result = [i.strip() for i in string.splitlines()]
    return list(filter(None, result))
3234b4e07cd8b47c9ca30dc2cd5e866ddee76969
40,017
def strip_wrapping(html):
    """Removes the wrapping that might have resulted when using get_html_tree()."""
    if html.startswith('<div>') and html.endswith('</div>'):
        html = html[5:-6]
    return html.strip()
4a9aa4e8d49f79f53669fdfce85164b500018907
110,208
import re

def validate_name(name):
    """
    name should conform to the pattern ^[a-z][a-z0-9-]{1,28}[a-z0-9]$ as
    specified in the Heroku API. Returns True if name conforms to Heroku API
    naming standards, False otherwise.
    """
    name_regex = re.compile(r"^[a-z][a-z0-9-]{1,28}[a-z0-9]$")
    return True if name_regex.search(name) is not None else False
922c947a952809e84126f654628d9a80932b9e4d
358,568
def munge_hold_arg(hold_arg):
    """Takes the --hold parameter and translates it for SLURM"""
    if ':' in hold_arg:
        # pass it as-is
        return hold_arg
    else:
        return 'afterok:' + ':'.join([str(int(j)) for j in hold_arg.split(',')])
1623c0c6b810a11739e8856744bce3cbfcb6adb5
330,620
def _lc(filename: str) -> int:
    """
    Helper function to count the number of lines in a file.

    :param filename: name of the file to be read -- assumed to be plain text.
    :returns: count of the number of lines in file
    """
    c = None
    with open(filename, 'r') as fh:
        c = 0
        for line in fh:
            c += 1
    return c
543e8e801fdaabb903de6d2fc36a08c1778a7141
514,366
def render_subcommand(args):
    """Render a subcommand for human-centric viewing"""
    if args.subcommand == 'delete':
        return 'delete ' + args.delete_subcommand
    else:
        return args.subcommand
af0577f30b6ef62b5cc177de381b54c4ca00795c
565,620
def merge_dict(base, delta):
    """
    Recursively merge configuration dictionaries.

    Args:
        base: Target for merge
        delta: Dictionary to merge into base
    """
    for k, dv in delta.items():
        bv = base.get(k)
        if isinstance(dv, dict) and isinstance(bv, dict):
            merge_dict(bv, dv)
        else:
            base[k] = dv
    return base
623bda8638fd9d172deb770d3cedc958f2a0e9b2
450,158
def quality_index(dat, colname):
    """Return the index for `colname` in `dat`"""
    colname = colname.split(':')[0]
    return list(dat.dtype.names).index(colname)
d13f89bd6da6ea09637433d2ed7c6378a738a6cf
691,293
def _insert_space(end_index, string):
    """Inserts the number of spaces required given the string and the position
    of the next non-blank field.

    Parameters:
        end_index : int
            Expected string length
        string : str
            String to add spaces to

    Returns:
        string_with_blanks : str
            String with spaces padded on the end
    """
    string += ' ' * (end_index - len(string))
    return string
ac916d1639b9e78c4729d8a65ccdcbed6a2a61f8
590,631
def to_chunks(lst, n):
    """List of sublists of size n from lst

    :param lst: List
    :param n: Integer
    :returns: List
    """
    res = []
    for i in range(0, len(lst), n):
        res.append(lst[i:i + n])
    return res
66e5276377737c0271794bee3620a20635b0d958
565,302
def brew_ids(request):
    """Test ids for brew_ids."""
    return request.param
75e4b1d3560904956a2e07eac7d05b91a52d7b6b
253,829
def to_full_resource_name(full_parent_name, resource_type_name):
    """Creates a full resource name by parent full name and type name.

    Args:
        full_parent_name (str): the full_resource_name of the parent
        resource_type_name (str): the full_resource_name of the child

    Returns:
        str: full_resource_name of the child
    """
    # Strip out the fake composite root parent from the full resource name.
    if full_parent_name == 'composite_root/root/':
        return '{}/'.format(resource_type_name)

    # For resource names that contain embedded /s, set the full type name
    # to just the first and last part.
    type_name_parts = resource_type_name.split('/')
    if len(type_name_parts) > 2:
        resource_type_name = '{}/{}'.format(type_name_parts[0],
                                            type_name_parts[-1])

    return '{}{}/'.format(full_parent_name, resource_type_name)
36c1bdb426980a86f6a007edc7f1c5bdba37e235
565,430
def get_spaced_colors(n, alpha):
    """Helper function. Generate a list of different colors (as RGBA format) with given alpha"""
    max_value = 16581375  # 255**3
    interval = int(max_value / n)
    colors = [hex(I)[2:].zfill(6) for I in range(0, max_value, interval)]
    RGBA_colors = [(int(i[1::3], 16) / 255, int(i[0::3], 16) / 255,
                    int(i[2::3], 16) / 255, alpha) for i in colors]
    return RGBA_colors
cb251d4b2a624c1767d06573d4bd00e0ee2236fc
448,985
def _int_version(version_string):
    """Get the major version from a version string as an int."""
    major = int(version_string.split(".")[0])
    return major
14705e221afe2c9c35f7d77f3a201a40f06698de
537,246
def select_relevant_profiles(all_profiles):
    """Select relevant profiles

    criteria:
        * is public
        * region is selected region
        * AGE specified
        * GENDER SPECIFIED
    """
    public_condition = all_profiles["public"] == 1
    age_condition = all_profiles["AGE"] > 14
    gender_condition = all_profiles["gender"].isin([0, 1])
    return all_profiles.loc[public_condition & age_condition & gender_condition]
08c4980ec96ac836806f44ea7d4dfb8e09d6265c
75,440
def do_intervals_intersect(a, b):
    """Returns true if the given 2-tuples overlap.

    Args:
        a: 2-tuple containing integer coordinates representing a half-open interval.
        b: 2-tuple containing integer coordinates representing the other half-open interval.

    Returns:
        True if a and b overlap.
    """
    return a[0] < b[1] and a[1] > b[0]
2e20578fdfaf4de682de55ffdd20bf4d923e1545
389,823
def check_triggers(input_line: str, triggers: list) -> int:
    """Check the presence of the trigger in the `input_line`.

    Parameters:
        input_line (str) : string to be checked
        triggers (list) : list of triggers (templated variables)

    Returns:
        out_id (int) : id of the trigger item in the list
    """
    out_id = -1
    for id, trig in enumerate(triggers):
        if trig in input_line or trig.upper() in input_line:
            out_id = id
            return out_id
    return out_id
4145f9c686f018dbe2d016cc251b5b4773e9a981
402,500
def initialize(G, s):
    """Initialize graph G and vertex s."""
    V, E = G
    d = {v: float('inf') for v in V}
    p = {v: None for v in V}
    d[s] = 0
    return d, p
2dfb199b0b5a46c10c3f256a0b2dd826afa8a635
219,383
from typing import List
import socket

def resolve(hostname: str) -> List[str]:
    """Do A record lookup, return list of IPs."""
    return socket.gethostbyname_ex(hostname)[2]
8fcd7007e56a9cdae7a241604999ad6fb25717ac
177,668
import base64

def image_to_base_64_bytes(path):
    """
    This takes a path and returns a byte array from the image.
    This byte array is base 64.

    Exceptions:
        IOError if the path does not exist.
    """
    with open(path, "rb") as image:
        data = base64.standard_b64encode(image.read())
        return data
6dc65efdc33d90c9a3e419a5ff20de50827b85d1
170,908
def trim_urls(attrs, new=False):
    """Bleach linkify callback to shorten overly-long URLs in the text.

    Pretty much straight out of the bleach docs.
    https://bleach.readthedocs.io/en/latest/linkify.html#altering-attributes
    """
    if not new:  # Only looking at newly-created links.
        return attrs

    # _text will be the same as the URL for new links.
    text = attrs['_text']
    if len(text) > 32:
        attrs['_text'] = text[0:30] + '...'
    return attrs
57558ab63f042708c88cabe8c9aee94c4237c05c
644,735
import time

def str2timestamp(time_str, time_format):
    """Convert a time string to a Unix timestamp.

    Args:
        time_str: the time string
        time_format: format used to parse the string, e.g. '%Y-%m-%d %H:%M:%S'

    Returns:
        Unix timestamp as a float, in seconds.
    """
    return time.mktime(time.strptime(time_str, time_format))
14e71c8826a5f2f7d93daaa6733c0f57c5d774a9
141,796
def purchase_intention(people_who_declared_interest, total_people):
    """Returns the purchase intention rate for a product.

    This can be used for cart-to-detail, buy-to-detail, and similar calculations.

    Args:
        people_who_declared_interest (int): Number of people who declared interest in a product.
        total_people (int): Total number of people.

    Returns:
        Percentage of people who were interested in a product.
    """
    return (people_who_declared_interest / total_people) * 100
fa5e4c16fa3a796c2297d9b39b1e5ccc9f3459ce
640,928
from typing import Dict
from typing import Any
import collections

def convert_dict_key(key_map: Dict[str, str],
                     dict_to_convert: Dict[str, Any],
                     keep_unmapped_keys: bool = False) -> Dict[str, Any]:
    """Convert all keys in dictionary to the new key provided in mapping.

    Args:
        key_map: A mapping from old key to new key.
        dict_to_convert: The dictionary to be converted.
        keep_unmapped_keys: Keep the key, value pair as is if not in key_map.

    Returns:
        A dictionary with the same values as dict_to_convert with the new keys
        provided in key_map.
    """
    d = collections.OrderedDict()
    for k, v in dict_to_convert.items():
        if k in key_map:
            d[key_map[k]] = v
        elif keep_unmapped_keys:
            d[k] = v
        else:
            # This will only happen when we have an invalid explain metadata.
            raise ValueError("Conversion failed. Key {} not in key map {}.".format(
                k, repr(key_map)))
    return d
93f440ef627180371115095367c55420fbdcd051
311,082
def calc_euclidean_gcd(left: int, right: int) -> int:
    """Find greatest common divisor using Euclidean Algorithm."""
    if right == 0:
        return left
    return calc_euclidean_gcd(left=right, right=left % right)
e23ef880804777c6965921d5917ce0d1b1a0d6e6
616,467
def add_to_fieldsets(section=True, collapse=False):
    """
    Adds gatekeeper fields to your ModelAdmin fieldsets.

    Options:
        Section: you can add the fields either as its own section or as part of a section.
        Collapse: whether the section should be collapsible or not.

    How to use:

        # section = False
        fieldsets = (
            (None, {
                'fields': (
                    ('pk',),
                    gatekeeper_add_to_fieldsets(section=False),
                ),
            }),
        )

        # section = True
        fieldsets = (
            (None, {
                'fields': (
                    ('pk',),
                ),
            }),
            gatekeeper_add_to_fieldsets(section=True),
        )
    """
    fields = ('publish_status', 'show_publish_status', 'live_as_of')
    if section:
        if collapse:
            d = {'classes': ('collapse',), 'fields': fields, }
        else:
            d = {'fields': fields, }
        s = ('Publishing', d)
        return s
    return fields
509f9cad52c2c354fdb2efc704087c436092f458
167,766
import json

def loads(string, **kwargs):
    """Load object from JSON string."""
    return json.loads(string, **kwargs)
349b41b141ee97263fb69f0d23c1c3bfe756f698
618,675
def yes_or_no(question):
    """Get a y/n answer from the user"""
    while "the answer is invalid":
        reply = str(input(question + ' (y/n): ')).lower().strip()
        if reply[:1] == 'y':
            return True
        if reply[:1] == 'n':
            return False
31990e3e6eb6274a0786583ae3ece8c57dfce7b1
604,880
def make_cav_dir_path(cav_code):
    """Make path to schema directory"""
    if len(cav_code) == 2:
        return "schemas/{start}".format(start=cav_code[:2])
    return "schemas/{start}/{list}".format(start=cav_code[:2],
                                           list='/'.join(list(cav_code[2:])))
9adccc3154ce4c15e9a2e11573dbcaa956d75e9d
74,274
def render_dummy(title: str) -> str:
    """Render dummy markdown."""
    return fr"""---
title: {title}
...

::: {{style="display:none"}}
```c
```
:::
"""
8e8c6f6e1f088b2a69406185ef8fd6204e145080
165,148
def strip_d_hyphen(text):
    """Return string without double hyphens. Keep other punctuation."""
    text = text.replace('--', ' ')
    return text
465e455ab8371c7684e9b0b1733eace585cda1a7
549,905
def test_data_splitter(batch_data, pred_len):
    """
    Split data [batch_size, total_len, 2] into datax and datay in train mode

    :param batch_data: data to be split
    :param pred_len: length of trajectories in final loss calculation
    :return: datax, datay
    """
    return batch_data[:, :-pred_len, :], batch_data[:, -pred_len:, :]
dec86627beaac446997b6eb98218b5ecf1674f9f
364,725
def is_workfunction(function):
    """
    Return whether the given function is a workfunction

    :param function: a function
    :returns: True if the function is a wrapped workfunction, False otherwise
    """
    try:
        return function.is_workfunction
    except AttributeError:
        return False
f81a5c41d6dc6daf8e3f5abcd97cb44604485a3c
330,589
def life(cash_flows):
    """Calculates the life of the cash flows.

    :param cash_flows: The cash flows.
    :return: The life.
    """
    return max(cash_flow.time for cash_flow in cash_flows)
82117274b722a56eacb7e7203cb2daeb7c6f304c
567,594
def merge(*dicts):
    """Merges the given dictionaries into a single dictionary; for overlapping
    keys, the value from the last dictionary wins."""
    out = dict()
    for dictionary in dicts:
        for (key, val) in dictionary.items():
            out[key] = val
    return out
c9a3899407b36357c046bed594d5939bc7aab4b3
18,973
import csv

def read_csv_as_table(csv_input_file_name, skip_first_line=False):
    """Read the given CSV file, parse it, and return as list of lists."""
    output = []
    with open(csv_input_file_name, 'r') as fin:
        csv_content = csv.reader(fin, delimiter=',')
        if skip_first_line:
            next(csv_content, None)
        for row in csv_content:
            output.append(row)
    return output
33a0adfb0e3c2f28577850810379939b935d6720
154,612
def objdict_to_dict(objdict):
    """
    Convert an objdict structure into a dict structure

    Args:
        objdict (objdict): the objdict to convert

    Returns:
        dict: the objdict as a standard dictionary
    """
    d = {}
    if objdict:
        for k, v in objdict.items():
            if type(v) == dict:
                d[k] = objdict_to_dict(v)
            else:
                d[k] = v
    return d
11e0cbe6e208e69742822fcf643a19975b431330
629,937
def safe_position(n):
    """
    Get the safe position using the formula:
        n = 2^a + l
        W(n) = 2l + 1
    where n is the total number, a is the largest power of two that fits in n,
    and l is the remainder after that power of two is deducted from n.
    """
    pow_two = 0
    i = 0
    while (n - pow_two) >= pow_two:
        pow_two = 2 ** i
        i = i + 1
    l = n - pow_two
    safe_p = (2 * l) + 1
    return safe_p
66d88a9d91d879682d5a1cad463dc8dc1a330d6d
551,366
def map_cap_to_opnames(instructions):
    """Maps capabilities to instructions enabled by those capabilities

    Arguments:
      - instructions: a list containing a subset of SPIR-V instructions' grammar

    Returns:
      - A map with keys representing capabilities and values of lists of
        instructions enabled by the corresponding key
    """
    cap_to_inst = {}

    for inst in instructions:
        caps = inst['capabilities'] if 'capabilities' in inst else ['0_core_0']
        for cap in caps:
            if cap not in cap_to_inst:
                cap_to_inst[cap] = []
            cap_to_inst[cap].append(inst['opname'])

    return cap_to_inst
45fea02c7a25a7d0e06f4aa3328fab530940093d
89,668
def max_class_score(class_scores, return_total=False):
    """
    Get the class with the highest score, score (and score total) from a
    mapping of classes to scores

    Parameters
    ----------
    :param class_scores: dict mapping classes to their scores
        Format: {class: score, ... (all items)}
    :param return_total: boolean denoting whether or not the total score should be returned

    Return
    ------
    :return: class with the highest score, score (and score total)
        Format: [[max_class, score], total]
    """
    best = (None, -1)
    total = float(0)
    for item_score in class_scores.items():
        total += item_score[1]
        if best[1] < item_score[1]:
            best = item_score
    return (best, total) if return_total else best
b42310143085fb1136b2949e847d45fd3f83c8e6
113,874
def local_url(url, code=None):
    """Replace occurrences of `{locale_code}` in URL with provided code."""
    code = code or "en-US"
    return url.format(locale_code=code)
b5f9cb32f2fd722ce05cf792fdf3b2dd40880154
567,632
import time

def get_ids_dates(df, c, id_date_dict, unique_ids):
    """Gets data (ids & dates) from SureChemBL dataframe

    Builds a list of unique ids, and a dictionary of ids associated with the
    earliest date of entry. Applied to both compounds or patents.

    Args:
        df: individual dataframe of SureChemBL data (from read_data())
        c: "cpdID" or "patentID", depending on which data is used
        id_date_dict: existing dictionary linking ids & dates
        unique_ids: existing list of unique ids

    Returns:
        list of all unique ids, dictionary of all unique ids with earliest date of entry
    """
    # Find unique compounds
    unique_ids = list(set(df[c].tolist() + unique_ids))

    for index, row in df.iterrows():  # , total=df.shape[0]:
        # Add ID if not present in database
        if row[c] not in id_date_dict:
            id_date_dict[row[c]] = row["Date"]
        else:
            # If the id is there, check if the patent date is earlier than before
            if time.strptime(row["Date"], "%Y-%m-%d") < time.strptime(
                    id_date_dict[row[c]], "%Y-%m-%d"):
                # If so, replace date with earlier time
                id_date_dict[row[c]] = row["Date"]

    return unique_ids, id_date_dict
b2d2a2bb045a4a57e7a6de85b3bcba31cc87ba8e
455,696
def slack_escape(text: str) -> str:
    """
    Escape special control characters in text formatted for Slack's markup.

    This applies escaping rules as documented on
    https://api.slack.com/reference/surfaces/formatting#escaping
    """
    return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
46dd7c138e117a45b4b567d9b63a15de8b4332a0
624,505
def add_iscsi_auth_group(client, tag, secrets=None):
    """Add authentication group for CHAP authentication.

    Args:
        tag: Authentication group tag (unique, integer > 0).
        secrets: Array of secrets objects (optional).

    Returns:
        True or False
    """
    params = {'tag': tag}

    if secrets:
        params['secrets'] = secrets
    return client.call('add_iscsi_auth_group', params)
aa169099018d9efd5721445635b2fb21d24bf2c1
582,019
import math

def entropy(data):
    """Calculate informational entropy."""
    entropy = 0.0
    frequency = {}

    for instance in data:
        p_instance = int(round(instance / 5) * 5)
        if p_instance in frequency:
            frequency[p_instance] += 1
        else:
            frequency[p_instance] = 1

    for freq in frequency.values():
        entropy += (-freq / len(data)) * math.log(float(freq) / len(data), 2)

    return entropy
4b96c229e4cc0318a764990569d2951003447a72
699,044
def get_topo(idx, conn):
    """Get the topology for an id.

    Args:
        idx: Protein id.
        conn: Sqlite3 connection object.

    Returns:
        Tuple of (num_tms, num_reentrant).
    """
    return conn.cursor().execute(
        'SELECT num_tms, num_res FROM proteins WHERE id=?', (idx, )).fetchone()
0e55ef4100e34acfe148563018fa92d85c934480
417,090
def separate_punctuation(x, punctuations):
    """Add space around pre-defined punctuations"""
    for p in punctuations:
        x = x.replace(p, f" {p} ")
    return x
10ac16f3ec5bfaf84c79d489703faf744781ac8c
301,313
def build_entity(key, value):
    """
    build_entity returns a dict that can be passed back to rasa as an entity,
    using the given string key and value
    """
    return {"entity": key, "value": value, "start": 0, "end": 0}
3e29ed0d9dc3adad003ff95b75a8c46edd0becce
638,333
from typing import Any
import math

def nan_to_none(value: Any) -> Any:
    """Convert NaN values to None.

    Parameters
    ----------
    value: any
        Cell value.

    Returns
    -------
    any
    """
    if isinstance(value, int) or isinstance(value, float):
        return None if math.isnan(value) else value
    return value
f48fe7ae6e5365d53b99bb7f0c9ee2316377dd11
131,307
import re

def upload_via_vredditcc(url):
    """Generate video link for https://vreddit.cc"""
    vreddit_video = re.compile(r'https?://v\.redd\.it/(\w+)')
    vreddit_id = vreddit_video.findall(url)[0]
    return "https://vreddit.cc/" + vreddit_id
9affaa74dd152fb852264c546d2b4aeba1115d29
429,509
def evap_volume_OILTRANS(k, area, p_oil, molar_v, T, molar_fraction=1, R=8.314):
    """
    Return the evaporated fraction by unit of time [/s]

    source: (Berry et al., 2012)

    Parameters
    ----------
    k : mass transfer coefficient [m/s]
    area : Area [m²]
    p_oil : Vapor pressure of the hydrocarbon [Pa]
    molar_v : Molar volume [m³/mol]
    T : Temperature [K]
    molar_fraction : Molar fraction, can be other than 1 if not pure
    R : Perfect gas constant, the default is 8.314 [J/mol K]
    """
    return (k * area * p_oil * molar_v * molar_fraction) / (R * T)
6ee524f0fd6face8cc8037a80607b720663557fc
574,070
def return_statement(printer, ast):
    """Prints "return expr;"."""
    expr_str = printer.ast_to_string(ast["expr"])
    return f'return {expr_str};'
99f9126297407c7ddeb77a50a44e2e0c0727331f
230,274
def _delta(i, j):
    """The Kronecker delta. Returns 1 if i == j, 0 otherwise."""
    return int(i == j)
0604992804236694b4698679deed5f190eee137c
88,520
def grade_display(actual, max_grade):
    """Nicely formats the grades for display to the user"""
    def formatter(value):
        if round(value, 8) == round(value, 0):
            return '%0.0f' % value
        else:
            return '%0.1f' % value

    if actual is not None:
        return '%s/%s' % (formatter(actual), formatter(max_grade))
    else:
        return '%s' % formatter(max_grade)
8fdf05afd4ada8d9a9f10079ea25845b9462c639
177,529
import math

def mercator2wgs(mercatorLat, mercatorLon):
    """
    Web mercator to WGS-84
    mercatorLat -> y
    mercatorLon -> x
    """
    x = mercatorLon / 20037508.34 * 180
    y = mercatorLat / 20037508.34 * 180
    y = 180 / math.pi * (2 * math.atan(math.exp(y * math.pi / 180)) - math.pi / 2)
    return y, x
a5064ae82b70aa99abcf727d76f79da40b05094b
538,278
def find_sequences_before(context, strip):
    """
    Returns a list of sequences that are before the strip in the current context
    """
    return [s for s in context.sequences
            if s.frame_final_end <= strip.frame_final_start]
d49a950c06c2a92d076d9790055c21d30afdd627
8,903
from typing import List
from typing import Tuple

def get_lm(l_max: int) -> List[Tuple[int, int]]:
    """Get list of all (l,m) in order up to (and including) l_max"""
    return [(l, m) for l in range(l_max + 1) for m in range(-l, l + 1)]
b18a8da2657032fcec10a7dd0bfb2fdd26275f80
68,728
def gen_workspace_tfvars_files(environment, region):
    """Generate possible Terraform workspace tfvars filenames."""
    return [
        # Give preference to explicit environment-region files
        "%s-%s.tfvars" % (environment, region),
        # Fallback to environment name only
        "%s.tfvars" % environment,
    ]
a1e822451b9652b846eaf21c6323943678e75d84
682,893
def prepare_commentdoc(s):
    """
    Extract documentation comment lines (starting with #:) and return them as a
    list of lines.  Returns an empty list if there is no documentation.
    """
    result = []
    lines = [line.strip() for line in s.expandtabs().splitlines()]
    for line in lines:
        if line.startswith('#:'):
            line = line[2:]
            # the first space after the comment is ignored
            if line and line[0] == ' ':
                line = line[1:]
            result.append(line)
    if result and result[-1]:
        result.append('')
    return result
f6c7917a572c15c3cfde3719af2b28a8cfec6f42
397,339
def get_provenance_record(caption, ancestor_files, **kwargs):
    """Create a provenance record describing the diagnostic data and plot."""
    record = {
        'caption': caption,
        'authors': ['schlund_manuel'],
        'references': ['acknow_project'],
        'ancestors': ancestor_files,
    }
    record.update(kwargs)
    return record
9ecfe152c21b27854fd9082c63f910b5ccbd9df8
33,985
def set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv,
                                            list_bus_id_power_hiding_priority=None,
                                            list_bus_id_voltage_hiding_priority=None):
    """
    Returns the list of the hidden power bus ids and a list of hidden voltage ids

    :param num_nodes: number of buses in the grid
    :param Ns: Number of observable power measurements in the last time step
    :param Nv: Number of observable voltage measurements in the last time step
    :param list_bus_id_power_hiding_priority: list of bus indices which was sorted according
        to the preferred order of hiding. Index 0 of this list corresponds to the most likely
        bus to be hidden.
    :param list_bus_id_voltage_hiding_priority: list of bus indices which was sorted according
        to the preferred order of hiding. Index 0 of this list corresponds to the most likely
        bus to be hidden.
    :return:
    """
    if list_bus_id_power_hiding_priority is None:
        list_bus_id_power_hiding_priority = list(range(num_nodes))
    if list_bus_id_voltage_hiding_priority is None:
        list_bus_id_voltage_hiding_priority = list(range(num_nodes))

    hidden_power_bus_id_list = []
    next_busid_to_hide = 0
    for bus_id in range(Ns, num_nodes):
        hidden_power_bus_id_list.append(list_bus_id_power_hiding_priority[next_busid_to_hide])
        next_busid_to_hide += 1

    hidden_voltage_bus_id_list = []
    next_busid_to_hide = 0
    for bus_id in range(Nv, num_nodes):
        hidden_voltage_bus_id_list.append(list_bus_id_voltage_hiding_priority[next_busid_to_hide])
        next_busid_to_hide += 1

    hidden_power_bus_id_list.sort()
    hidden_voltage_bus_id_list.sort()
    return hidden_power_bus_id_list, hidden_voltage_bus_id_list
aefdf4e2a4179e732387169a8e0f96e581ee5052
683,730
def partition_hostname(hostname):
    """Return a hostname separated into host and domain parts."""
    parts = hostname.partition('.')
    return dict(hostname=parts[0],
                domainname=parts[2] if parts[1] == '.' else None)
fd56a97ed369e33252dc44e474c10bd1fe7daba0
626,604
def text_length(text):
    """
    Get the effective text length in characters, taking into account newlines
    """
    if not text:
        return 0
    lines = text.split("\n")
    return max(len(line) for line in lines)
d21e5529e23a2e212a6db6092494d2673f688446
245,093
def n_wise(iterable, n):
    """Separate an iterable into groups of n elements"""
    a = iter(iterable)
    return zip(*[a] * n)
b623d2da3ff5d1b6c1a01c362c0ed70137ed6c7f
632,497
def extract_attributes(browser):
    """
    Extract attribute tags of avatar image from web page

    :param browser: opened browser
    :type browser: webdriver.Chrome
    :return: attribute tags of avatar image
    :rtype: list
    """
    attr_tags = browser.find_elements_by_class_name('attr-tag')
    attr_results = []
    for attr_tag in attr_tags:
        if attr_tag.text != '+ Show sub-attributes':
            attr_results.append(attr_tag.text)
        else:
            attr_tag.click()
    return attr_results
2b0bee77949da23b3aee25543c57552903a39ada
464,933
def _parse_is_sub_array_assignment(line):
    """
    :param line: String line
    :returns: True if the statement is a sub array assignment. False otherwise.

    >>> line = 'Map(:,:,1) = [1 2 3];'
    >>> _parse_is_sub_array_assignment(line)
    True
    """
    lhs = line.split(' = ')[0].strip()
    if '(' in lhs:
        return True
    return False
44f521ad5a6c4951bea362399e33d5e4c401f6b1
195,529
def _create_price_offering(
    client,
    header,
    description,
    final_url,
    final_mobile_url,
    price_in_micros,
    currency_code,
    unit,
):
    """Creates a PriceOffering instance and returns it.

    Args:
        client: an initialized GoogleAdsClient instance.
        header: The header of the price offering.
        description: The description of the price offering.
        final_url: The final_url of the price offering.
        final_mobile_url: The final_mobile_url of the price offering.
        price_in_micros: The price of the price offering.
        currency_code: The currency_code of the price offering.
        unit: The price unit of the price offering.

    Returns:
        A PriceOffering instance.
    """
    price_offering = client.get_type("PriceOffering")
    price_offering.header = header
    price_offering.description = description
    price_offering.final_url = final_url
    # Check if this exists, since we pass None for one of the PriceOfferings
    # in the _create_price_asset method and assigning None to this field
    # raises an error.
    if final_mobile_url:
        price_offering.final_mobile_url = final_mobile_url
    price_offering.price.amount_micros = price_in_micros
    price_offering.price.currency_code = currency_code
    price_offering.unit = unit
    return price_offering
836d3fcc041b9d74f5e9c6aa227248eebcf23227
584,512
def calculate_ema(close, periods, previous_ema):
    """
    Calculates the exponential moving average.

    EMA = Price(t)*weighting_multiplier + previous_ema*(1-weighting_multiplier)
    where weighting_multiplier is given by 2/(periods + 1)

    Args:
        close: Float representing the exchange rate at the end of an interval
        periods: Integer representing the number of days in the EMA period (commonly 12 or 26)
        previous_ema: Float representing the last calculated EMA

    Returns:
        Float representing the new EMA
    """
    return close * (2 / (periods + 1)) + previous_ema * (1 - (2 / (periods + 1)))
5aaa30eb204c6cc58f531867a86f0686f79e1d53
528,006
from typing import List

def get_mid_slice_indices(num_slices: int, mid_slice_range: int) -> List[int]:
    """
    Get the indices of the slices around the mid-slice image.

    Args:
        num_slices: The number of slices of the volume.
        mid_slice_range: The number of slices around the mid-slice image on each side
            for which we want the indices.

    Returns:
        The indices of the mid-slice +/- mid_slice_range.
    """
    mid_slice_index = num_slices // 2
    mid_slice_indices = list(range(mid_slice_index - mid_slice_range,
                                   mid_slice_index + mid_slice_range + 1))
    assert mid_slice_indices[0] >= 0
    return mid_slice_indices
59aabfd42153afa77eabe2ea85cf7821af4d1abb
608,313
def process_complete(data):
    """Check the processing status of a data object."""
    busy = ['UP', 'RE', 'WT', 'PR']
    data.update()
    if data.status == 'OK':
        return True
    elif data.status in busy:
        return False
    elif data.status == 'ER':
        raise ValueError('Problem processing data object: {}'.format(data.name))
b00286f0acff9e7b2e492dbe6fbf8c4c0e3642b2
228,269
def get_padding_same(kernel_size, dilation_rate):
    """
    SAME padding implementation given kernel_size and dilation_rate.

    The calculation formula is as follows:
        (F - (k + (k - 1)*(r - 1)) + 2*p)/s + 1 = F_new
        where F: a feature map
              k: kernel size, r: dilation rate, p: padding value, s: stride
              F_new: new feature map

    Args:
        kernel_size (int)
        dilation_rate (int)

    Returns:
        padding_same (int): padding value
    """
    k = kernel_size
    r = dilation_rate
    padding_same = (k + (k - 1) * (r - 1) - 1) // 2
    return padding_same
1ccf185c8170339d533d475eda70dc612cc19cc8
262,023
def version_param(params):
    """Return the value of the HTTP version parameter, or `None` if no version
    parameter is supplied.
    """
    for k, v in params.items():
        if k.lower() == "version":
            return v
61348b87007c253dc6533827de4d8f302779f107
660,879
import yaml
import logging

def replace_firm_names(df, settings_path):
    """Replace firm names as specified in settings.yaml"""
    with open(settings_path, encoding="utf8") as file:
        settings = yaml.safe_load(file)
    try:
        settings["query"]["firm_names"]
    except Exception:
        logging.warning(
            "No firm names specified in settings['query']['firm_name']. "
            "Firm names still contain legal suffix which compromises search results."
        )
    assert (
        "name" in df.columns
    ), "Dataframe has no name column. Firm names cannot be replaced."
    replace_firm_names = settings["query"]["firm_names"]
    df["firm_name"] = df.name.replace(replace_firm_names, regex=True).str.strip()
    return df
af86f12cab7e310495b8081e4d1dde37f8294e67
482,821
import torch

def l2_norm(data_set):
    """Divide each feature vector by its l2 norm."""
    data_set = data_set / (torch.norm(data_set, dim=1).unsqueeze(dim=1) + 0.0001)
    return data_set
0ee891b7f9c8b299b110e30a8894e7c17270f70c
471,633
def strip_quotes(text: str) -> str:
    """Remove the double quotes surrounding a string."""
    text = text.strip(' "')
    return text
faad9880e2a99472fe60cd415413852b67dd575f
306,487
def get_suffix(filename, ignore_dot=True):
    """Get the suffix (extension) of a file name.

    :param filename: the file name
    :param ignore_dot: whether to drop the dot preceding the suffix
    :return: the file's suffix
    """
    # Find the last occurrence of '.' by searching from the end of the string
    pos = filename.rfind('.')
    # Slice the suffix out of the file name
    if pos <= 0:
        return ''
    return filename[pos + 1:] if ignore_dot else filename[pos:]
ea161dc46642a6562b7324b2ecf0a335cedefbda
443,664
def diagpq(p, q=0):
    """
    Return string equivalent metric tensor for signature (p,q).
    """
    n = p + q
    D = ''
    rn = list(range(n))
    for i in rn:
        for j in rn:
            if i == j:
                if i < p:
                    D += '1 '
                else:
                    D += '-1 '
            else:
                D += '0 '
        D = D[:-1] + ','
    return(D)
97f52da6a6b488eb74f9d9b759de0b2907b4295d
618,416
def handle_duplicate_columns(DataFrame, action='rename'):  # or 'drop'
    """
    Parameters
    ----------
    DataFrame : pandas.DataFrame
        DataFrame
    action : {'rename', 'drop'}, default 'rename'
        Action to be taken on duplicate columns

    Returns
    -------
    DataFrame : pandas.DataFrame
    """
    is_duplicate = DataFrame.columns.duplicated()
    columns = list(DataFrame.columns)

    if action == 'rename':
        for i in range(len(columns)):
            if is_duplicate[i]:
                columns[i] = columns[i] + '_'
        DataFrame.columns = columns
    elif action == 'drop':
        DataFrame = DataFrame.loc[:, ~is_duplicate]
    else:
        print('No valid action (rename or drop) provided!')

    return DataFrame
319d381b45eb3585adde1f03dbadf8c1eba400a9
231,116
def is_string(value) -> bool:
    """Check if value is string"""
    return isinstance(value, str)
6e199a8881b48c2bb131ab66e9602dacec8c7fbf
463,543
def _check_sobol(grid, res):
    """
    Checks and unpacks whether sobol interpolation has been requested, and if
    Cartesian interpolation has been requested for Sobol grid, which is invalid.

    Parameters
    ----------
    grid : h5py file
        Handle of grid to process
    res : dict
        Dictionary of all the inputted resolution parameters

    Returns
    -------
    sobol : float/bool
        The scale value for across resolution if Sobol interpolation, False if not Sobol
    """
    # Read gridtype from header | Allow for usage of both h5py 2.10.x and 3.x.x
    # --> If things are encoded as bytes, they must be made into standard strings
    gridtype = grid["header/library_type"][()]
    if isinstance(gridtype, bytes):
        gridtype = gridtype.decode("utf-8")

    # Check type and inputted scale resolution
    if "sobol" in gridtype.lower():
        if res["scale"] < 1.0:
            errstr = "For Sobol type grid only an increase in tracks via the 'scale' "
            errstr += "parameter is possible, please enter a value > 1."
            raise KeyError(errstr)
        sobol = res["scale"]
    elif "cartesian" in gridtype.lower():
        if res["scale"] > 1.0:
            sobol = res["scale"]
        else:
            sobol = False
    elif "isochrones" in gridtype.lower():
        if res["scale"] < 1.0:
            errstr = "For isochrone grids only an increase in isochrones via the "
            errstr += "'scale' parameter is possible, please enter a value > 1."
            raise KeyError(errstr)
        sobol = res["scale"]
    else:
        raise KeyError(
            "Interpolation not possible for grid of type {0}".format(gridtype)
        )

    # Highlight redundant resolution for the user
    if sobol:
        for var in res:
            if (var not in ["scale", "baseparam"]) and res[var] != 0:
                prtstr = "Gridresolution in '{0}' is set but ignored, ".format(var)
                prtstr += "as 'scale' is set for Sobol interpolation."
                print(prtstr)

    return sobol
f92759821bb4f35b79dda8763e071d3fbea290cd
184,616