Columns: content (string, 39 to 9.28k chars), sha1 (string, 40 chars), id (int64, 8 to 710k)
import six


def _hex2rgb(h):
    """Transform rgb hex representation into rgb tuple of ints representation"""
    assert isinstance(h, six.string_types)
    if h.lower().startswith('0x'):
        h = h[2:]
    if len(h) == 3:
        return (int(h[0] * 2, base=16),
                int(h[1] * 2, base=16),
                int(h[2] * 2, base=16))
    if len(h) == 6:
        return (int(h[0:2], base=16),
                int(h[2:4], base=16),
                int(h[4:6], base=16))
    raise ValueError('Invalid hex RGB value.')
6f5cf531c22d8f51107924d6c7f9cc6cca75b5bc
654,144
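A quick usage sketch for the _hex2rgb helper above, exercising the '0x' prefix, the 3-digit shorthand, and the 6-digit form (plain asserts, no extra dependencies assumed):

assert _hex2rgb('0xFF0000') == (255, 0, 0)
assert _hex2rgb('f00') == (255, 0, 0)      # 3-digit shorthand doubles each nibble
assert _hex2rgb('00ff7f') == (0, 255, 127)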
def shift_point_by_markersize(axes, x, y, markersize):
    """
    Shift overlapping points along the x axis by half of the markersize.
    This allows showing clearer plots with error bars.
    """
    inv = axes.transData.inverted()
    points = [(i, j) for i, j in zip(x, y)]
    pixels = axes.transData.transform(points)
    res = inv.transform(pixels + (markersize / 2, 0))
    return res[:, 0], res[:, 1]
fd2e88eee038ef443384b5c520cb30018c9bda47
651,380
def exist(ubids, specs):
    """Checks that all ubids are in the specs

    Args:
        ubids (seq): [ubid1, ubid2, ubid3, ...]
        specs (seq): [{spec1}, {spec2}, {spec3}]

    Returns:
        bool: True or False
    """
    return set(ubids).issubset(set(map(lambda x: x['ubid'], specs)))
ac6d77fbf51972fa1a20dbe1c5763db5bce5d8f4
589,396
import torch


def qexp_t(q):
    """
    Applies exponential map to log quaternion
    :param q: N x 3
    :return: N x 4
    """
    n = torch.norm(q, p=2, dim=1, keepdim=True)
    n = torch.clamp(n, min=1e-8)
    q = q * torch.sin(n)
    q = q / n
    q = torch.cat((torch.cos(n), q), dim=1)
    return q
6db3edab4c2bbaf4176625f35381d456ee1aeaa5
644,690
def get_worker_name(worker_id):
    """Returns `/job:tpu_worker/task:{worker_id}`."""
    return f'/job:tpu_worker/task:{worker_id}'
ee9d551d9eaa296f8b81c4147116ba67b320c883
530,074
import base64


def clean_data(records):
    """
    Clean up records to be json.dump-able and sent to subscriber
    """
    for record in records:
        timestamp = record['ApproximateArrivalTimestamp'].timestamp()
        # Needs to be base64'd and decoded to match a regular Kinesis record
        decoded_data = base64.b64encode(record['Data']).decode('utf-8')
        record['ApproximateArrivalTimestamp'] = timestamp
        record['Data'] = decoded_data
    return records
dd21b657dd7a9840a9cf732888aee267dada0be0
180,410
import hashlib


def hash_160(public_key):
    """Perform the sha256 operation followed by the ripemd160 operation."""
    hash256 = hashlib.sha256(public_key).digest()
    return hashlib.new('ripemd160', hash256).digest()
57ae11140ff7439747af6abffbd6b8854bd19095
246,275
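A minimal check of hash_160 above; the input here is an arbitrary byte string, not a real public key, and note that 'ripemd160' may be unavailable in hashlib on some OpenSSL builds:

digest = hash_160(b'\x02' + b'\x00' * 32)
assert len(digest) == 20  # RIPEMD-160 digests are always 20 bytes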
def read_list_from_file(filename):
    """
    Reads a file with table/file names in it and returns a list of the names
    """
    names = []
    with open(filename, 'r') as file:
        for line in file:
            line = line.strip()
            if line != '':
                names.append(line)
    return names
fe2475626641c3d92565e0c8ab7e35000ff62c06
533,012
import random


def add_parameter_noise(cfg, noise_ratio):
    """
    Add noise to the hyperparameters of the Dynamic DropConnect model.
    This changes the input dictionary in-place.

    Args:
        cfg: Dictionary containing the configuration with all hyperparameters
        noise_ratio: Ratio of noise relative to magnitude of parameter base value

    Returns:
        Perturbed hyperparameter dictionary
    """
    # Add uniform noise scaled relative to the magnitude of the input float.
    def _add_relative_uniform_noise(x, noise_ratio):
        noise_range = noise_ratio * abs(x)
        return random.uniform(x - noise_range, x + noise_range)

    # Noise only applies to the hyperparameters of the model that we tune
    cfg['prob_drift_down'] = _add_relative_uniform_noise(cfg['prob_drift_down'], noise_ratio)
    cfg['prob_drift_up'] = _add_relative_uniform_noise(cfg['prob_drift_up'], noise_ratio)
    cfg['prob_freeze'] = _add_relative_uniform_noise(cfg['prob_freeze'], noise_ratio)
    cfg['grad_threshold'] = _add_relative_uniform_noise(cfg['grad_threshold'], noise_ratio)
    return cfg
c46a96dbcd6f468c825f0a9fe0457ab7566541ed
664,447
def max_oslevel(dic):
    """
    Find the maximum value of the oslevel dictionary.

    arguments:
        dic - dictionary {client: oslevel}
    return:
        maximum oslevel from the dictionary
    """
    oslevel_max = None
    for key, value in iter(dic.items()):
        if oslevel_max is None or value > oslevel_max:
            oslevel_max = value
    return oslevel_max
7c2db2ec811ec4182bff7cf09f8b92bb226b8f8e
490,045
def rotate_l(sequence):
    """Return a copy of sequence that is rotated left by one element

    >>> rotate_l([1, 2, 3])
    [2, 3, 1]
    >>> rotate_l([1])
    [1]
    >>> rotate_l([])
    []
    """
    return sequence[1:] + sequence[:1]
e334448aa30ca6ce6f7cc32fe1761ad9d2c38890
320,264
import six


def _data_source_ref_search(job_configs, func, prune=lambda x: x):
    """Return a list of unique values in job_configs filtered by func().

    Loop over the 'args', 'configs' and 'params' elements in job_configs
    and return a list of all values for which func(value) is True.

    Optionally provide a 'prune' function that is applied to values before
    they are added to the return value.
    """
    args = set([prune(arg) for arg in job_configs.get('args', [])
                if func(arg)])
    configs = set([prune(val) for val in
                   six.itervalues(job_configs.get('configs', {}))
                   if func(val)])
    params = set([prune(val) for val in
                  six.itervalues(job_configs.get('params', {}))
                  if func(val)])
    return list(args | configs | params)
2cc1ddff61711cb6b82e1ca638232cb6d03c7d23
173,857
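A sketch of calling _data_source_ref_search above with a hypothetical job_configs dict, filtering for swift:// URLs:

job_configs = {'args': ['swift://container/a', 'plain'],
               'configs': {'input': 'swift://container/b'},
               'params': {'x': 'plain'}}
refs = _data_source_ref_search(job_configs,
                               lambda v: str(v).startswith('swift://'))
assert sorted(refs) == ['swift://container/a', 'swift://container/b']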
def _get_asl_pipeline(aml_model):
    """ Extract the pipeline object from an autosklearn_optimizer model. """
    # This is from the old development branch: the model *contained* a pipeline.
    # aml_model_pipeline = aml_model.pipeline_

    # This is the updated 0.1.3 branch; that is, the model simply *is*
    # a pipeline now.
    asl_pipeline = aml_model
    return asl_pipeline
9fa28be73e97b2e3682ad7fb416418288f9bf828
495,673
def is_colored(G, vertex):
    """Returns whether the vertex is colored or not"""
    return G.nodes[vertex]['node'].color is not None
dce194da8abd36ab9bb11d89a5f6e846072b5c6e
237,463
import re


def validate_ip(ip):
    """Check if the IP address has correct format.

    return validated and trimmed IP address as string or False if not valid
    """
    if not ip:
        return False
    ip = ip.strip()
    m = re.match(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$", ip)
    if m:
        a1, a2, a3, a4 = (int(m.group(1)), int(m.group(2)),
                          int(m.group(3)), int(m.group(4)))
        if a1 < 256 and a2 < 256 and a3 < 256 and a4 < 256:
            ip_canon = "{}.{}.{}.{}".format(a1, a2, a3, a4)
            return ip_canon
    return False
d62248587c1e53e93cf99c87759ec5218b7a543b
370,076
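A few sanity checks for validate_ip above, showing trimming and the canonicalization of leading zeros:

assert validate_ip(' 192.168.001.1 ') == '192.168.1.1'
assert validate_ip('256.1.1.1') is False  # octet out of range
assert validate_ip('') is False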
def parm_is_time_dependent(parm):
    """Checks if parm is time-dependent."""
    return parm.isTimeDependent()
3166ad8ea301f208a8255e0658c091af102e1dc4
638,204
def is_named(template, label):
    """
    Check if the name of the label is in template

    :param template: template of labels
    :param label: label to check
    :return: True if label's name is in template (bool)
    """
    for temp in template:
        if temp.n == label.n:
            return True
    return False
4eedcb7554b9fb0b1262dc3a36023618f88d9282
369,108
def _find_run_id(traces, item_id):
    """Find newest run_id for an automation."""
    for trace in reversed(traces):
        if trace["item_id"] == item_id:
            return trace["run_id"]
    return None
acc60e73dc8c958de245b94d95e7aadc4cc5f12c
60,572
import random


def backoff(base=2, factor=1.1, max_value=None):
    """Generator for exponential decay.

    The Google docs warn to back off from polling their API if there is no
    work available in a task queue. So we do. This method should be used
    as follows:

        my_backoff = backoff(...)
        ...
        if no_items_in_queue:
            time.sleep(next(my_backoff))
        else:
            my_backoff.send('reset')

    If it's more convenient, you can re-initialize the generator rather
    than sending the `reset` event.

    Params:
        base: the mathematical base of the exponentiation operation
        factor: factor to multiply the exponentiation by.
        max_value: The maximum value to yield. Once the value in the true
            exponential sequence exceeds this, the value of max_value will
            forever after be yielded.
    """
    def init():
        return 0

    n = init()
    while True:
        a = factor * base ** n
        if max_value is None or a < max_value:
            n += 1
            val = (yield a)
        else:
            val = (yield max_value - random.random() * max_value / 10)
        if val == 'reset':
            # Generally, we discard the generator's output from calling
            #     backoff().send('reset')
            # so we init()-1 here to ensure the following call to
            #     next(backoff())
            # is correct.
            n = init() - 1
3490dc0e7aea3288a09797153407caead45d3553
475,031
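A short sketch of driving the backoff generator above, printing values instead of sleeping; with factor=1.0 and base=2 the sequence is 1, 2, 4, ...:

my_backoff = backoff(base=2, factor=1.0, max_value=8)
print(next(my_backoff))   # 1.0 (2**0)
print(next(my_backoff))   # 2.0 (2**1)
my_backoff.send('reset')  # yielded value is discarded; counter restarts
print(next(my_backoff))   # 1.0 again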
def bits_table_m() -> dict[int, int]:
    """The table of the maximum amount of information (bits) for the version M.

    Returns:
        dict[int, int]: Dictionary of the form {version: number of bits}
    """
    table = {
        1: 128, 2: 224, 3: 352, 4: 512, 5: 688,
        6: 864, 7: 992, 8: 1232, 9: 1456, 10: 1728,
        11: 2032, 12: 2320, 13: 2672, 14: 2920, 15: 3320,
        16: 3624, 17: 4056, 18: 4504, 19: 5016, 20: 5352,
        21: 5712, 22: 6256, 23: 6880, 24: 7312, 25: 8000,
        26: 8496, 27: 9024, 28: 9544, 29: 10136, 30: 10984,
        31: 11640, 32: 12328, 33: 13048, 34: 13800, 35: 14496,
        36: 15312, 37: 15936, 38: 16816, 39: 17728, 40: 18672,
    }
    return table
74353546ce6d29bef6e5fd6eb05a218a0b57ddd3
377,019
def collatz_sequence(n):
    """
    Return the Collatz sequence of n, based on the rule:
        n → n/2     (n is even)
        n → 3n + 1  (n is odd)

    E.g. collatz_sequence(13) returns
    13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1

    Parameters
    ----------
    n (int): An int number

    Return
    ------
    seq (list of int): collatz_sequence
    """
    if not isinstance(n, int) or n < 1:
        return []
    seq = [n]
    while n > 1:
        if n % 2 == 0:
            n = n / 2
        else:
            n = 3 * n + 1
        seq.append(int(n))
    return seq
7d11224754e426003479f5127268e0e71604155b
412,303
def process_time(coverage):
    """Process time coverage range so it can be added to the dates metadata

    Parameters
    ----------
    coverage : list
        [From date, to date]

    Returns
    -------
    time_range : dict
        Dictionary following metadata schema for dates
    """
    time_range = {
        'date': f"{coverage[0]}/{coverage[1]}",
        'description': "Time range covered by data",
        'type': {
            'id': "coverage",
            'title': {'en': "Temporal coverage"},
        },
    }
    return time_range
54564fc8408885ff9fd814df30734058ae68304b
260,640
from typing import List


def permute_list(x: List, permutation_indices: List[int]) -> List:
    """Permute `x` according to the indices in `permutation_indices`"""
    return [x[i] for i in permutation_indices]
80c7007683b3cc69922a0e8fa2fa30e541fd70de
633,398
def _rpmvercmp(a: str, b: str):
    """
    This function is backported from the C function `rpmvercmp`.
    https://github.com/rpm-software-management/rpm/blob/9e4caf0fc536d1244b298abd9dc4c535b6560d69/rpmio/rpmvercmp.c#L16
    """
    if a == b:
        return 0

    str1 = a + "\0"
    str2 = b + "\0"
    one = 0
    two = 0

    while str1[one] != "\0" or str2[two] != "\0":
        while str1[one] != "\0" and not str1[one].isalnum() and str1[one] not in ["~", "^"]:
            one += 1
        while str2[two] != "\0" and not str2[two].isalnum() and str2[two] not in ["~", "^"]:
            two += 1

        # Handle the tilde separator; it sorts before everything else.
        if str1[one] == "~" or str2[two] == "~":
            if str1[one] != "~":
                return 1
            if str2[two] != "~":
                return -1
            one += 1
            two += 1
            continue

        # Handle the caret separator. The concept is the same as tilde,
        # except that if one of the strings ends (base version),
        # the other is considered the higher version.
        if str1[one] == "^" or str2[two] == "^":
            if str1[one] == "\0":
                return -1
            if str2[two] == "\0":
                return 1
            if str1[one] != "^":
                return 1
            if str2[two] != "^":
                return -1
            one += 1
            two += 1
            continue

        # If we ran to the end of either, we are finished with the loop.
        if not (str1[one] != "\0" and str2[two] != "\0"):
            break

        p = one
        q = two

        # Grab the first completely alpha or completely numeric segment;
        # leave one and two pointing to the start of the alpha or numeric
        # segment and walk str1 and str2 to the end of the segment.
        if str1[p].isdigit():
            while str1[p] != "\0" and str1[p].isdigit():
                p += 1
            while str2[q] != "\0" and str2[q].isdigit():
                q += 1
            isnum = 1
        else:
            while str1[p] != "\0" and str1[p].isalpha():
                p += 1
            while str2[q] != "\0" and str2[q].isalpha():
                q += 1
            isnum = 0

        # This cannot happen, as we previously tested to make sure that
        # the first string has a non-null segment.
        if one == p:
            return -1  # arbitrary

        # Take care of the case where the two version segments are
        # different types: one numeric, the other alpha (i.e. empty).
        # Numeric segments are always newer than alpha segments.
        if two == q:
            return 1 if isnum else -1

        if isnum:
            # Throw away any leading zeros - it's a number, right?
            while str1[one] == "0":
                one += 1
            while str2[two] == "0":
                two += 1

            # Whichever number has more digits wins.
            onelen = p - one
            twolen = q - two
            if onelen > twolen:
                return 1
            if onelen < twolen:
                return -1

        # strcmp will return which one is greater - even if the two
        # segments are alpha or if they are numeric. Don't return
        # if they are equal because there might be more segments to
        # compare.
        if str1[one:p] < str2[two:q]:
            return -1
        if str1[one:p] > str2[two:q]:
            return 1

        one = p
        two = q

    # This catches the case where all numeric and alpha segments have
    # compared identically but the segment separating characters were
    # different.
    if str1[one] == "\0" and str2[two] == "\0":
        return 0

    # Whichever version still has characters left over wins.
    return -1 if str1[one] == "\0" else 1
4f71c0c25e92869fe928dc0ff50a245935b5507c
463,900
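A few sanity checks for _rpmvercmp above, matching RPM's ordering rules (tilde sorts before the base version; numeric segments compare by length first):

assert _rpmvercmp('1.0', '1.0') == 0
assert _rpmvercmp('1.0~rc1', '1.0') == -1
assert _rpmvercmp('1.10', '1.9') == 1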
def td_to_s(td):
    """
    Convert timedelta to seconds since start of day.
    """
    # if not td.seconds and td.days:
    #     return 24*60*60
    return td.seconds
377edcdbb28a01aff9639697b30b7554201d2279
114,119
def comp_desc_dict(self):
    """Compute a dictionary with the main parameters/output of the machine

    Parameters
    ----------
    self : Machine
        A Machine object

    Returns
    -------
    desc_dict: list
        list of dictionaries containing the main parameters of the machine
    """
    desc_dict = list()

    # Machine Type
    desc_str = self.get_machine_type()
    desc_dict.append({
        "name": "Type",
        "path": "type(machine)",
        "verbose": "Machine Type",
        "type": str,
        "unit": "",
        "is_input": False,
        "value": desc_str.split(" ")[0],
    })
    # Zs
    desc_dict.append({
        "name": "Zs",
        "path": "machine.stator.slot.Zs",
        "verbose": "Stator slot number",
        "type": int,
        "unit": "",
        "is_input": True,
        "value": self.stator.slot.Zs,
    })
    # p
    desc_dict.append({
        "name": "p",
        "path": "machine.stator.winding.p",
        "verbose": "Pole pair number",
        "type": int,
        "unit": "",
        "is_input": True,
        "value": self.stator.get_pole_pair_number(),
    })
    # is_inner_rotor
    if self.rotor.is_internal:
        inner = "Inner Rotor"
    else:
        inner = "Outer Rotor"
    desc_dict.append({
        "name": "Topology",
        "path": "machine.rotor.is_internal",
        "verbose": "Topology",
        "type": str,
        "unit": "",
        "is_input": False,
        "value": inner,
    })
    # qs
    desc_dict.append({
        "name": "qs",
        "path": "machine.stator.winding.qs",
        "verbose": "Stator phase number",
        "type": int,
        "unit": "",
        "is_input": True,
        "value": self.stator.winding.qs,
    })
    # Stator winding resistance
    try:
        Rwind = self.stator.comp_resistance_wind()
    except Exception:
        Rwind = None
    desc_dict.append({
        "name": "Rwinds",
        "path": "machine.stator.comp_resistance_wind()",
        "verbose": "Stator winding resistance",
        "type": float,
        "unit": "Ohm",
        "is_input": False,
        "value": Rwind,
    })
    # Machine mass
    try:
        Mmach = self.comp_masses()["Mmach"]
    except Exception:
        Mmach = None
    desc_dict.append({
        "name": "Mmachine",
        "path": "machine.comp_masses()['Mmach']",
        "verbose": "Machine total mass",
        "type": float,
        "unit": "kg",
        "is_input": False,
        "value": Mmach,
    })
    return desc_dict
95c009f38c8bf682362dfba03875aa1c239cd6ac
411,558
from collections import Counter


def string_uniqueness(string_to_check):
    """
    Check how many unique chars are in a string

    Args:
        string_to_check (string): The string to be checked

    Returns:
        [int]: Returns an int which represents how many unique chars are in string
    """
    frequency = Counter(string_to_check)
    return len(frequency)
e1b4ac05746a396268a1cf579090d220c74896dd
461,969
def check_OOV_terms(embedding_model, word_listing):
    """
    Checks differences between pre-trained embedding model vocabulary
    and dataset specific vocabulary in order to highlight out-of-vocabulary terms.

    :param embedding_model: pre-trained word embedding model (gensim wrapper)
    :param word_listing: dataset specific vocabulary (list)

    :return
        - list of OOV terms
    """
    embedding_vocabulary = set(embedding_model.key_to_index.keys())
    oov = set(word_listing).difference(embedding_vocabulary)
    return list(oov)
44230ead4cd34d0667d68f99f91495c25fccd500
147,648
def find_sink_node(digr):
    """Finds a sink node (node with all incoming arcs) in the directed graph.

    Valid for an acyclic graph only.
    """
    # first node is taken as a default
    node = digr.nodes()[0]
    while digr.neighbors(node):
        node = digr.neighbors(node)[0]
    return node
1c90704a5e80792f3f03a4317dae39a926964a2c
496,194
import torch


def compute_reconstruction_error(vae, data_loader, **kwargs):
    """
    Computes log p(x|z), which is the reconstruction error.

    Differs from the marginal log likelihood, but still gives good insights
    on the modeling of the data, and is fast to compute.
    """
    # Iterate once over the data and compute the reconstruction error
    log_lkl = {}
    for tensors in data_loader:
        loss_kwargs = dict(kl_weight=1)
        _, _, losses = vae(tensors, loss_kwargs=loss_kwargs)
        for key, value in losses._reconstruction_loss.items():
            if key in log_lkl:
                log_lkl[key] += torch.sum(value).item()
            else:
                log_lkl[key] = torch.sum(value).item()

    n_samples = len(data_loader.indices)
    for key, value in log_lkl.items():
        log_lkl[key] = log_lkl[key] / n_samples
        log_lkl[key] = -log_lkl[key]
    return log_lkl
ee4fa35002e0f43d6482ae7f9ccfc7b7b1282db7
699,427
def codify(codemap, filtercodes):
    """
    Filters codes list in values to renamed/ignored list of codes

    :param dict codemap: Mapping of codes to names to use in display
    :param list filtercodes: List of codes to ignore
    """
    def inner(values):
        codes = [str(codemap.get(code, code)) for code in values
                 if code and code not in filtercodes]
        return filter(lambda c: len(c), codes)
    return inner
efa5b92b1e9641f5e7e37b993232788b99741a43
558,426
def _symmetric_dict(
    dictionary: dict[tuple[str, str], float]
) -> dict[tuple[str, str], float]:
    """Make a dictionary symmetric."""
    for (i, j), constant in list(dictionary.items()):
        dictionary[j, i] = constant
    return dictionary
e2370bc04fab6b6bbbd3e8fe1dd38e4b1ca69943
270,056
def get_vector(string):
    """
    Converts string to unique number (using bytes and little endian -
    hint: needs to be unique and reversible).

    :param string: The string to vectorize.
    :return: Unique integer of input string.
    """
    return int.from_bytes(string.encode(), 'little')
72cdb246f589c706d634def34922e20852f045db
303,726
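A round-trip sketch for get_vector above, demonstrating the reversibility the docstring hints at:

v = get_vector('hello')
restored = v.to_bytes((v.bit_length() + 7) // 8, 'little').decode()
assert restored == 'hello'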
def drop_empty_props(item):
    """Remove properties with empty strings from nested dicts."""
    if isinstance(item, list):
        return [drop_empty_props(i) for i in item]
    if isinstance(item, dict):
        return {
            k: drop_empty_props(v)
            for k, v in item.items() if v != ''
        }
    return item
c427c01dc282b2cf42fa2fa701cdc707142c3088
100,420
import base64


def get_ssh_certificate_tokens(module, ssh_cert_path):
    """
    Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
    """
    # This returns a string such as
    #   SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
    rc, stdout, stderr = module.run_command(
        ['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
    if rc != 0:
        module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
    fingerprint = stdout.strip()[17:].replace(':', '')

    rc, stdout, stderr = module.run_command(
        ['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
    if rc != 0:
        module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
    pkcs12_base64 = base64.b64encode(stdout.strip())

    return (fingerprint, pkcs12_base64)
2b121fc3b613c5e269542d8d414975cd816643e3
450,444
def _dist(p, q):
    """Returns the squared Euclidean distance between p and q."""
    dx, dy = q[0] - p[0], q[1] - p[1]
    return dx * dx + dy * dy
da387e1e8298e962add266d131528ffc435de10d
44,324
from typing import Optional


def _min_max(
    min: Optional[int] = None,  # noqa: WPS125
    max: Optional[int] = None,  # noqa: WPS125
):
    """Validator to check that value is in bounds."""
    def factory(instance, attribute, field_value):
        min_contract = min is not None and field_value < min
        max_contract = max is not None and field_value > max
        if min_contract or max_contract:
            raise ValueError('Option {0} is out of bounds: {1}'.format(
                attribute.name,
                field_value,
            ))
    return factory
24ba61b03df9b01ead2bdac54b2059352bda3293
329,109
def span(data, **kwargs):
    """
    Compute the difference of largest and smallest data point.
    """
    return max(data) - min(data)
885cb57cc337c3ed19137da601d43a0ce0f0b750
75,330
import re


def IsValidVersion(version):
    """
    Return true if the version is a valid ID (a quad like 0.0.0.0).
    """
    pat = re.compile(r'^\d+\.\d+\.\d+\.\d+$')
    return pat.search(version)
c52d7eb4a9fb5811908e96acdee098265a294ec1
176,378
import re


def matches(regex, subject):
    """Returns True if the regex string matches the subject"""
    return re.match(regex, subject) is not None
9f28bb71566de312cf59dae2c2050306fa9b9888
258,813
import yaml


def yaml_to_base_type(node, loader):
    """
    Converts a PyYAML node type to a basic Python data type.

    Parameters
    ----------
    node : yaml.Node
        The node is converted to a basic Python type using the following:
        - MappingNode -> dict
        - SequenceNode -> list
        - ScalarNode -> str, int, float etc.

    loader : yaml.Loader

    Returns
    -------
    basic : object
        Basic Python data type.
    """
    if isinstance(node, yaml.MappingNode):
        return loader.construct_mapping(node, deep=True)
    elif isinstance(node, yaml.SequenceNode):
        return loader.construct_sequence(node, deep=True)
    elif isinstance(node, yaml.ScalarNode):
        return loader.construct_scalar(node)
    else:
        raise TypeError("Don't know how to implicitly construct '{0}'".format(
            type(node)))
f72650a71a7e3e7bcc1fe460c6e72acb7519fc98
680,578
import keyword


def is_valid_pkg_name(pkg_name):
    """Check that the given name is a valid python package name."""
    if keyword.iskeyword(pkg_name):
        return False
    return pkg_name.isidentifier()
42b6a427c0956874cc0177ef169f6aba8616e473
360,865
def make_list(item_or_sequence):
    """Coerce a sequence or non-sequence to a list."""
    if isinstance(item_or_sequence, list):
        return item_or_sequence
    if isinstance(item_or_sequence, tuple):
        return list(item_or_sequence)
    return [item_or_sequence]
6e9fdbb5ff52bbcf9c5cd17c5df1ce008152c870
254,542
def key_entry_name_order(entry):
    """Sort key for tree entry in name order."""
    return entry[0]
82b57ffdb16da8b83c859c6ffeb9c4bb20c8baa8
255,062
from typing import Optional
from pathlib import Path


def get_aml_user_folder() -> Optional[Path]:
    """Return the root of the user folder."""
    path_parts = Path(".").absolute().parts
    if "Users" not in path_parts:
        return None
    # find the index of the last occurrence of "Users"
    users_idx = len(path_parts) - path_parts[::-1].index("Users")
    # the user folder is one item below this
    if len(path_parts) < users_idx + 1:
        return None
    return Path("/".join(path_parts[: users_idx + 1]))
d5f9dd480dfd285aa51950c83d1b0eae66d40d4c
612,552
def selection_to_string(selection):
    """Convert dictionary of coordinates to a string for labels.

    Parameters
    ----------
    selection : dict[Any] -> Any

    Returns
    -------
    str
        key1: value1, key2: value2, ...
    """
    return ', '.join(['{}: {}'.format(k, v) for k, v in selection.items()])
ed33f8a671cf83e4af4e394c84210da2abab52cc
553,277
import math


def nCr(n, r):
    """nCr(n,r) returns n choose r"""
    if n >= r:
        return math.factorial(n) // math.factorial(r) // math.factorial(n - r)
    else:
        return 0
ca8cc957f0f52f0137d111630eaeb584bb422fb2
446,215
import re


def normalize_pandoc(text: str) -> str:
    """
    Series of normalization steps for pandoc generated text

    Parameters
    ----------
    text : str
        Pandoc text

    Returns
    -------
    str
        Normalized pandoc text
    """
    text_norm = re.sub(r"(\r\n)", '\n', text)
    text_norm = re.sub(r"( {4})", '\t', text_norm)
    text_norm = re.sub(r'(-----\n\nGenerated by.*)', '----', text_norm)
    return text_norm
63adde2f67a68dd64bdb6f11cf8f03e1ea00ad7c
260,414
def abs_sqd(x):
    """Element-wise absolute value squared."""
    return x.real**2 + x.imag**2
dee260169027ec69eafa9abee1b46a858d522a31
88,256
import mpmath


def logbeta(x, y):
    """
    Natural logarithm of beta(x, y).

    The beta function is

                     Gamma(x) Gamma(y)
        beta(x, y) = -----------------
                       Gamma(x + y)

    where Gamma(z) is the Gamma function.
    """
    with mpmath.extradps(5):
        return (mpmath.loggamma(x) + mpmath.loggamma(y)
                - mpmath.loggamma(mpmath.fsum([x, y])))
7d8e00eb6934b013242b0b64843a5cc50ac73b87
432,597
def get_original_order_from_reordered_list(L, ordering):
    """Returns the original order from a reordered list."""
    ret = [None] * len(ordering)
    for orig_idx, ordered_idx in enumerate(ordering):
        ret[ordered_idx] = L[orig_idx]
    return ret
390219d804668a2085ca07b6863b45eb7d5cbc69
577,001
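A worked example for get_original_order_from_reordered_list above; here `ordering[j]` is the original index of the element now at position j, and the function inverts that permutation:

original = ['a', 'b', 'c', 'd']
ordering = [2, 0, 3, 1]
reordered = [original[i] for i in ordering]  # ['c', 'a', 'd', 'b']
assert get_original_order_from_reordered_list(reordered, ordering) == original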
def get_extent(ds):
    """Returns the bounding box of a tiff image."""
    geo_t = ds.GetGeoTransform()
    x_size, y_size = ds.RasterXSize, ds.RasterYSize
    xmin = min(geo_t[0], geo_t[0] + x_size * geo_t[1])
    xmax = max(geo_t[0], geo_t[0] + x_size * geo_t[1])
    ymin = min(geo_t[3], geo_t[3] + y_size * geo_t[5])
    ymax = max(geo_t[3], geo_t[3] + y_size * geo_t[5])
    return xmin, xmax, ymin, ymax
48b05d42e826ea79cc8afc1a884ad329e1b8dbb5
86,170
from pathlib import Path


def _get_json_filename_from_scan_filename(scan_filename: Path) -> Path:
    """Replace the given `scan_filename` extensions with '.json'."""
    extensions = "".join(scan_filename.suffixes)
    return Path(str(scan_filename).replace(extensions, ".json"))
e43233b14fff2fac0319c9bdf31f420c61f64af0
297,525
def fix_doc(func, decorator):
    """Fix a decorated function with docs about its decorator

    :param func: Decorated function.

    :param decorator: The decorator that decorated func.

    ~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-

    PURPOSE: Modify func.__doc__ to describe the decorator.

    You can use the `fix_doc` function to make sure your decorator fixes
    docs of a decorated function when writing your own decorators as
    illustrated below.

    SEE ALSO: See also the `withdoc` which can apply this to existing
    decorators.

    >>> import functools
    >>> def withfun(func):  # Write a decorator to illustrate how to use fix_doc
    ...     "Print how fun a function was after calling it."
    ...     @functools.wraps(func)
    ...     def decorated(*args, **kwargs):
    ...         "Function with fun"
    ...         result = func(*args, **kwargs)
    ...         name = getattr(func, '__name__', '(unknown)')
    ...         print('Calling %s was fun!' % name)
    ...         return result
    ...     fix_doc(decorated, withfun)
    ...     return decorated
    ...
    >>> @withfun
    ... def add(x, y):
    ...     "Add x and y together."
    ...     return x + y
    ...
    >>> print(add.__doc__)
    Add x and y together.
    -------
    Wrapped by decorator withfun:
    Print how fun a function was after calling it.
    """
    if not func.__doc__:
        func.__doc__ = 'Function %s' % func.__name__
    extra = '\n-------\nWrapped by decorator %s' % getattr(
        decorator, '__name__', '(unknown)')
    if decorator.__doc__:
        extra += ':\n%s' % decorator.__doc__
    func.__wrapdoc__ = extra
    func.__doc__ += extra
    return func
a758420694d463052169eef9d0b062a02b966cf9
568,471
def numpy_unpad(x, pad_width):
    """Unpad an array.

    Args:
        x (numpy.ndarray): array to unpad
        pad_width (tuple): padding

    Returns:
        numpy.ndarray
    """
    slices = []
    for c in pad_width:
        e = None if c[1] == 0 else -c[1]
        slices.append(slice(c[0], e))
    return x[tuple(slices)]
31f41a7a741d7efa870670c95a8acf8be365975a
703,416
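A round-trip sketch for numpy_unpad above, pairing it with numpy.pad; the zero right-pad on the second axis exercises the e=None branch:

import numpy as np

a = np.arange(6).reshape(2, 3)
pad_width = ((1, 1), (2, 0))
padded = np.pad(a, pad_width)
assert np.array_equal(numpy_unpad(padded, pad_width), a)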
from datetime import datetime


def _is_start_date_before_end_date(start: datetime, end: datetime) -> bool:
    """Whether the start date is before the end date.

    Args:
        start: The start date of an event.
        end: The end date of an event.

    Returns:
        True if valid, otherwise returns False.
    """
    return start <= end
4a296d6673f6beb704b590893088c50a97184764
10,450
def stripNewLinesFromStartAndEnd(string):
    """String -> String

    Strips whitespace from the start and end of a string"""
    return string.lstrip().rstrip()
b08f235df4fa4b69caae3cdfaa29840d817c6fad
528,633
def get_APIKey(filename="api_key.txt"):
    """Reads the user API key from the api_key.txt file"""
    try:
        with open(filename, "r") as f:
            return f.read().strip()
    except FileNotFoundError:
        print(
            "<api_key.txt> file not found! Please create a txt file called "
            "<api_key.txt> and paste your API key in there."
        )
4e76e668fdf2f5e684c637371917607bb70e932c
565,383
from pathlib import Path


def get_rotating_data_path(data_folder_path):
    """Get next available path for data folder"""
    data_counter = 0
    while Path(data_folder_path).joinpath(f"data{data_counter}").exists():
        data_counter += 1
    return Path(data_folder_path).joinpath(f"data{data_counter}")
8e0c07e86482c4518fbed397e6cfb36fcbc19030
180,034
def find_hotkey(text, attrs, hotkey='hotkey'):
    """
    Given an urwid (text, attr) tuple (as returned by, e.g.
    :meth:`Text.get_text`), returns the first character of the text
    matching the *hotkey* attribute, or ``None`` if no character matches.
    """
    ix = 0
    for attr, rl in attrs:
        if attr == hotkey:
            return text[ix]
        ix += rl
    return None
d7fb2c2b0a137916d55ba1725d29eca7ef77e617
338,349
import re
import string


def clean_text(text: str) -> str:
    """
    Transform all text to lowercase, remove white spaces and special symbols.

    Args:
        text (str): Text to process.

    Returns:
        str: Processed text.
    """
    # Transform all text to lowercase
    text = text.lower()

    # Remove white spaces and special symbols
    text = re.sub(r"https*\S+", " ", text)
    text = re.sub(r"@\S+", " ", text)
    text = re.sub(r"#\S+", " ", text)
    text = re.sub(r"\'\w+", "", text)
    text = re.sub("[%s]" % re.escape(string.punctuation), " ", text)
    text = re.sub(r"\w*\d+\w*", "", text)
    text = re.sub(r"\s{2,}", " ", text)
    text = text.strip()

    return text
fdd61e7a0edb56ca5714359b0fa38a915efdb696
60,965
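A quick example of clean_text above; the URL, mention, hashtag, and punctuation are all stripped and whitespace collapsed:

assert clean_text("Check https://example.com @user #tag!") == "check"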
def read_variant(line, parser):
    """
    Yield the variant in the right format.

    Arguments:
        line (str): A string representing a variant line in the vcf
        parser: A MetadataParser object

    Return:
        variant (dict): A dictionary with the variant information.
            dictionary key: variant information, format:
                chromosome:position:rsID:reference:alternative
            value: {'QUAL': [], 'FILTER': [], 'GT': [],
                    'info_ID1': [], 'info_ID2': [], ...}
            special parsing of vep annotation: a sub-dictionary with
            information separated by '|'
    """
    vcf_header = parser.header
    variant = {}
    variant_line = line.rstrip().split('\t')
    if len(vcf_header) != len(variant_line):
        raise SyntaxError("One of the variant lines is malformed: {0}".format(line))

    # key information: chromosome, position, rsID, reference allele, alternative allele
    key = ':'.join([variant_line[0], variant_line[1], variant_line[2],
                    variant_line[3], variant_line[4]])
    variant[key] = {}
    variant[key]['QUAL'] = variant_line[5]
    variant[key]['FILTER'] = variant_line[6]
    for i in range(len(variant_line[8].split(':'))):
        if variant_line[8].split(':')[i] == 'GT':
            variant[key]['GT'] = variant_line[9].split(':')[i]

    ##### INFO information #####
    for info in variant_line[7].split(';'):
        info = info.split('=')
        if len(info) > 1:
            if ',' in info[1]:
                variant[key][info[0]] = info[1].split(',')
            else:
                variant[key][info[0]] = [info[1]]
        else:
            variant[key][info[0]] = []

    ##### VEP ANNOTATIONS #####
    if 'CSQ' in variant[key]:
        vep_columns = parser.vep_columns
        vep_total = variant[key]['CSQ']
        variant[key]['CSQ'] = {i: [] for i in vep_columns}
        for vep in vep_total:
            vep_list = [[i] for i in vep.split('|')]
            vep_dict = dict(zip(vep_columns, vep_list))
            variant[key]['CSQ'] = {i: variant[key]['CSQ'][i] + vep_dict[i]
                                   for i in vep_dict.keys()}
    return variant
138a216e2fc5d7752a73b2ecc0f645fe8f72057a
401,846
def load_from_file(filename):
    """Load wordlist from file.

    Args:
        filename: Name of file to load wordlist from

    Returns:
        List of words read from file
    """
    with open(filename, 'r') as in_file:
        return in_file.read().splitlines()
d85a8335be02ac49e731374411cf53106b102036
82,566
from typing import Any


def increment_occurance_dict(d: dict, k: Any) -> None:
    """
    Increment an occurrence dict; updates in-place so nothing is returned.
    """
    try:
        d[k] += 1
    except KeyError:
        d[k] = 1
    return None
725b437494f4c647848c54a3d13b4e974fa7f0e8
709,511
def table_exists(cursor, table_name):
    """
    Checks if the table table_name exists in the database.
    Returns true if the table exists in the database, false otherwise.

    :param cursor: the cursor to the database's connection
    :type cursor: Cursor
    :param table_name: the name of the table searching for
    :type table_name: str
    :return: true if the table exists, false otherwise
    :rtype: bool
    """
    cursor.execute("""
        SELECT name
        FROM sqlite_master
        WHERE type='table' AND name=?;
        """, (table_name,))
    return bool(cursor.fetchone())
230b250203a2d5ba5153b28f10154d8f947a4264
659,229
import io


def scalar_to_param(gmpl_param_name, param_value, isstringio=True):
    """
    Convert a scalar to a GMPL representation of a parameter.

    :param gmpl_param_name: name for resulting GMPL param value
    :type gmpl_param_name: str
    :param param_value:
    :param isstringio: True (default) to return StringIO object, False to return string

    :return: GMPL dat code for scalar parameter either as a StringIO object or a string.
    :rtype: StringIO or str

    Example:
        scalar_to_param('n_prds_per_day', 48) --> 'param n_prds_per_day := 48;'
    """
    param = 'param ' + gmpl_param_name + ' := ' + str(param_value) + ';\n'
    if isstringio:
        param_out = io.StringIO()
        param_out.write(param)
        # Rewind so callers can read the buffer from the start
        param_out.seek(0)
        return param_out
    else:
        return param
a3a4363886eea4266733b89cdfab53a828018de9
104,125
import re


def sanitize_name(name: str) -> str:
    """
    Mangles *name* to fit within a job or job definition name.

    * Replaces any invalid character with a hyphen ``-``
    * Truncates to 128 characters in length
    """
    return re.sub(r"[^A-Za-z0-9_-]", "-", name)[0:128]
5004f10204ce99bf42212de3c4901af7f1162a52
441,819
import difflib


def get_close_matches(word, possibilities):
    """
    Return a list of the best "good enough" matches.

    Wrapper around difflib.get_close_matches() to be able to change
    default values or implementation details easily.
    """
    return [w for w in difflib.get_close_matches(word, possibilities, 3, 0.7)
            if w != word]
f0ac43a0b47a1f7e2bdc125d6194d944ec79d3ef
258,466
from pathlib import Path


def get_parent_dir(os_path: str) -> str:
    """
    Get the parent directory.
    """
    return str(Path(os_path).parents[1])
3a6e518119e39bfbdb9381bc570ac772b88b1334
796
def createDt(name, sizeOfArray=32):
    """Takes in a name of a data type and creates a structure of given size.

    Returns a string that can be copied easily to a PLC IDE."""
    txt = f'TYPE {name}{sizeOfArray} : \n'
    txt += '\tSTRUCT\n'
    for i in range(sizeOfArray):
        txt += f'\t\tix{str(i).zfill(2)} : {name}; (*Index {str(i).zfill(2)} of struct array *)\n'
    txt += '\tEND_STRUCT\n'
    txt += 'END_TYPE\n'
    return txt
8b4cb3223c4327e59e1ea73bdb2fba56bfa59228
204,462
def remove_unneccessary_words(lyrics: list) -> list:
    """
    Removes words that are irrelevant to analytics

    Args:
        lyrics: list of all words in lyrics of a song

    Return:
        Lyrics with unnecessary words removed
    """
    # list of words we want to remove
    words = ['', 'the', 'i', 'a', 'an', 'of', 'with', 'at', 'from', 'into',
             'and', 'or', 'but', 'so', 'for', 'yet', 'as', 'because', 'since',
             'this', 'that', 'these', 'those', 'in', 'to', 'on', 'all', 'you',
             'my', 'it', 'me', 'your', 'when', 'out', 'up', 'be', 'is', 'if']
    return list(set(lyrics) - set(words))
aa4827d21a27e26226a76fc230043cf5ee3db8e3
290,489
from typing import OrderedDict


def _Net_get_id_name(func, field):
    """
    Generic property that maps func to the layer names into an OrderedDict.

    Used for top_names and bottom_names.

    Parameters
    ----------
    func: function id -> [id]
    field: implementation field name (cache)

    Returns
    -------
    A one-parameter function that can be set as a property.
    """
    @property
    def get_id_name(self):
        if not hasattr(self, field):
            id_to_name = list(self.blobs)
            res = OrderedDict([(self._layer_names[i],
                                [id_to_name[j] for j in func(self, i)])
                               for i in range(len(self.layers))])
            setattr(self, field, res)
        return getattr(self, field)
    return get_id_name
26b0a90c505f88cc6caf574b18cf931e6cf037a2
396,659
def __list__(*args):
    """
    Function for list literals.

    __list__(x, y, z, ...) == [x, y, z, ...]
    """
    return list(args)
be7afa82b3231bf2b1542afa1cd83efea5c2b492
596,593
def str_digit_to_int(chr):
    """
    Converts a string character to a decimal number.
    Where "A"->10, "B"->11, "C"->12, ...etc

    Args:
        chr(str): A single character in the form of a string.

    Returns:
        The integer value of the input string digit.
    """
    # 0 - 9
    if chr in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"):
        n = int(chr)
    else:
        n = ord(chr)
        # A - Z
        if n < 91:
            n -= 55
        # a - z or higher
        else:
            n -= 61
    return n
63751c3f4828a23be44c44073e75817d7e885d94
90,271
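A few sanity checks for str_digit_to_int above, covering the three branches (decimal digit, upper-case, lower-case):

assert str_digit_to_int('7') == 7
assert str_digit_to_int('A') == 10  # ord('A') == 65; 65 - 55 == 10
assert str_digit_to_int('a') == 36  # ord('a') == 97; 97 - 61 == 36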
def build_auth_sub_data(http_request, timestamp, nonce):
    """Creates the data string which must be RSA-signed in secure requests.

    For more details see the documentation on secure AuthSub requests:
    http://code.google.com/apis/accounts/docs/AuthSub.html#signingrequests

    Args:
        http_request: The request being made to the server. The Request's
            URL must be complete before this signature is calculated as
            any changes to the URL will invalidate the signature.
        nonce: str Random 64-bit, unsigned number encoded as an ASCII
            string in decimal format. The nonce/timestamp pair should
            always be unique to prevent replay attacks.
        timestamp: Integer representing the time the request is sent.
            The timestamp should be expressed in number of seconds after
            January 1, 1970 00:00:00 GMT.
    """
    return '%s %s %s %s' % (http_request.method, str(http_request.uri),
                            str(timestamp), nonce)
f2a76877332d6f7658a3a0af87e96cc0aad72149
160,215
def mean_center_utilmat(U, axis=1, fillna=True, fill_val=None):
    """Gets the mean-centered utility matrix

    Parameters:
        U (DataFrame): utility matrix (rows are users, columns are items)
        axis (int): The axis along which the mean is evaluated,
            {0/'index', 1/'columns'}, default 1
        fillna (bool): Indicates whether missing/null values are to be filled
        fill_val (None/float): Value to be used to fill null values when
            fillna==True, default None

    Returns:
        U (DataFrame): mean-centered utility matrix
    """
    mean_centered = U.sub(U.mean(axis=axis), axis=1 - axis)
    if fillna:
        if fill_val is not None:
            return mean_centered.fillna(fill_val)
        else:
            return mean_centered.fillna(0)
    else:
        return mean_centered
dad6239843aa47e8894a04b49f87ef34e4bc2e7a
42,842
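A small pandas sketch for mean_center_utilmat above, using hypothetical users and items; u2's missing rating is filled with 0 after centering:

import pandas as pd

U = pd.DataFrame({'item1': [5.0, 1.0], 'item2': [3.0, None]},
                 index=['u1', 'u2'])
M = mean_center_utilmat(U, axis=1, fillna=True)
# u1's mean is 4.0 -> row becomes [1.0, -1.0]; u2's mean is 1.0 -> [0.0, 0.0]
assert M.loc['u1', 'item1'] == 1.0 and M.loc['u2', 'item2'] == 0.0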
def two_sum_test_code_py() -> str:
    """
    Fixture which returns the two sum test code for python
    """
    return """
import unittest


class TestTwoSum(unittest.TestCase):

    def testTwoSum_1(self):
        array = [3, 5, -4, 8, 11, 1, -1, 6]
        targetSum = 10
        result = two_sum(array, targetSum)
        assert sum(result) == targetSum

    def testTwoSum_2(self):
        array = [4, 6, 1, -3]
        targetSum = 3
        result = two_sum(array, targetSum)
        assert sum(result) == targetSum


if __name__ == '__main__':
    unittest.main()  # run all tests
"""
6914bae50b9d1acd7c1547c7292c5a082a3b350b
309,838
def unique(df, columns=None):
    """
    Asserts that columns in the DataFrame only have unique values.

    Parameters
    ----------
    df : DataFrame
    columns : list
        list of columns to restrict the check to. If None, check all columns.

    Returns
    -------
    df : DataFrame
        same as the original
    """
    if columns is None:
        columns = df.columns

    for col in columns:
        if not df[col].is_unique:
            raise AssertionError("Column {!r} contains non-unique values".format(col))
    return df
43457736c49c3ce29bffa701e26e40cdcd014636
358,342
import math


def deg_to_rad(theta):
    """Converts degrees to radians."""
    return math.pi * theta / 180
8daaa968cfcba8ba2ae4a043e78c389e11a8a7a4
322,747
def produit(a, b):
    """Return the result of multiplying the numbers a and b."""
    return a * b
866d4561edd2b2168ca167ff7116241c0fff310c
44,092
def _clean_line(line):
    """Strips and maybe decodes a line of text."""
    line = line.strip()
    if isinstance(line, bytes):
        line = line.decode("latin-1")
    return line
4fcb6f3820b5319182b0dc41e63bdcabe046f2e8
157,504
def spq_encode(answers):
    """Function that receives an array of 20 answers and encodes the results
    for the SPQ survey. It returns the following tuple:

    (DA: Deep approach,
     SA: Surface approach,
     DM: Deep motive,
     SM: Surface motive,
     DS: Deep strategy,
     SS: Surface strategy)

    They are the averages of the corresponding questions.

    :param answers: Array of 20 answers that encodes the results of the survey
    """
    dm_idx = [1, 5, 9, 13, 17]
    ds_idx = [2, 6, 10, 14, 18]
    sm_idx = [3, 7, 11, 15, 19]
    ss_idx = [4, 8, 12, 16, 20]

    # Calculate the four accumulations first
    dm_val = 1.0 * sum([answers[i - 1] for i in dm_idx])
    ds_val = 1.0 * sum([answers[i - 1] for i in ds_idx])
    sm_val = 1.0 * sum([answers[i - 1] for i in sm_idx])
    ss_val = 1.0 * sum([answers[i - 1] for i in ss_idx])

    # Return the six values
    return ((dm_val + ds_val) / (len(dm_idx) + len(ds_idx)),
            (sm_val + ss_val) / (len(sm_idx) + len(ss_idx)),
            dm_val / len(dm_idx),
            sm_val / len(sm_idx),
            ds_val / len(ds_idx),
            ss_val / len(ss_idx))
4f4d4cb9bb3f93f7ea7150655dd0bd2c20dc997b
430,839
def has_dict_protocol(obj):
    """Checks whether object supports dict protocol."""
    return hasattr(obj, "__getitem__") and hasattr(obj, "__setitem__")
8fd47169e0edfb7bc151691d87d0e0e20c5eff83
209,879
from typing import Any


def get_metadata() -> dict[str, Any]:
    """Return the metadata of the module."""
    return {"signature": "univariate"}
5b69c534f9e2f2d694089b2ea132b19a1efa1b67
387,465
from dateutil import tz
from datetime import datetime


def fordtime_to_datetime(fordTimeString, useUTC=True):
    """Convert Ford UTC time string to local datetime object"""
    from_zone = tz.tzutc()
    to_zone = tz.tzlocal()
    try:
        utc_dt = datetime.strptime(fordTimeString, "%m-%d-%Y %H:%M:%S.%f")
    except ValueError:
        utc_dt = datetime.strptime(fordTimeString, "%m-%d-%Y %H:%M:%S")
    utc = utc_dt.replace(tzinfo=from_zone)
    if useUTC:
        return utc
    return utc.astimezone(to_zone)
8aff092d60559f5fa8da8de7a430180b68b571a2
630,660
from datetime import datetime


def create_identifier(hint: str = '') -> str:
    """
    Can be used to create unique names for files by exploiting the uniqueness
    of the current date. Be aware that if two identifiers are created during
    the same second they are equal!

    Follows the form YYYY_MM_DD__hh_mm_ss.

    :return: YYYY_MM_DD__hh_mm_ss_{hint}
    """
    now = datetime.now()
    dt_string = now.strftime("%Y_%m_%d__%H_%M_%S")
    return f"{dt_string}_{hint}" if hint else dt_string
7f2bcae9c107f71be9e8f19235b0f6e470ac1de6
683,599
import math


def polar_to_cartesian(radial, theta):
    """Convert a coordinate in (r, th) to (x, y)."""
    return (radial * math.cos(theta), radial * math.sin(theta))
42c327caa19fb926c49ec529124ed5a8c785693a
221,421
import itertools


def get_spells(sequence):
    """
    Returns a list of tuples where each tuple holds the element and the
    length of the spell (also known as run or episode) for each spell in
    the sequence.

    Example
    ---------
    >>> sequence = [1,1,2,1,2,2,3]
    >>> ps.get_spells(sequence)
    [(1, 2), (2, 1), (1, 1), (2, 2), (3, 1)]
    """
    # get each spell and its length
    spells = [(k, sum(1 for x in v)) for k, v in itertools.groupby(sequence)]
    # this is functionally equivalent to the following:
    # spells = [(k, len(list(v))) for k, v in itertools.groupby(sequence)]
    return spells
523c2c087c81fd3b985e345caa2646bb6fa888d5
186,978
def remove_multicollinearity_by_coefficient_threshold(df, method='spearman',
                                                      coefficient_threshold=0.7):
    """
    Uses the correlation between features and a specified threshold to
    identify and remove collinear/multicollinear features.

    Args:
        df ([pandas.DataFrame]): A dataframe that includes all the features
            that are being considered for modeling without the target.

        method (str, optional):
            spearman: Spearman rank correlation
            pearson: Pearson correlation coefficient
            kendall: Kendall Tau correlation coefficient

            Defaults to 'spearman' as it is less prone to false alarms and
            tests if a monotonic relationship exists, however it is more
            computationally expensive as it is non-parametric.

        coefficient_threshold (float, optional): Defaults to 0.7 as per the
            threshold established by Dormann, C. F., J. Elith, S. Bacher,
            et al. 2013. Collinearity: a review of methods to deal with it
            and a simulation study evaluating their performance.
            Ecography 36:27-46.

    Returns:
        A pandas DataFrame that has removed all collinear/multicollinear
        features from the feature space based upon the correlation
        coefficient threshold.
    """
    correlated_features = []
    correlation_matrix = df.corr(method=method)

    for i in range(len(correlation_matrix.columns)):
        for j in range(i):
            if abs(correlation_matrix.iloc[i, j]) > coefficient_threshold:
                colname = correlation_matrix.columns[i]
                correlated_features.append(colname)

    # Create the final DataFrame without correlated features
    df = df.drop(correlated_features, axis=1)
    return df
3de2fcb1ffe2c1d41af5ee62aa2a437e5c7baf41
662,799
def clang_find_declarations(node):
    """Finds declarations one level below the Clang node."""
    return [n for n in node.get_children() if n.kind.is_declaration()]
dc2408ed1f60bdbb1c281fd59510fb9144a5d257
92,226
import math


def rotate(x, y, cx, cy, angle):
    """
    Rotate a point around a center.

    >>> x, y = rotate(1, 1, 0, 0, 90)
    >>> print("x = {:.1f}, y = {:.1f}".format(x, y))
    x = -1.0, y = 1.0
    """
    temp_x = x - cx
    temp_y = y - cy

    # now apply rotation
    rotated_x = temp_x * math.cos(math.radians(angle)) - \
        temp_y * math.sin(math.radians(angle))
    rotated_y = temp_x * math.sin(math.radians(angle)) + \
        temp_y * math.cos(math.radians(angle))

    # translate back
    x = rotated_x + cx
    y = rotated_y + cy
    return x, y
ea6e68e67025775d64d3f8eba33dc182279a4504
527,258
import re


def _re_flatten(p):
    """
    Turn all capturing groups in a regular expression pattern into
    non-capturing groups.
    """
    if '(' not in p:
        return p
    return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
                  lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:',
                  p)
c63242f5b51f1c345f8b92e8a71b7aba8fb7712c
326,942
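A quick demonstration of _re_flatten above: both a plain group and a named group become non-capturing, so the flattened pattern still matches but captures nothing:

import re

pat = _re_flatten(r'(\d+)-(?P<name>\w+)')
assert pat == r'(?:\d+)-(?:\w+)'
assert re.match(pat, '42-foo').groups() == ()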
import math


def longwave_radiation(Tmin_C, Tmax_C, ea, Rs, Rso, rh):
    """Calculate the net long-wave radiation.

    Ref: http://www.fao.org/docrep/x0490e/x0490e00.htm Eqn 39

    Tmin_C: Minimum temperature during the calculation period
    Tmax_C: Maximum temperature during the calculation period
    ea: Actual vapor pressure in kPa
    Rs: Measured radiation. See below for units.
    Rso: Calculated clear-sky radiation. See below for units.
    rh: Relative humidity in percent

    Because the formula uses the ratio of Rs to Rso, their actual units
    do not matter, so long as they use the same units.

    Returns back radiation in MJ/m^2/day

    Example:
    >>> print("%.1f mm/day" % longwave_radiation(Tmin_C=19.1, Tmax_C=25.1, ea=2.1,
    ...                                          Rs=14.5, Rso=18.8, rh=50))
    3.5 mm/day

    Night time example. Set rh = 40% to reproduce the Rs/Rso ratio of 0.8
    used in the paper.
    >>> print("%.1f mm/day" % longwave_radiation(Tmin_C=28, Tmax_C=28, ea=3.402,
    ...                                          Rs=0, Rso=0, rh=40))
    2.4 mm/day
    """
    # Calculate temperatures in Kelvin:
    Tmin_K = Tmin_C + 273.16
    Tmax_K = Tmax_C + 273.16

    # Stefan-Boltzmann constant in MJ/K^4/m^2/day
    sigma = 4.903e-09

    # Use the ratio of measured to expected radiation as a measure of
    # cloudiness, but only if it's daylight
    if Rso:
        cloud_factor = Rs / Rso
    else:
        # If it's nighttime (no expected radiation), then use this totally
        # made up formula
        if rh > 80:
            # Humid. Lots of clouds
            cloud_factor = 0.3
        elif rh > 40:
            # Somewhat humid. Modest cloud cover
            cloud_factor = 0.5
        else:
            # Low humidity. No clouds.
            cloud_factor = 0.8

    # Calculate the longwave (back) radiation (Eqn 39).
    # Result will be in MJ/m^2/day.
    Rnl_part1 = sigma * (Tmin_K ** 4 + Tmax_K ** 4) / 2.0
    Rnl_part2 = (0.34 - 0.14 * math.sqrt(ea))
    Rnl_part3 = (1.35 * cloud_factor - 0.35)
    Rnl = Rnl_part1 * Rnl_part2 * Rnl_part3
    return Rnl
11a5f520a62e86128914162c02550a8244a18cda
139,115
def format_duration(duration):
    """Given a duration in minutes, return a string on the format h:mm.

    >>> format_duration(75)
    '1:15'
    >>> format_duration(4)
    '0:04'
    >>> format_duration(601)
    '10:01'
    """
    h = duration // 60
    m = duration % 60
    return f"{h}:{m:02}"
951e2b88d30d1974729cdc30b5dd5ed2b872130d
162,156
def _event_QColorButton(self):
    """Return value change signal for QColorButton"""
    return self.colorChanged
08fc8e1bf9a907b1649a66879cda55347677b94d
159,524
import torch


def tensor_to_gradcheck_var(tensor, dtype=torch.float64, requires_grad=True):
    """Converts the input tensor to a valid variable to check the gradient.

    `gradcheck` needs 64-bit floating point and requires gradient.
    """
    assert torch.is_tensor(tensor), type(tensor)
    return tensor.requires_grad_(requires_grad).type(dtype)
55958a83c9c827ca621e5b724d3bb077bc938a71
607,561
def select_top_relsent_cred(relsent):
    """Select the top available credibility source for a relsent.

    This will be either the domain_credibility or a normalised
    claimReview_credibility_rating.

    :param relsent: a SimilarSent dict
    :returns: either the domain_credibility or the
        claimReview_credibility_rating, whichever has the highest confidence
        (although claimReview has precedence). If neither is available,
        returns an empty dict.
    :rtype: dict
    """
    domcred = relsent.get('domain_credibility', {}).get('credibility', {})
    domcred['source'] = 'domain'
    domcred['domainReviewed'] = relsent.get(
        'domain_credibility', {}).get('itemReviewed', "??")
    cr_cred = relsent.get('claimReview_credibility_rating', {})
    cr_cred['source'] = 'claimReview'
    if 'confidence' in cr_cred and cr_cred.get('confidence', -1.0) > 0.2:
        # avoid domcred; it could point to a trustworthy factchecker domain!
        src_creds = [cr_cred]
    else:
        src_creds = [domcred, cr_cred]
    src_creds = sorted(src_creds,
                       key=lambda cred: cred.get('confidence', -1.0),
                       reverse=True)
    return src_creds[0]
78f5892f88bc60ca8b9ac8a1e71d0996003effbe
230,882
def test_for_a_stretch(seq, a_stretch):
    """
    Test whether or not the immediate downstream bases of the PAS are A
    throughout the a_stretch distance.

    >>> test_for_a_stretch("AAAAATTTTTTTTTT", 5)
    'a'
    >>> test_for_a_stretch("AAAATTTTTTTTTTT", 5)
    ''
    """
    return 'a' if seq[:a_stretch].count("A") == a_stretch else ''
d974a85a0fd7548758731f6f67ee40f4ffcd0be3
77,649
def _group_by_size(fl):
    """Group FileInfos by their size."""
    # Start by sorting
    fl.sort(key=lambda f: f['size'])

    begin, end, length = 0, 1, len(fl)
    groups = []
    while end < length:
        # Get bounds of same-size files (guard against running off the end)
        while end < length and fl[begin]['size'] == fl[end]['size']:
            end += 1
        # Take the slice of same-size files
        group = tuple(fl[begin:end])
        groups.append(group)
        # Advance the slicer
        begin = end
        end += 1
    return groups
0807798c4597f4db56e6bdf88b41d4fa75d4d33a
145,446
def get_or_pop_arg(argname, args, kwargs, argspec):
    """Finds the value of argname by looking in args and kwargs.

    If argname is present in argspec then the value is simply returned.
    If argname is not present, the value is removed from args or kwargs
    as appropriate.

    Note: If argname is not present in either argspec or kwargs then it
    is assumed to be the last element of args and is popped off (and added
    to kwargs if the argspec says a **keywords argument is present).
    """
    regargs, varargs, varkwargs, defaults = argspec
    if argname in kwargs:
        if varkwargs is None and argname not in regargs:
            return kwargs.pop(argname)
        else:
            return kwargs[argname]
    elif argname not in regargs:
        if varkwargs is None:
            return args.pop()
        else:
            value = args.pop()
            kwargs[argname] = value
            return value
    else:
        return args[regargs.index(argname)]
8e9fc2059fb484e32873f75c4f93dd78743c470a
292,035