Dataset columns: content (string, 39 to 14.9k chars) · sha1 (string, 40 chars) · id (int64, 0 to 710k)
def get_continuous_segments(array):
    """
    Get continuous segments for single frame.

    Args:
        array (array): ordered array with integers representing resids ~ single frame
            e.g.: array = [5, 6, 7, 12, 13, 18, 19, 20]

    Returns:
        SEGMENTS (list)
            list of continuous segments

    Example:
        >>> gdt.get_continuous_segments([1, 2, 3, 22, 23, 50, 51, 52])
        [[1, 2, 3], [22, 23], [50, 51, 52]]
    """
    SEGMENTS = []
    temp = []
    for i in range(len(array)):
        temp.append(array[i])
        try:
            if array[i] + 1 != array[i + 1]:
                SEGMENTS.append(temp)
                temp = []
        except IndexError:
            SEGMENTS.append(temp)
    return SEGMENTS
365f3b20a7ec4ae016ccc48dff9fd5500cd48746
697,554
import base64
import struct


def _decode_real(blob):
    """Inverse of _encode_real."""
    bytes_ = base64.b64decode(blob)
    return struct.unpack('<d', bytes_)[0]
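The referenced _encode_real is not part of this row; a minimal sketch of the assumed counterpart, packing a float as a little-endian IEEE 754 double before base64-encoding:

import base64
import struct


def _encode_real(value):
    # Assumed counterpart of _decode_real: little-endian double, base64-encoded.
    return base64.b64encode(struct.pack('<d', value))


# Round trip: _decode_real(_encode_real(3.14)) == 3.14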
b570d5e78177e9f4b783478773665ddf80de4301
697,555
def retrieve_attrs(instancenorm):
    """
    Gather the required attributes for the GroupNorm plugin from the subgraph.

    Args:
        instancenorm: Instance Normalization node in the graph.
    """
    attrs = {}
    # The 2nd dimension of the Reshape shape is the number of groups
    attrs["num_groups"] = instancenorm.i().i(1).attrs["value"].values[1]
    attrs["eps"] = instancenorm.attrs["epsilon"]

    # 1 is the default plugin version the parser will search for, and therefore can be omitted,
    # but we include it here for illustrative purposes.
    attrs["plugin_version"] = "1"

    # "" is the default plugin namespace the parser will use, included here for illustrative purposes
    attrs["plugin_namespace"] = ""

    return attrs
88d752a90c285952af8cbed6fd1183111fc99b96
697,557
import textwrap


def mk_block(decl, contents, indent=2):
    """Format a block like this:

        decl {
          contents
        }

    where `decl` is one line but contents can be multiple lines.
    """
    return decl + ' {\n' + textwrap.indent(contents, indent * ' ') + '\n}'
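A quick usage sketch (the nginx-style directives are just made-up input):

block = mk_block('server', 'listen 80;\nserver_name example.com;')
print(block)
# server {
#   listen 80;
#   server_name example.com;
# }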
e57d3fa8f4c94b3a1d4e1145668f8dceccf02025
697,560
import torch


def affine_make_square(affine):
    """Transform a rectangular affine into a square affine.

    Parameters
    ----------
    affine : (..., ndim[+1], ndim+1) tensor

    Returns
    -------
    affine : (..., ndim+1, ndim+1) tensor
    """
    affine = torch.as_tensor(affine)
    device = affine.device
    dtype = affine.dtype
    ndims = affine.shape[-1] - 1
    if affine.shape[-2] not in (ndims, ndims + 1):
        raise ValueError('Input affine matrix should be of shape\n'
                         '(..., ndims+1, ndims+1) or (..., ndims, ndims+1).')
    if affine.shape[-1] != affine.shape[-2]:
        bottom_row = torch.cat((torch.zeros(ndims, device=device, dtype=dtype),
                                torch.ones(1, device=device, dtype=dtype)), dim=0)
        bottom_row = bottom_row.unsqueeze(0)
        bottom_row = bottom_row.expand(affine.shape[:-2] + bottom_row.shape)
        affine = torch.cat((affine, bottom_row), dim=-2)
    return affine
440fef0cf43eb501fe25555ac4e113865108d13d
697,563
import re


def parse_show_ntp_trusted_keys(raw_result):
    """
    Parse the 'show ntp trusted-keys' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show ntp trusted-keys command \
        in a dictionary of the form:

     ::

        {
            '11': {'key_id': '11'},
            '12': {'key_id': '12'}
        }
    """
    ntp_trusted_key_re = (
        r'(?P<key_id>\d+)'
    )

    result = {}
    for line in raw_result.splitlines():
        re_result = re.search(ntp_trusted_key_re, line)
        if re_result:
            partial = re_result.groupdict()
            result[partial['key_id']] = partial

    return result
37c573548359553b03237ddece068e52e9f51fb4
697,566
import configparser


def read_aranet_conf(file):
    """Reads the Aranet Cloud configuration file

    Args:
        file (str or os.PathLike): A path-like object giving the pathname
            of the configuration file.

    Returns:
        [configparser.ConfigParser]: A ConfigParser object with the
            configuration.
    """
    aranet_conf = configparser.ConfigParser(
        defaults={
            "endpoint": "https://aranet.cloud/api"
        }
    )

    with open(file) as f:
        aranet_conf.read_file(f)

    return aranet_conf
36991e18bd4049145f91aa27aecad57647fc3230
697,572
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters
    arranged differently (ignoring the case).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    return (
        "".join(sorted(first_str.lower())).strip()
        == "".join(sorted(second_str.lower())).strip()
    )
345d83fdcde1a8e1a0d0d2b73e0a16f5b5f816d1
697,574
def stringify_span(range):
    """Returns a nicely-formatted string representing a span of years.

    Arguments:
        range {range} -- A range object
    """
    if len(range) >= 2:
        timespan = f"{range[0]}-{range[-1]}"
    else:
        # cast to str so a single-year span is also returned as a string
        timespan = str(range[0])
    return timespan
043cc8aae9cb2063c5af16dcb55223f05056b903
697,579
from typing import List

import statistics


def udf_median(items: List[float]):
    """
    Median of elements in a list
    """
    return statistics.median(items)
6ed0841251de91e2758489d742c071f303444dfe
697,585
def substrings(seq):
    """
    Returns a set of all the substrings of s.

    Recall we can compute a substring using s[i:j] where 0 <= i, j < len(s).

    Example:
    >>> substrings("abc")
    "a", "ab", "abc", "b", "bc", "c"
    """
    subs = set()
    for i in range(len(seq)):  # determine the starting index
        for j in range(i + 1, len(seq) + 1):  # determine the ending index
            subs.add(seq[i:j])
    return subs
7e27e1e8902410d3ca629edf13c2ce2f54107d08
697,586
import pytz


def to_zulu_string(dt):
    """Returns a Zulu time string from a datetime.

    Assumes naive datetime objects are in UTC.
    Ensures the output always has a floating-point number of seconds.
    """
    # Assume non-tz-aware datetimes are in UTC.
    if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
        dt = dt.replace(tzinfo=pytz.UTC)
    # Convert datetime into UTC.
    isodate = dt.astimezone(pytz.UTC).isoformat().split('+')[0]
    # Add fractional seconds if not present.
    if '.' not in isodate:
        isodate += '.0'
    return isodate + 'Z'
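For illustration, a naive datetime round-trips like this:

from datetime import datetime

to_zulu_string(datetime(2021, 5, 1, 12, 30))  # '2021-05-01T12:30:00.0Z'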
2d7cb31cd83f86e9f2be78bbb8c3d9d517e6d31f
697,590
def serialize_value(value):
    """Serialize a single value.

    This is used instead of a single-shot `.format()` call because some
    values need special treatment for being serialized in YAML; notably,
    booleans must be written as lowercase strings, and floats exponents
    must not start with a 0.
    """
    if isinstance(value, bool):
        return repr(value).lower()
    elif isinstance(value, float):
        return "{0:.16}".format(value).replace("e+0", "e+").replace("e-0", "e-")
    else:
        return repr(value)
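A few sample outputs illustrating the special cases:

serialize_value(True)   # 'true'  (not Python's 'True')
serialize_value(1e-05)  # '1e-5'  (leading zero stripped from the exponent)
serialize_value("x")    # "'x'"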
6c01f79cd745799402c5d1db2ed6acafa8dd1c2a
697,591
def track_title_and_slug_from_penta(tracks, room_slug):
    """
    Return the track title (e.g. Community) based on the room slug (mcommunity)

    :param tracks:
    :param room_slug:
    :return:
    """
    if room_slug in tracks:
        return tracks[room_slug]['title'], tracks[room_slug]['slug']
    return None, None
4fb4965b8e81c82dd6f34b03166e2c2812653328
697,593
def list_format(lst):
    """
    Unpack a list of values and write them to a string
    """
    return '\n'.join(lst)
754e8cb4710558f551357bf5ac7ed54cbf0730ca
697,595
def segmentize_geometry(geom, segment_size=1.):
    """
    Segmentizes the lines of a geometry (decreases the point spacing
    along the lines) according to a given `segment_size`.

    Parameters
    ----------
    geom : ogr.Geometry
        OGR geometry object.
    segment_size : float, optional
        For precision: distance of longest segment of the geometry polygon
        in units of the spatial reference system.

    Returns
    -------
    geom_fine : ogr.Geometry
        A congruent geometry realised by more vertices along its shape.
    """
    geom_fine = geom.Clone()
    geom_fine.Segmentize(segment_size)
    geom = None
    return geom_fine
324317a1f64ff0794b204c4b0f4b39ff0df9221d
697,604
def _json_file_name(name):
    """Returns the name of the statistics file for `name`."""
    return name + '.json'
e554b5e40f9fceb6c98986611588c4853d6861da
697,606
def get_model_metadata_fixture() -> dict:
    """Test fixture for model metadata

    Returns:
        dict: Example metadata response structure
    """
    metadata = {
        "kind": "Model",
        "apiVersion": "v1alpha4",
        "metadata": {
            "displayName": "model display name",
            "name": "test-model",
            "summary": "Model summary",
            "description": "Model description",
            "type": "Simulation",
            "owner": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
        },
        "spec": {
            "inputs": {
                "env": [
                    {
                        "name": "R_NUMBER",
                        "title": "R Number",
                        "desc": "The reproduction number",
                        "type": "number",
                        "default": 1.5,
                        "min": 0.1,
                        "max": 2.0,
                    },
                    {
                        "name": "setting",
                        "title": "Setting",
                        "desc": "Mode to run the model in",
                        "type": "string",
                        "default": "long_default_name",
                    },
                ],
                "dataslots": [
                    {
                        "default": [
                            {
                                "uid": "11111a1a-a111-11aa-a111-11aa11111aaa",
                                "versionUid": "21111a1a-a111-11aa-a111-11aa11111aaa",
                            }
                        ],
                        "path": "inputs/",
                        "required": True,
                        "name": "Inputs",
                    }
                ],
            },
            "outputs": {
                "datasets": [
                    {
                        "name": "dataset_1.xls",
                        "type": "xls",
                        "desc": "Dataset 1 description",
                    },
                    {
                        "name": "dataset_2.xls",
                        "type": "xls",
                        "desc": "Dataset 2 description",
                    },
                ]
            },
            "image": "dreg.platform.dafni.rl.ac.uk/nims-prod/test-model:0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
        },
    }
    return metadata
c4ef1d1088bee664e350c32fe457657255f500da
697,607
from pathlib import Path


def main(file_path: Path) -> str:
    """
    Test function for showing file path.

    Args:
        file_path: file path

    Returns:
        str: HTML output
    """
    return f"Hello world mission2, filePath is {file_path}."
06c23ddf08dba563a061e4a503b5ba76f33b456a
697,612
def GetNamesList(filepath):
    """
    Open a file with a given filepath containing place names and return a list.
    """
    # use a context manager so the file handle is closed after reading
    with open(filepath) as f:
        return f.read().splitlines()
1e32772d7483ec9cae7cec2e523a3c7b6ef472fe
697,616
def splitOn(splitter):
    """Return a function that splits a string on the given splitter string.

    The function returned filters an empty string at the end of the result list.

    >>> splitOn('X')("aXbXcX")
    ['a', 'b', 'c']
    >>> splitOn('X')("aXbXc")
    ['a', 'b', 'c']
    >>> splitOn('X')("abc")
    ['abc']
    >>> splitOn('X')("abcX")
    ['abc']
    """
    def f(s):
        l = s.split(splitter)
        if l and not l[-1]:
            return l[:-1]
        else:
            return l
    return f
c316d0de407aa6ca93208ad7d7ce0ca8cf3c176a
697,618
def get_default(key, ctx):
    """
    Get the default argument using a user instance property

    :param key: The name of the property to use
    :param ctx: The click context (which will be used to get the user)
    :return: The default value, or None
    """
    try:
        value = getattr(ctx.code_builder, key)
        if value == "":
            value = None
    except AttributeError:  # getattr raises AttributeError, not KeyError
        value = None
    return value
c1c8db3bb996a2c018a5eb4ca45638601e15ce82
697,622
def call(f, *args, **kwargs):
    """When used in pipe, calls given function.

    Calls given function, passing pipe argument first, before args and kwargs.
    Hence function should be (re)designed to handle this.
    Resulting value is then used as pipe argument and passed along further.

    Note: kwargs will overwrite pipe argument ``if xarg in kwarg``.

    Args:
        f: function to call
        *args: other positional arguments to the function
        **kwargs: keyword arguments to the function

    Returns:
        function that performs the required action in pipe
    """
    def g(x):
        pargs = (x,) + args
        return f(*pargs, **kwargs)
    return g
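A small illustration of the argument order (the pipe value lands first):

import operator

add_five = call(operator.add, 5)
add_five(3)  # operator.add(3, 5) -> 8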
4fce00354ee6b185a60d542ed173753967f0ca1f
697,623
def extract_masked_part_from_spectrogram(mask, spectrogram):
    """
    Extract the masked part of the spectrogram
    """
    return spectrogram[:, mask]
76be4d93aca3684edfe374bcce0ce09b833ab687
697,624
def get_stack_output_value(stack, key: str) -> str:
    """
    Get a stack output value

    :param stack: the boto3 stack resource
    :param key: the output key
    :return: str
    """
    results = [i for i in stack.outputs if i["OutputKey"] == key]
    if not results:
        raise ValueError(f"could not find output with key {key} in stack")
    return results[0]["OutputValue"]
0742a2b8558b7aa4ea1656a6645a9b3e5fbdb17a
697,630
def _skip_class_name(name):
    """Determine if the class name should be skipped."""
    return name == "Bundled" or name.startswith("_")
7cc942d327784da2ae1aefc99859e52b3696475b
697,632
def get_longest_parent(task):
    """
    Returns the parent of the given task which has the longest execution time

    Args:
        task (Node)

    Returns:
        Node
    """
    longest_parent = task.get_parents()[0]
    for parent in task.get_parents():
        if longest_parent.get_exec_time() < parent.get_exec_time():
            longest_parent = parent
    return longest_parent
8e41984c1e287f8a2d457150d332f868660c4eef
697,634
def gen_mapping(args, service, weight=None, labels=None):
    """
    Generate a Mapping for a service/prefix and (optional) weight
    """
    labels = labels if labels is not None else {}  # avoid a mutable default argument
    prefix = args.prefix

    mapping = {
        "apiVersion": "getambassador.io/v1",
        "kind": "Mapping",
        "metadata": {
            "name": f"mapping-for-{service}"
        },
        "spec": {
            "prefix": prefix,
            "service": service
        }
    }

    if args.namespace:
        mapping["metadata"]["namespace"] = args.namespace

    if len(labels) > 0:
        mapping["metadata"]["labels"] = labels

    if weight:
        mapping["spec"]["weight"] = weight

    return mapping
167d044cffed74d5d04498e4941a147395977602
697,638
def keep_alpha_numeric(input_text: str) -> str:
    """ Remove any character except alphanumeric characters """
    return ''.join(c for c in input_text if c.isalnum())
6775d7ba72ae06c294acde9bc2964ae51a954ce3
697,641
def reunion(set1, set2):
    """Given the two sets, returns their reunion."""
    return set1 | set2
3e7af2a784d570d2a28c6665905242f5b6e812de
697,642
def are_brackets_balanced(expr):
    """
    Checks to see if there are parens or brackets that are unbalanced
    """
    stack = []

    # Traversing the Expression
    for char in expr:
        if char in ["(", "{", "["]:
            # Push the element in the stack
            stack.append(char)
        elif char in [")", "}", "]"]:
            # If the current character is a closing bracket, the stack
            # cannot be empty at this point.
            if not stack:
                return False
            current_char = stack.pop()
            if current_char == '(' and char != ")":
                return False
            if current_char == '{' and char != "}":
                return False
            if current_char == '[' and char != "]":
                return False

    # Check Empty Stack
    if stack:
        return False
    return True
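Example calls:

are_brackets_balanced("{[()]}")  # True
are_brackets_balanced("{[(])}")  # False -- ']' closes '(' out of order
are_brackets_balanced("((")      # False -- leftover opening brackets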
3e548cb2999c404d6ed0587ef1a959767c11d479
697,646
def _get_p_y_z_halfspace(particles):
    """
    This function calculates the probabilities of y and z half space
    for a given set of particles

    Parameters
    ----------
    particles : list
        List of SourceParticle

    Returns
    -------
    p_y_halfspace : float
        The probability of y half space
    p_z_halfspace : float
        The probability of z half space
    """
    y_count, z_count = 0, 0
    for s in particles:
        if s.y < 0.5:
            y_count = y_count + 1
        if s.z < 0.5:
            z_count = z_count + 1
    p_y_halfspace = float(y_count) / len(particles)
    p_z_halfspace = float(z_count) / len(particles)
    return p_y_halfspace, p_z_halfspace
fb822d7b03b4fc387fff8723d5a8d7dafe8f9bfc
697,655
import torch


def get_data(generic_iterator, generic_loader):
    """Code to get minibatch from data iterator

    Inputs:
    - generic_iterator; iterator for dataset
    - generic_loader; loader for dataset

    Outputs:
    - data; minibatch of data from iterator
    - generic_iterator; iterator for dataset, reset if you've reached the
      end of the dataset
    """
    try:
        data = next(generic_iterator)[0]
    except StopIteration:
        generic_iterator = iter(generic_loader)
        data = next(generic_iterator)[0]

    if torch.cuda.is_available():
        data = data.cuda()

    return data, generic_iterator
e827ab7cea13c96953260d6b157a3e6ab370c6c9
697,656
def get_name(schema_url):
    """
    Extract the item name from its URL

    :param schema_url: the URL of the schema
    :return name: the name of the schema (eg: 'item_schema.json')
    """
    name = schema_url.split("/")[-1].replace("#", '')
    return name
e7632dc959a4503b51cc5c3f851a063056893507
697,658
import torch
from typing import Callable


def encode_and_aggregate(input_tensor: torch.Tensor,
                         encoder: torch.nn.Module,
                         num_encoder_input_channels: int,
                         num_image_channels: int,
                         encode_channels_jointly: bool,
                         aggregation_layer: Callable) -> torch.Tensor:
    """
    Function that encodes a given input tensor either jointly using the encoder
    or separately for each channel in a sequential manner. Features obtained at
    the output encoder are then aggregated with the pooling function defined by
    `aggregation layer`.
    """
    if encode_channels_jointly:
        input_tensor = encoder(input_tensor)
        input_tensor = aggregation_layer(input_tensor)
    else:
        shape = input_tensor.shape
        channel_shape = (shape[0], num_encoder_input_channels, shape[2], shape[3], shape[4])
        aggregated_outputs = []  # renamed from `encode_and_aggregate` to avoid shadowing the function
        # When using multiple encoders, it is more memory efficient to aggregate the individual
        # encoder outputs and then stack those smaller results, rather than stack huge outputs
        # and aggregate.
        for i in range(num_image_channels):
            start_index = i * num_encoder_input_channels
            end_index = start_index + num_encoder_input_channels
            encoder_output = encoder(input_tensor[:, start_index:end_index].view(channel_shape))
            aggregated = aggregation_layer(encoder_output)
            aggregated_outputs.append(aggregated)
        input_tensor = torch.cat(aggregated_outputs, dim=1)
    return input_tensor
f2d65e1c2c214cfddae40dd235fba86a61866277
697,661
def apply_each(functions, *args, **kwargs):
    """Returns list containing result of applying each function to args."""
    return [f(*args, **kwargs) for f in functions]
15dc85cb155db030f4eaf2eccdbd40ed20585b82
697,663
def _model_delete_by_id_function_name(model):
    """Returns the name of the function to delete a model by id"""
    return '{}_delete_by_id'.format(model.get_table_name())
df3e1de727b585c1cab403dfcfea08eafe25aeec
697,664
def subset_matrix(matrix, name_list_1, name_list_2):
    """Subsetting matrix into two symmetric matrices given two label lists

    Parameters:
    ----------
    matrix : Pandas DataFrame
        Full similarity matrix
    name_list_1 : list
        list of names
    name_list_2 : list
        list of names

    Returns:
    --------
    list: list with subsets of matrix
    """
    sim_matrix_1 = matrix[name_list_1].loc[name_list_1]
    sim_matrix_2 = matrix[name_list_2].loc[name_list_2]
    sim_matrix = matrix[name_list_1].loc[name_list_2]

    return sim_matrix_1, sim_matrix_2, sim_matrix
1b36308c73c864fa446b0bc756240d4d2a8639a6
697,668
def my_sum(x_val: int, y_val: int) -> int:
    """Sum 2 integers.

    Args:
        x_val (int): integer to sum.
        y_val (int): integer to sum.

    Returns:
        int: result of the summation.
    """
    assert isinstance(x_val, int) and isinstance(
        y_val, int
    ), "Input parameters should be integers."
    return x_val + y_val
c10001f9ff720ce3d180aaa89af555ac1860ae33
697,673
def cidr_to_mask(cidr):
    """
    Converts decimal CIDR notation to a quad dotted subnetmask.

    :param cidr: Decimal CIDR number
    :return: Quad dotted subnetmask as string
    """
    cidr = int(cidr)
    mask = (0xffffffff >> (32 - cidr)) << (32 - cidr)
    return (str((0xff000000 & mask) >> 24) + '.' +
            str((0x00ff0000 & mask) >> 16) + '.' +
            str((0x0000ff00 & mask) >> 8) + '.' +
            str((0x000000ff & mask)))
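Example conversions:

cidr_to_mask(24)  # '255.255.255.0'
cidr_to_mask(19)  # '255.255.224.0'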
037e1cacfb392fad4a06edf1d9c16c4f27c0468b
697,676
import random


def computer_choice() -> str:
    """
    This function takes no inputs and returns either "rock", "paper", or
    "scissors" randomly to simulate the computer's decision.
    """
    # 'random' is a library. A library is essentially a collection of code that
    # contains some kind of feature(s) that we want to use in our program. In
    # order to use these features, we need to bring the library into our program.
    # This is what is meant by 'importing' a library.

    # A function can 'return' a value, allowing it to directly represent some kind
    # of data. In this case, we are returning the output of the choice function
    # (from the random library). This choice function has 1 input (argument): a
    # tuple of 3 possible comma-separated choices that it may return.
    return random.choice(("rock", "paper", "scissors"))
1f8e064adfbd6242f2bb199caf98b77acf38c596
697,677
def head(your_list, default=None):
    """Simple head function implementation."""
    return next(iter(your_list or []), default)
382e6f069b7aa15c710b41007bbd03be23d63bde
697,692
from typing import Any, Iterable


def is_empty(iterable: Iterable[Any]) -> bool:
    """
    Check whether provided iterable is empty.

    :param iterable: iterable whose emptiness is to be checked.
    :return: flag indicating if the provided iterable is empty.
    """
    return not any(True for _ in iterable)
cef865deec6a7fd4241e15abaac7fced9ac114b0
697,694
def gen_xacro_macro(name, links, joints):
    """
    Generates (as a string) the complete urdf element sequence for a simple
    ROS-Industrial xacro macro that defines geometry (links, joints). It takes
    a single argument ``prefix`` that should be used when instantiating the
    macro in a composite parent scene.

    Note that the ``links`` and ``joints`` sequences should already be strings.
    The ``gen_link(..)`` and ``gen_joint_fixed(..)`` macros may be used for that.

    :param name: Name of the macro, ``str``
    :param links: Sequence containing all the links that should be defined by
        the macro, ``seq(str)``
    :param joints: Sequence containing all the joints that should be defined by
        the macro, ``seq(str)``
    :returns: urdf element sequence for a xacro macro, ``str``
    """
    links_str = ''.join(links)
    joints_str = ''.join(joints)
    return '<xacro:macro name="{name}" params="prefix">{links}{joints}</xacro:macro>'.format(
        name=name, links=links_str, joints=joints_str)
4be7f3353a9fba1f7127e81e29e3536150ed834e
697,695
def solution(A):  # O(N)
    """
    Given an integer array nums, find the contiguous subarray (containing
    at least one number) which has the largest sum and return its sum.

    >>> solution([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    >>> solution([-2, -1, -3, 1])
    1
    """
    min_point = 0                 # smallest prefix sum seen so far (empty prefix = 0)
    largest_sum = float('-inf')   # start at -inf so all-negative arrays work too
    summed_value = 0
    for value in A:               # O(N)
        summed_value += value
        # best subarray ending here = current prefix sum minus the smallest earlier prefix
        largest_sum = max(largest_sum, summed_value - min_point)
        min_point = min(min_point, summed_value)
    return largest_sum            # O(1)
9c41707940d563ba10c97af53e72f8bbf30fd071
697,696
def _index(key, sequence, testfn=None, keyfn=None):
    """Return the index of key within sequence, using testfn for comparison
    and transforming items of sequence by keyfn first.

    >>> _index('e', 'hello')
    1
    >>> _index('E', 'hello', testfn=_equalsIgnoreCase)
    1
    >>> _index('x', 'hello')
    """
    index = 0
    for element in sequence:
        value = element
        if keyfn:
            value = keyfn(value)
        if (not testfn and value == key) or (testfn and testfn(value, key)):
            return index
        index += 1
    return None
0595726f9e14e8f4a3fd4b925de04c801be317a3
697,698
import six


def like_filter(query, cls, search_opts):
    """Add 'like' filters for specified columns.

    Add a sqlalchemy 'like' filter to the query for any entry in the
    'search_opts' dict where the key is the name of a column in 'cls'
    and the value is a string containing '%'.

    This allows the value of a column to be matched against simple sql
    string patterns using LIKE and the '%' wildcard.

    Return the modified query and any entries in search_opts whose keys
    do not match columns or whose values are not strings containing '%'.

    :param query: a non-null query object
    :param cls: the database model class the filters will apply to
    :param search_opts: a dictionary whose key/value entries are interpreted as
        column names and search patterns
    :returns: a tuple containing the modified query and a dictionary of
        unused search_opts
    """
    if not search_opts:
        return query, search_opts

    remaining = {}
    for k, v in six.iteritems(search_opts):
        if isinstance(v, six.string_types) and (
                '%' in v and k in cls.__table__.columns):
            col = cls.__table__.columns[k]
            query = query.filter(col.like(v))
        else:
            remaining[k] = v
    return query, remaining
c42318350a9c19715fc6a2e0c74d8bec423b4cfc
697,701
def read_file(filename):
    """
    Return the contents of the file with the given filename as a string

    >>> write_file('read_write_file.txt', 'Hello World')
    >>> read_file('read_write_file.txt')
    'Hello World'
    >>> os.unlink('read_write_file.txt')
    """
    with open(filename) as in_fh:
        return in_fh.read()
756187a755a54b2d6e96ad6d297ceb2472afbb6c
697,704
from typing import Callable


def linear_schedule(initial_value: float) -> Callable[[float], float]:
    """
    Linear learning rate schedule.

    :param initial_value: Initial learning rate.
    :return: schedule that computes current learning rate depending on
        remaining progress
    """
    def func(progress_remaining: float) -> float:
        """
        Progress will decrease from 1 (beginning) to 0.

        :param progress_remaining:
        :return: current learning rate
        """
        return progress_remaining * initial_value

    return func
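Typical use as a learning-rate schedule:

schedule = linear_schedule(3e-4)
schedule(1.0)  # 0.0003 at the start of training
schedule(0.5)  # 0.00015 halfway through
schedule(0.0)  # 0.0 at the end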
47a580788c745b7566c22daa97c3aecee0bb8ff2
697,708
from datetime import datetime


def is_datenum(datenum):
    """ Return True if given str is a date in format %Y%m%d """
    try:
        datetime.strptime(datenum, "%Y%m%d")
        return True
    except (ValueError, TypeError):
        return False
c61998ebf18a3fbdd4c87463a2ab0790864c62b4
697,710
def _compute_position(input, index):
    """Compute line/column position given an index in a string."""
    line = 1
    col = 1
    eol = None  # last end of line character
    for c in input[:index]:
        if c == '\n' or c == '\r':
            if eol is None or eol == c:
                eol = c
                line += 1
                col = 1
            else:
                # ignore second of '\n\r' and '\r\n' sequences
                eol = None
        else:
            col += 1
    return (line, col)
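For example:

_compute_position("ab\ncd", 4)  # (2, 2): index 4 ('d') is line 2, column 2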
f08217651d11ed09c1e100368aa8cc869c37e386
697,713
import itertools


def flatten(sequence):
    """
    Get a flat list out of a list of lists.
    """
    return list(itertools.chain(*sequence))
1c3c7c41969c7e172083e73f2e2aa5731fb56ada
697,715
def equal_nan(request):
    """Fixture for whether to consider NaN values as equal when comparing fields."""
    return request.config.getoption("equal_nan")
8fce969c2c84201822db735d46574644124c5c1a
697,716
def round2(number: float) -> float:
    """
    Rounds a number to the second decimal.
    """
    return round(number, 2)
5c788d01bce28831145391dcb261362b8a2208f3
697,717
def flipslash(value):
    """
    Convert all backslashes to forward slashes (for apache)
    """
    return value.replace("\\", '/')
8fa5abe7c334e0b229aa7e9b2477c3c9aecf38e3
697,718
def binary_search(items, desired_item, start=0, end=None):
    """Standard binary search.

    Parameters:
        items: a sorted list
        desired_item: the single item we are looking for a match on (groovy baby)
        start: int value representing the index position of the search section
        end: end boundary of the search section; the section is empty when
            end == start

    Returns:
        None: only returned if the desired_item is not found in items
        pos: the index position of desired_item if found.
    """
    if end is None:
        end = len(items)

    if start == end:
        return None
        # raise ValueError("%s was not found in the list." % desired_item)

    pos = (end - start) // 2 + start

    if desired_item == items[pos]:
        return pos
    elif desired_item > items[pos]:
        return binary_search(items, desired_item, start=(pos + 1), end=end)
    else:  # desired_item < items[pos]
        return binary_search(items, desired_item, start=start, end=pos)
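Example:

items = [2, 4, 8, 16, 32]
binary_search(items, 8)  # 2
binary_search(items, 5)  # None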
7adb942e73e5c3945190c7d88882763f9f7b4f08
697,721
import importlib


def get_class(m):
    """Import class from string

    :param m: string or class to be imported
    :type m: str or class
    :rtype: class

    >>> get_class('microtc.textmodel.TextModel')
    <class 'microtc.textmodel.TextModel'>
    """
    if isinstance(m, str):
        a = m.split('.')
        p = importlib.import_module('.'.join(a[:-1]))
        return getattr(p, a[-1])
    return m
9a8c55524a47224a4e916191d66428aad2bc08d5
697,724
import math


def vec3(a, b, norm=1.0):
    """
    x, y, z <- vec3(a, b, norm=1.0)

    returns the vector a, b scaled to norm
    """
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    dz = b[2] - a[2]
    l = norm / math.sqrt(dx * dx + dy * dy + dz * dz)
    return [dx * l, dy * l, dz * l]
db0f53cad9c472dd903cb18aabd1a6fad275b5bd
697,726
def path_to_url(path):
    """Convert a system path to a URL."""
    return '/'.join(path.split('\\'))
79bfd1715420002371fe4201863d736bf9e3b2bf
697,728
def _use_reasonable_speed(preset, frame_count):
    """Return a reasonable speed parameter for the given animation length."""
    return preset.settings.get("speed", 0.25) * (frame_count / 30.0)
9327131fbe8f55ba1ee5c1ccc932132ad3d3162a
697,729
def blend_color_dodge(cb: float, cs: float) -> float:
    """Blend mode 'dodge'."""
    if cb == 0:
        return 0
    elif cs == 1:
        return 1
    else:
        return min(1, cb / (1 - cs))
5a96383ce6f71aca42639c7ac4962ea74ecd02c6
697,730
def drop(n, xs):
    """
    drop :: Int -> [a] -> [a]

    drop(n, xs) returns the suffix of xs after the first n elements,
    or [] if n > length xs
    """
    return xs[n:]
e9261686022f5419edade3b47e82c68bd52b5cd8
697,737
def formatIntervalHours(cHours):
    """ Format an hours interval into a nice 1w 2d 1h string. """
    # Simple special cases.
    if cHours < 24:
        return '%sh' % (cHours,)

    # Generic and a bit slower.
    cWeeks = cHours // (7 * 24)   # integer division so the '%s' formatting stays integral
    cHours %= 7 * 24
    cDays = cHours // 24
    cHours %= 24
    sRet = ''
    if cWeeks > 0:
        sRet = '%sw ' % (cWeeks,)
    if cDays > 0:
        sRet += '%sd ' % (cDays,)   # append -- assigning here would drop the weeks part
    if cHours > 0:
        sRet += '%sh ' % (cHours,)
    assert len(sRet) > 0
    assert sRet[-1] == ' '
    return sRet[:-1]
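Example outputs:

formatIntervalHours(8)    # '8h'
formatIntervalHours(200)  # '1w 1d 8h'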
d7c9be3110eb1ecbfb57ae51854d2e576519ced3
697,739
def get_both_filenames(filename):
    """ Get a list of both filenames for FUV data

    Regardless if rootname_corrtag_a.fits or rootname_corrtag_b.fits
    is passed in, both will be returned in a list.

    Parameters
    ----------
    filename : str
        full path to COS file

    Returns
    -------
    files : tuple
        rootname_corrtag_a.fits, rootname_corrtag_b.fits
    """
    if '_a.fits' in filename:
        other_filename = filename.replace('_a.fits', '_b.fits')
    elif '_b.fits' in filename:
        other_filename = filename.replace('_b.fits', '_a.fits')
    else:
        raise ValueError("filename {} doesn't match FUV convention".format(filename))

    filename_list = [filename, other_filename]
    filename_list.sort()

    return (filename_list[0], filename_list[1])
76449b6f2719d5c6b7ee6dd01f730b9193e368da
697,747
def _get_greensf_group_name(hdffile):
    """
    Return the name of the group containing the Green's function elements

    :param hdffile: h5py.File of the greensf.hdf file

    :returns: str of the group name containing the Green's Function elements
    """
    if '/GreensFunctionElements' in hdffile:
        return 'GreensFunctionElements'
    elif '/Hubbard1Elements' in hdffile:
        return 'Hubbard1Elements'
fa5a8c65cad63b3053d8b55af95c9c4547493793
697,749
import yaml


def from_yml(yml_string: str) -> dict:
    """Load the given YAML string into a dict

    :param yml_string: YAML string to work on
    :type yml_string: str
    :return: dict representation of the given YAML string
    :rtype: dict
    """
    config: dict = yaml.safe_load(yml_string)
    return config
28868d568fa519e795df4a32ecae8608da6465d6
697,750
import string
import random


def generator(length):
    """Generate a random sequence of bytes

    Args:
        length (int): length

    Returns:
        bytes: random sequence of bytes
    """
    letters = string.printable
    return ''.join(random.choice(letters) for _ in range(length)).encode()
e7a7f22ec3470e9bf45b0084fdc9da917c1f18fb
697,751
import logging


def get_loglevel(loglevel):
    """
    Check valid log level supplied and return numeric log level
    """
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: %s" % loglevel)
    return numeric_level
017bc0be3e5feb890ccd7477bedb87d1077f1ef5
697,753
def safe_column_name(name):
    """Generate SQL friendly column name"""
    return '"{}"'.format(name).upper()
495b54a94c4350a8f20df5a1416a5c8c10d7a559
697,756
def column_name_list(columns):
    """
    Gets a comma-separated list of column names.

    :param columns: The list of columns.
    :returns: A comma-separated list of column names.
    """
    if not columns:
        return ''

    return ', '.join([column.name for column in columns])
7c40e420e55368454768d84229f959a11b5c00dd
697,759
def positiveaxis(axis, ndim):
    """Positive axis

    Args:
        axis(num): dimension index
        ndim(num): number of dimensions

    Returns:
        num
    """
    if axis < 0:
        axis += ndim
    if axis < 0 or axis >= ndim:
        raise IndexError("axis out of range")
    return axis
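For instance, with a 3-dimensional array:

positiveaxis(-1, 3)  # 2
positiveaxis(1, 3)   # 1
positiveaxis(5, 3)   # raises IndexError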
3075c06cf5843027bc45e04e032798250924f8fd
697,763
def is_desired_workflow(run_json):
    """
    Checks if this run is for the "Presubmit Checks" workflow.
    """
    # Each workflow has a fixed ID.
    # For the "Presubmit Checks" workflow, it is:
    # https://api.github.com/repos/taichi-dev/taichi/actions/workflows/1291024
    DESIRED_ID = 1291024
    return run_json['workflow_id'] == DESIRED_ID
fc5d915fd3e7b8a62075b29fc9dbe8525fc4da0a
697,765
def format_codepoint(codepoint):
    """Format a codepoint (integer) to a USV (at least 4 hex digits)"""
    usv = ''
    if codepoint:
        usv = f'{codepoint:04X}'
    return usv
c3ac63ab0218ad90f7191507a825e95a4f4ab80c
697,766
def get_dim_coord_names(cube):
    """
    Returns an ordered list of dimension coordinate names on the cube

    Args:
        cube (iris.cube.Cube)

    Returns:
        list of str
    """
    return [coord.name() for coord in cube.coords(dim_coords=True)]
ec742b197f02d13e067a250685438f0ed88615e5
697,768
def get_center_of_mass(centers_of_mass, masses):
    """Determine the center of mass of a set of objects

    Args
        centers_of_mass [*shape, num_objects, 2]
        masses [*shape, num_objects]

    Returns [*shape, 2]
    """
    return (centers_of_mass * masses[..., None]).sum(-2) / masses.sum(-1)[..., None]
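A small numpy check (the function works with any array library that supports `[..., None]` indexing and `.sum(axis)`):

import numpy as np

coms = np.array([[0.0, 0.0], [2.0, 2.0]])  # two objects in 2D
masses = np.array([1.0, 3.0])
get_center_of_mass(coms, masses)  # array([1.5, 1.5])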
22c95024120579ed8471f093ddedd9da75b4ed0f
697,770
import copy


def start_board(solution_board):
    """Generates an empty board from a solution board"""
    empty_board = copy.deepcopy(solution_board)
    for row in empty_board:
        for index in range(len(row)):
            row[index] = '-'
    return empty_board
fafa73da5867bf7ae324a0531c64c16ae5789f0d
697,772
def safe_get(lst, index, fallback_value):
    """Similar to dict's .get(key, fallback) function but for lists.
    Returns a fallback/default value if the index is not valid for the list,
    otherwise returns the value at that index.

    Args:
        lst (list): a list-like object
        index (int): an index into the list
        fallback_value: any value to be returned when the indexing fails

    Returns:
        the value in the list, or the fallback_value if the index is not
        valid for the list.
    """
    try:
        return lst[index]
    except IndexError:
        return fallback_value
bf799f45a04335adc7673aff155b301333ff8e26
697,773
def clusters_to_labels(clusters):
    """
    :param clusters: List of lists, each sublist contains doc ids in that cluster
    :return labels: Dict of [doc_id, cluster_label] where cluster_label are
        assigned from positive ints starting at 1
    """
    labels = dict()
    # start=1 so labels match the documented contract of positive ints from 1
    for label, cluster in enumerate(clusters, start=1):
        for doc_id in cluster:
            labels[doc_id] = label
    return labels
4a443b729965d4632dc3ff68cb0828bcc2f4f2ff
697,774
import ftplib


def connectToFTP(host, usern, passw, verb):
    """
    Creates an FTP Connection with the given details using FTPLib.

    :param host: Hostname (e.g. IP address) of the connection
    :param usern: Username to login with
    :param passw: password
    :param verb: print errors or not.
    :return: FTP Connection object. Success: 1, Failure: 0
    """
    try:
        ftp = ftplib.FTP(host, usern, passw)
        ftp.encoding = "utf-8"
        ftp.set_pasv(False)
    except Exception as e:
        if verb:
            print(e)
            print("\nERROR while creating FTP Connection \n")
        return None, 0

    return ftp, 1
f6c4dabedbc400204d85cf699a7c592b38704d7d
697,779
def call_instance_method(instance, name, args, kwargs):
    """indirect caller for instance methods for multiprocessing

    Args:
        instance: the instance to call method with
        name (str): method name to call
        args (tuple or None): arguments to be passed to getattr(instance, name)
        kwargs (dict or None): kwargs to be passed to getattr(instance, name)

    Returns:
        the returned values of getattr(instance, name)
    """
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    return getattr(instance, name)(*args, **kwargs)
30831cafeab2a4f3310a21c41a03e8d3ec9a43ec
697,782
def refactorize(arr, first_na, na_sentinel=-1):
    """
    Modify `arr` *inplace* to match pandas' factorization rules.

    This detects the code missing values were assigned, sets
    those to `na_sentinel`, and shifts codes above that value
    down by 1 to fill the hole.

    Parameters
    ----------
    arr : ndarray
        First return value from :meth:`pandas.factorize`
    first_na : int
        The index location of the first missing value
    na_sentinel : int, default -1
        Value to set for missing values.
    """
    # A naive benchmark shows that this gets ~285x speedup
    # with numba on a 10,000 element array.
    na_code = arr[first_na]
    for i in range(len(arr)):
        val = arr[i]
        if val == na_code:
            arr[i] = na_sentinel
        elif val > na_code:
            arr[i] -= 1
    return arr
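To see the rule in action:

import numpy as np

codes = np.array([0, 1, 2, 1, 3])  # code 1 was assigned to the missing values
refactorize(codes, first_na=1)     # -> array([ 0, -1,  1, -1,  2])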
31d5e07650bb0ec6f93d59d453d8de859ab0c621
697,785
def get_file_id(service, file_name, mime_type=None, parent_id=None):
    """Return the ID of a Google Drive file

    :param service: A Google Drive API service object
    :param file_name: A string, the name of the file
    :param mime_type: A string, optional MIME type of file to search for
    :param parent_id: A string, optional id of a parent folder to search in

    :return file_id: A string, file ID of the first found result
    """
    file_id = None

    query = """name='{}' and trashed=False """.format(file_name)

    if parent_id:
        query += "and parents in '{}'".format(parent_id)

    if mime_type:
        query += "and mimeType in '{}'".format(mime_type)

    try:
        results = service.files().list(
            q=query,
            fields='files(name, id)').execute()

        if len(results['files']) > 1:
            print('Multiple files found, retrieving first from list')

        file_id = results['files'][0]['id']
    except Exception as e:
        print('An error occurred: {}'.format(e))

    return file_id
e8e371ea740ca4be55b35baa74c28207ca6b7b4d
697,787
def get_section(f, first_delim, second_delim):
    """
    Some open-source indicator downloads contain multiple sections. This
    will return the section of the file f that is between the first_delim
    and second_delim

    :param f: The file containing the section to be processed
    :type f: file
    :param first_delim: A string representing the beginning of the section
    :type first_delim: str
    :param second_delim: A string representing the terminator of the section
    :type second_delim: str
    :returns: list
    """
    g = []

    line = f.readline()
    while line.find(first_delim) == -1:
        line = f.readline()
        if not line:
            return None
    line = f.readline()

    if second_delim != "":
        while line.find(second_delim) == -1:
            g.append(line)
            line = f.readline()
    else:
        for line in f:
            g.append(line)

    return g
d0d08cf5fc157b7361c4ff5e20fe466de76d93fb
697,788
from typing import Dict


def number_topics_and_clusters(model, level: int) -> Dict[str, int]:
    """Get the number of topics and clusters for a level of the model hierarchy."""
    model.get_groups(level)
    return {
        "n_topics": model.groups[level]["Bw"],
        "n_clusters": model.groups[level]["Bd"],
    }
ff587450e06bb6ce7ce97bd454c310a7b8d9e4b1
697,790
import re


def get_year_from_date_str(date_str):
    """
    Retrieve all 4-digit year substrings from a text string as a list,
    or None if the string is empty.
    """
    return re.findall(r'\d{4}', date_str) if date_str else None
b2b02e97963d12236f20dad5d6b5337404a0dfc7
697,791
def limit(value, min_val, max_val):
    """Returns value clipped to the range [min_val, max_val]"""
    return max(min_val, min(value, max_val))
8005e5cc9b6947e265f93eed1aa19f4e2abcc5ab
697,794
def int2tap(x):
    """Convert a signed integer string to a tap position ('pre…'/'post…')."""
    if x[0] == "-":
        res = "pre" + x[1:]
    else:
        res = "post" + x
    return res
2af5e1b98258dfb921005a454689d862bdf3d9fe
697,795
def generate_list(start, stop, step):
    """
    >>> generate_list(0, 5, 1)
    [0, 1, 2, 3, 4]
    >>> generate_list(0, 0, 1)
    []
    >>> generate_list(5, 10, 2)
    [5, 7, 9]
    >>> generate_list(10, 5, -2)
    [10, 8, 6]
    """
    idx = start
    lst = []
    if idx < stop:
        while idx < stop:
            lst.append(idx)
            idx += step
    else:
        while idx > stop:
            lst.append(idx)
            idx += step
    return lst
51082bd4acf65abdaacbb98b429d79dcb718d9f7
697,796
def blocksearch(block, name):
    """ Recursive search for name in block (inner blocks)

    Args:
        name (str): search term

    Returns:
        Block OR False
    """
    if hasattr(block, 'tokens'):
        for b in block.tokens[1]:
            b = (b if hasattr(b, 'raw') and b.raw() == name
                 else blocksearch(b, name))
            if b:
                return b
    return False
da9f762dddabe762dbabd80addebc0a957b04135
697,798
def get_provenance_record(ancestor_files):
    """Create a provenance record describing the diagnostic data and plot."""
    record = {
        'caption': ('(a) Zonally averaged sea surface temperature (SST) error in CMIP5 '
                    'models. (b) Equatorial SST error in CMIP5 models. (c) Zonally '
                    'averaged multi-model mean SST error for CMIP5 (red line) together '
                    'with inter-model standard deviation (shading). (d) Equatorial '
                    'multi-model mean SST in CMIP5 (red line) together with inter-model '
                    'standard deviation (shading) and observations (black). Model '
                    'climatologies are derived from the 1979-1999 mean of the historical '
                    'simulations. The Hadley Centre Sea Ice and Sea Surface Temperature '
                    '(HadISST) (Rayner et al., 2003) observational climatology for '
                    '1979-1999 is used as reference for the error calculation (a), (b), '
                    'and (c); and for observations in (d).'),
        'statistics': ['anomaly', 'mean', 'stddev', 'clim'],
        'domains': ['eq', 'global'],
        'plot_types': ['geo', 'sect', 'zonal'],
        'authors': ['zimmermann_klaus'],
        'projects': ['crescendo'],
        'references': ['flato13ipcc', 'hadisst'],
        'realms': ['ocean'],
        'themes': ['phys'],
        'ancestors': ancestor_files,
    }
    return record
d0c68eb45dd0a9cee0746294b4fd3202dc1003de
697,800
def compute_statistics(datasetKaggle):
    """
    Outputs various statistics of the Kaggle dataset

    :param datasetKaggle: the input dataset formatted as a dictionary
    :type datasetKaggle: dict
    :return: dictionary with the processed information
    :rtype: dict
    """
    yesNoAnswer = 0
    annotationsMax = 0
    averageLength = 0
    totalExamples = len(datasetKaggle)

    for example in datasetKaggle:
        # check for the maximum number of annotations
        annotationsMax = max(len(example['annotations']), annotationsMax)
        if example['annotations'][0]['yes_no_answer'] != 'NONE':
            yesNoAnswer += 1
        # accumulate so this is a true average, not just the last example's length
        averageLength += len(example['document_text']) / totalExamples

    output = {'annotationsMax': annotationsMax,
              'num_yesNo': yesNoAnswer,
              'text_avgLength': averageLength}
    return output
5d41592f45daa252d2a55c71193c1f022f4f53a2
697,803
def filter_ice_border(ice_thick):
    """Sets the ice thickness at the border of the domain to zero."""
    ice_thick[0, :] = 0
    ice_thick[-1, :] = 0
    ice_thick[:, 0] = 0
    ice_thick[:, -1] = 0
    return ice_thick
cbe448c2659cab832cb499765fb0d4ae8f95d751
697,806
def column_names_from_cursor(cur):
    """returns column names as a list when provided cx_Oracle Cursor"""
    column_names = []
    for column_info in cur.description:
        column_names.append(column_info[0])
    return column_names
f5b1606bec5d32a67438c15c7d655079d189e512
697,810
from typing import Callable
import functools


def compose(*function: Callable) -> Callable:
    """Compose functions. I.e::

        ``lambda x: f(g(x))``

    can be written: ``compose(f, g)``

    Args:
        *function (Callable): Any number of functions to compose together.
            Output type of function N must be the input type of function N+1.

    Returns:
        Callable: A composed function with input type same as the first
            function, and output type same as the last function.
    """
    return functools.reduce(lambda f, g: lambda x: f(g(x)), function, lambda x: x)
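Behavior check, right-to-left application:

add_one = lambda x: x + 1
double = lambda x: x * 2
compose(add_one, double)(10)  # add_one(double(10)) == 21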
8c3b04ffeb03303a49c006fa347e150f94700d40
697,812
def echo(value: str):
    """Test Celery task to echo a string.

    Args:
        value (str): string to return.

    Returns:
        str, the input argument string.
    """
    return value
cc96b1f80147950cd0a084f45d379e5be0156c36
697,816
def distance(x1, y1, x2, y2):
    """
    Distance between two points (x1, y1), (x2, y2).
    """
    return pow((pow(x1 - x2, 2) + pow(y1 - y2, 2)), 0.5)
2bd9c1c8be018481998cff594f08c98d29be04de
697,821
def add_or_get_merge_anchor_index(lane, pos):
    """
    Add a merge anchor at pos if needed, return the index of the merge anchor
    """
    if not hasattr(lane, "merge_anchors"):
        lane.merge_anchors = []
    for ind, e in enumerate(lane.merge_anchors):
        if (e[1] is None and pos == lane.start) or e[1] == pos:
            return ind
    lane.merge_anchors.append([lane.anchor, None if pos == lane.start else pos])
    return len(lane.merge_anchors) - 1
ee52833b37d84ca765f8e5c56ec9e8f52691451a
697,823
def is_hovering(rect, mouse_pos):
    """Checks if a mouse is hovering over a rect"""
    return (
        rect.left <= mouse_pos[0] <= rect.right
        and rect.top <= mouse_pos[1] <= rect.bottom
    )
e17f4b8ee6e0b5f174473aa88388ff81ab67ea66
697,824
import math


def compass_bearing(pointA, pointB):
    """
    Calculates the bearing between two points.

    The formula used is the following:
        θ = atan2(sin(Δlong)·cos(lat2),
                  cos(lat1)·sin(lat2) − sin(lat1)·cos(lat2)·cos(Δlong))

    :Parameters:
      - `pointA: The tuple representing the latitude/longitude for the
        first point. Latitude and longitude must be in decimal degrees
      - `pointB: The tuple representing the latitude/longitude for the
        second point. Latitude and longitude must be in decimal degrees

    :Returns:
      The bearing in degrees

    :Returns Type:
      float
    """
    if (type(pointA) != tuple) or (type(pointB) != tuple):
        raise TypeError("Only tuples are supported as arguments")

    lat1 = math.radians(pointA[0])
    lat2 = math.radians(pointB[0])

    diffLong = math.radians(pointB[1] - pointA[1])

    x = math.sin(diffLong) * math.cos(lat2)
    y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
                                           * math.cos(lat2) * math.cos(diffLong))

    initial_bearing = math.atan2(x, y)

    # Now we have the initial bearing, but math.atan2 returns values
    # from -180° to +180°, which is not what we want for a compass bearing.
    # The solution is to normalize the initial bearing as shown below.
    initial_bearing = math.degrees(initial_bearing)
    compass_bearing = (initial_bearing + 360) % 360

    return compass_bearing
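Sanity checks on cardinal directions:

compass_bearing((0.0, 0.0), (0.0, 10.0))  # 90.0  -> due east
compass_bearing((10.0, 0.0), (0.0, 0.0))  # 180.0 -> due south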
4ef715ab9519e306a12c42ab87f0a624d7e0589c
697,825
def copy_dict(other_dict):
    """
    Returns a copy of the dictionary, separate from the original.

    This separation is only at the top-level keys.
    If you delete a key in the original, it will not change the copy.

    >>> d1 = dict(a=1, b=2)
    >>> d2 = dict(**d1)
    >>> del d1['a']
    >>> 'a' in d1
    False
    >>> 'a' in d2
    True

    If any of the top-level values are mutable (list, dict) then changes
    to the original will appear in the copy.

    >>> d1 = dict(a=[1, 2], b=[3, 4])
    >>> d2 = dict(**d1)
    >>> d1['a'].pop()
    2
    >>> d1['a']
    [1]
    >>> d1['a'] == d2['a']
    True

    Tested in Python 3.4.
    """
    new_dict = dict(**other_dict)
    return new_dict
a8d25212b2bf0524a5f434fd590c1ac3ec8b8810
697,829