Columns: content (string, lengths 39 to 14.9k), sha1 (string, length 40), id (int64, values 0 to 710k)
def list_services(config_dict):
    """
    List available services

    Args:
        config_dict (dict): configuration dictionary

    Returns:
        list: list of available services
    """
    return list(config_dict.keys())
a3226589405292f0bcef99aabaec71722aaeb1db
697,838
def actions_by_behavior(actions):
    """
    Gather a dictionary grouping the actions by behavior (not SubBehaviors).
    The actions in each list are still sorted by order of their execution in the script.

    @param actions (list) of Action objects
    @return (dict) where the keys are behaviors and the values are lists of actions
    """
    split = {}
    for action in actions:
        for behavior in action.behaviors:
            if behavior.name not in split:
                split[behavior.name] = []
            split[behavior.name].append(action)
    return split
ebf97a5837a8d7de735207c5df5e19af2438510a
697,839
import logging


def get_loglevel(loglevel):
    """
    Check whether a valid log level has been supplied, and return the numeric
    log level that corresponds to the given string level.

    Parameters
    ----------
    loglevel: str
        The loglevel that has been requested

    Returns
    -------
    int
        The numeric representation of the given loglevel
    """
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: %s" % loglevel)
    return numeric_level
a662e00caa21bf5632b28f26cbb604229db9705a
697,842
import base64
import json

import requests


def retrieve_appdevtools_access_token(username, password, auth_endpoint):
    """
    Retrieve access token for appdev tools

    Args:
        username: username
        password: password
        auth_endpoint: authorization endpoint

    Returns:
        token
    """
    # Form credentials request
    credentials = "{0}:{1}".format(username, password)
    base_encoded_credential = base64.b64encode(bytes(credentials, "utf-8")).decode().replace('\n', '')

    # Form application headers
    headers = {
        "content-type": "application/json",
        "Authorization": "Basic {}".format(base_encoded_credential)
    }

    # Form login request
    login_request = {
        "username": username,
        "password": password
    }

    # Issue request with SSL certificate validation enabled
    response = requests.post(url=auth_endpoint, json=login_request, headers=headers, verify=True)

    # Get access token
    return json.loads(response.text)["token"]
8f08f4be45998199aa9db151fec26722a13eb060
697,844
def count_literals(term):
    """
    Counts the number of literals in a term

    Args:
        term: A string containing literals

    Returns:
        The number of literals in term
    """
    count = 0
    for char in term:
        if char != "_":
            count += 1
    return count
48bb0066f09994627a4b7c2b04a657df1106a1c3
697,847
import json
import warnings


def get_json_from_file(filename, warn=False):
    """Loads json from a file. Optionally specify warn=True to warn,
    rather than fail, if the file is not found."""
    try:
        with open(filename, 'r') as f:
            return json.loads(f.read())
    except FileNotFoundError:
        if not warn:
            raise
        warnings.warn("File not found: {}".format(filename))
        return None
6916e0239067b977d0671e1e38006f457da94ae7
697,849
def list_frequencies(list_of_items):
    """ Determine frequency of items in list_of_items. """
    itemfreq = [list_of_items.count(p) for p in list_of_items]
    return dict(zip(list_of_items, itemfreq))
8772e64d0abd400dfe333747e7fce0d816a0f59f
697,854
import math


def normStdevMask(img, mask):
    """normalize an image with mean = 0 and stddev = 1.0 only inside a mask"""
    n1 = mask.sum()
    if n1 == 0:
        return img
    sum1 = (img * mask).sum()
    sumsq1 = (img * img * mask).sum()
    avg1 = sum1 / n1
    std1 = math.sqrt((sumsq1 - sum1 * sum1 / n1) / (n1 - 1))
    return (img - avg1) / std1
b5493d1e8a0f6594badb97c9376777117263eb0b
697,858
from typing import Dict
from datetime import datetime


def inject_now() -> Dict:
    """Inject current datetime into request context."""
    return dict(request_datetime=datetime.now())
e186d4b478a5da5bcdf83e2e34cd381de8d039be
697,860
def _make_eof_producer(producer):
    """
    Send a special EOF byte sequence to terminate the request.

    Args:
        producer (Callable[[str, socket.socket], None]): Request producer.

    Returns:
        Callable[[str, socket.socket], None]: Request producer that sends
        'END-OF-FILE' at end of request.
    """
    def new_producer(audio_input, sock):
        producer(audio_input, sock)
        sock.sendall(b'END-OF-FILE')
    return new_producer
9c035b1f790cab96c9cffc91e6dcf8a13017a4cc
697,861
def in_key_rd_dicts(key_rd_dict, key_rd_dicts):
    """Return True if key_rd_dict is contained in the list of key_rd_dicts."""
    k = next(iter(key_rd_dict))
    v = key_rd_dict[k]
    for candidate in key_rd_dicts:
        for key, val in candidate.items():
            if key == k and val == v:
                return True
    return False
04c691a1a78834c7d660ae3036e97ea0e78393de
697,862
import re


def split_text(text, pattern=r";|。|;|,|,"):
    """split text by pattern

    Args:
        text (str): text
        pattern (regexp, optional): expression. Defaults to r";|。|;|,|,".

    Returns:
        str: text split by pattern

    Examples:
        >>> s = String()
        >>> text = "收快递的时候最怕收不到货,所以购物的时候一定要把地址写清楚,这样才会精准的送到你手里,我告诉大家以后怎么写:“本宇宙-拉尼凯亚超星系团-室女座星系团-本星系群-银河系-猎户臂-太阳系-第三行星-地球-亚洲板块-中国-xxx-xxx-xxx”这样可以保证不会送到其他宇宙去"
        >>> s.split_text(text=text)
        ['收快递的时候最怕收不到货', '所以购物的时候一定要把地址写清楚', '这样才会精准的送到你手里', '我告诉大家以后怎么写:“本宇宙-拉尼凯亚超星系团-室女座星系团-本星系群-银河系-猎户臂-太阳系-第三行星-地球-亚洲板块-中国-xxx-xxx-xxx”这样可以保证不会送到其他宇宙去']
    """
    txts = re.split(pattern, text)
    return txts
080b63c947f5b9aca749e2ec4981f64c09e2ad43
697,864
def sphere_sre(solution):
    """
    Variant of the sphere function. Dimensions except the first 10 ones have
    limited impact on the function value.
    """
    a = 0
    bias = 0.2
    x = solution.get_x()
    x1 = x[:10]
    x2 = x[10:]
    value1 = sum([(i - bias) * (i - bias) for i in x1])
    value2 = 1 / len(x) * sum([(i - bias) * (i - bias) for i in x2])
    return value1 + value2
38987c77a6586a0bfab1d94cc4a1511e9418349f
697,867
from typing import Union
from datetime import datetime

import pytz


def localnow(tz: Union[pytz.BaseTzInfo, str] = "US/Central") -> datetime:
    """
    Get the current datetime as a localized datetime object with timezone information

    Keyword Arguments:
        tz {Union[pytz.BaseTzInfo, str]} -- localize datetime to this timezone
            (default: "US/Central")

    Returns:
        datetime -- localized datetime with tzinfo
    """
    if isinstance(tz, str):
        tz = pytz.timezone(tz)
    return datetime.now().astimezone(tz)
3dd9132c4adebacf5d348ec34523584642f7140b
697,868
import torch


def threshold_mask(weights, threshold):
    """Create a threshold mask for the provided parameter tensor using
    magnitude thresholding.

    Arguments:
        weights: a parameter tensor which should be pruned.
        threshold: the pruning threshold.
    Returns:
        prune_mask: The pruning mask.
    """
    return torch.gt(torch.abs(weights), threshold).type(weights.type())
43ca3f018f047e2ac46a83c36c368798b69e104a
697,874
def string_is_yes(string, default=None):
    """
    Mapping of a given string to a boolean.
    If it is empty or None (evaluates to False), and `default` is set, `default` is returned.
    If the lowercase of the string is any of ['y', '1', 'yes', 'true', 'ja'], it will return `True`.
    Else it will return `False`.

    :param string: The input
    :type  string: str
    :param default: default result for empty input
    :type  default: None | bool
    :return: result (True/False/default)
    :rtype: bool
    """
    if not string and default is not None:
        return default
    # end if
    return string.lower() in ['y', '1', 'yes', 'true', 'ja']
68482f78f8d4464891f14c19953e1a4785fe0811
697,877
def arrays_avg(values_array, weights_array=None):
    """
    Computes the mean of the elements of the array.

    Parameters
    ----------
    values_array : array.
        The numbers used to calculate the mean.
    weights_array : array, optional, default None.
        Used to calculate the weighted average, indicates the weight of
        each element in the array (values_array).

    Returns
    -------
    result : float.
        The mean of the array elements.
    """
    n = len(values_array)

    if weights_array is None:
        weights_array = [1] * n
    elif len(weights_array) != n:
        raise ValueError(
            "values_array and weights_array must have the same number of rows"
        )

    result = 0
    for i, j in zip(values_array, weights_array):
        result += i * j

    return result / n
2f24da412c548c1da53338f2f66d57d18473f798
697,882
def issue_dictionary_from(column, issue) -> dict:
    """Map issue to dictionary with field names."""
    return {
        "project column": column.name,
        "issue title": issue.title,
        "issue description": f"{issue.body}\n\n---\n\n{issue.html_url}",
        "labels": ";".join(
            f"'{label.name}'" for label in issue.original_labels
        ),
    }
eae9e6cdf7ea3d715ad2e1dc8c31672c8c53762e
697,885
def age_bin(age, labels, bins):
    """
    Return a label for a given age and bin.

    Argument notes:
        age -- int
        labels -- list of strings
        bins -- list of tuples, with the first tuple value being the inclusive
            lower limit, and the higher tuple value being the exclusive upper limit
    """
    for x in range(len(bins)):
        if age < bins[x][1] and age >= bins[x][0]:
            return labels[x]
9caccc667b55f66824bcf8802161384590cc2a08
697,886
def list_intersect(first, second):
    """
    Returns elements found in first that are in second

    :param first:
    :param second:
    :return:
    """
    second = set(second)
    return [item for item in first if item in second]
74cf1fc791a57c299f7fcf2369c105d68bda772e
697,888
import io
import base64


def image_file_to_b64(image_file: io.BytesIO) -> bytes:
    """
    Encodes an image file as Base64.

    To obtain the stringified Base64 version of the image, you can convert
    the output like so:

    ````python
    image_file_to_b64(my_image_file).decode()
    ````

    Arguments:
        image_file: The BytesIO file object to be converted.

    Returns:
        Bytes representation of the Base64 encoded image.
    """
    return base64.b64encode(image_file.getvalue())
2606f21fca825ea22c06f6372576110c64dc511c
697,890
from typing import Dict
from typing import Tuple

import multiscale  # module providing AbstractMultiscale (exact import path may differ in the original project)


def read_multiscale_params(cfg: Dict[str, dict]) -> Tuple[int, int]:
    """
    Returns the multiscale parameters

    :param cfg: configuration
    :type cfg: dict
    :return:
        - num_scales: number of scales
        - scale_factor: factor by which each coarser layer is downsampled
    :rtype: tuple(int, int)
    """
    if "multiscale" in cfg:
        # Multiscale processing in conf
        multiscale_ = multiscale.AbstractMultiscale(**cfg["multiscale"])  # type: ignore
        num_scales = multiscale_.cfg["num_scales"]
        scale_factor = multiscale_.cfg["scale_factor"]
    else:
        # No multiscale selected
        num_scales = 1
        scale_factor = 1
    return num_scales, scale_factor
8021cc90c4a4740854a92a7d56524d7037e01988
697,894
def get_image_type(data):
    """Return a tuple of (content type, extension) for the image data."""
    # JPEG files start with the SOI marker 0xFFD8; accept both bytes and str input
    if data[:2] in (b"\xff\xd8", "\xff\xd8"):
        return ("image/jpeg", ".jpg")
    return ("image/unknown", ".bin")
1677cb375a7b405b8a229f1246bdb27ed36dc654
697,900
def parse_conjugations(group):
    """
    Parse conjugations in a word group.

    :param group: string of a word group
    :return: list of parsed conjugations
    """
    return list(
        map(
            lambda x: x.split('/')[0].strip(),
            group.split(' – ')
        )
    )
0a1eca118a1f194a51a889a0fe30fe72488d6ada
697,901
def read_thresholds(path):
    """
    Read the p-value thresholds for each tumour type
    """
    thresholds = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            line = line.split('\t')
            tumour = line[0]
            threshold = float(line[1])
            thresholds[tumour] = threshold
    return thresholds
2615cd3b38e4ce1d80baacd06cf955895f0984ee
697,904
def linear_regression_line(mb):
    """
    Given the output of the ``linear_regression()`` function, or provided with
    a tuple of ``(m, b)``, where ``m`` is the slope and ``b`` is the intercept,
    ``linear_regression_line()`` returns a function that calculates y values
    based on given x values.

    Args:
        mb: A list or tuple of [m, b] or (m, b) where m is the slope and b is
            the y intercept.

    Returns:
        A function that accepts ints, floats, lists, or tuples of x values and
        returns y values.

    Examples:
        >>> linear_regression_line(linear_regression([0, 1], [0, 1]))(1)
        1.0
        >>> linear_regression_line(linear_regression([1,3,5,7,9], [10,11,12,13,14]))([1, 2, 3])
        [10.0, 10.5, 11.0]
        >>> linear_regression_line([.5, 9.5])([1, 2, 3])
        [10.0, 10.5, 11.0]
        >>> linear_regression_line(9.5) # doctest: +ELLIPSIS
        Traceback (most recent call last):
            ...
        TypeError: linear_regression_line() expects a list or tuple of (slope, intercept)...
        >>> linear_regression_line([2, 3, 4])
        Traceback (most recent call last):
            ...
        ValueError: The list or tuple containing the slope and intercept needs to be of length = 2.
    """
    if type(mb) not in [list, tuple]:
        raise TypeError('linear_regression_line() expects a list or tuple of '
                        '(slope, intercept) or [slope, intercept] form.')
    if len(mb) != 2:
        raise ValueError('The list or tuple containing the slope and intercept '
                         'needs to be of length = 2.')

    m = mb[0]
    b = mb[1]

    def line_function(x):
        """
        Function created and returned by linear_regression_line().
        """
        # if int or float, return one value
        if type(x) in [int, float]:
            return (x * m) + b
        # otherwise return a list of y values
        elif type(x) in [list, tuple]:
            y_values = []
            for ii in x:
                y_values.append((ii * m) + b)
            return y_values

    return line_function
dbc61ffafe603e8e6ec21f87debc170e3b6eb289
697,907
import re


def convert_doctoc(html):
    """
    Convert doctoc to confluence macro

    :param html: html string
    :return: modified html string
    """
    toc_tag = '''<p>
    <ac:structured-macro ac:name="toc">
      <ac:parameter ac:name="printable">true</ac:parameter>
      <ac:parameter ac:name="style">disc</ac:parameter>
      <ac:parameter ac:name="maxLevel">7</ac:parameter>
      <ac:parameter ac:name="minLevel">1</ac:parameter>
      <ac:parameter ac:name="type">list</ac:parameter>
      <ac:parameter ac:name="outline">clear</ac:parameter>
      <ac:parameter ac:name="include">.*</ac:parameter>
    </ac:structured-macro>
    </p>'''
    html = re.sub(r'<!-- START doctoc.*END doctoc -->', toc_tag, html, flags=re.DOTALL)
    return html
b2a52126d6a234894dcf6f98f0d399c309587a7e
697,911
def fromScopus(scopus_author):
    """Fetch all publications associated with Scopus author

    Parameters
    ----------
    scopus_author : AuthorRetrieval
        Scopus author retrieval object (scopus.author_retrieval.AuthorRetrieval)

    Returns
    -------
    bibs : list
        List of Scopus search publications (scopus.scopus_search.Document)
    """
    bibs = scopus_author.get_documents()
    return bibs
88aeca0f28af9ca3256e77f6e77207ffb51cefa0
697,915
def org_commits_by_day(df_raw):
    """Returns all commits with a count and a date index by day"""
    odf_day = df_raw.resample('D').sum()
    return odf_day
bb003b8ce1a0a28e632cfc78ffa3483359259e49
697,918
def make_queue_name(mt_namespace, handler_name):
    """
    Declare a new queue name for the channel. Depends on the queue "type",
    i.e. whether it receives an event or a command.

    :param mt_namespace: string with Mass Transit namespace
    :param handler_name: string with queue type. MUST be 'command' or 'event'
    :return: new unique queue name for channel
    """
    return '{}.{}'.format(mt_namespace, handler_name)
f74a7be8bf46a56f760b087fe47c9eda58d9f51e
697,919
def _fix_cookiecutter_jinja_var(value, replace='cookiecutter.'):
    """Remove 'cookiecutter.' string from 'cookiecutter.varname' jinja strings

    Can be used to remove different substrings as well by passing a string to
    the optional ``replace`` parameter.

    :param value: The string value within which to replace text
    :type value: str
    :param replace: The string to be removed from the ``value`` input,
        defaults to 'cookiecutter.'
    :type replace: str
    :return: Returns the input value with the ``replace`` string removed if
        ``value`` is of type str, otherwise it just returns the ``value`` input
    """
    if type(value) is str:
        return value.replace(replace, "")
    else:
        return value
fc04d36a368fe7dd8c21a8dd3abe46f60d4d3d5e
697,921
def flatten_dict(row, keys=[('title',), ('street', 'city', 'postalCode')]):
    """Flatten a dict by concatenating string values of matching keys.

    Args:
        row (dict): Data to be flattened
        keys (list of tuples): Groups of keys to try, in order of preference.
    Returns:
        flat (str): Concatenated data.
    """
    flat = ''  # The output data
    for ks in keys:
        # If any keys are present, join the values
        if not any(k in row for k in ks):
            continue
        flat = '\n'.join(row[k] for k in ks if k in row)
        break
    # Ensures that a key has been found,
    # otherwise you'll need to provide more keys
    assert len(flat) > 0
    return flat
b4bafb0ec19a19d4223d7e9a94e3b8630cad1066
697,923
import string


def get_range_to_here_hashes(repo, start):
    """Return a list of strings corresponding to commits from 'start' to here.

    The list begins with the revision closest to but not including 'start'.
    Raise a ValueError if any of the returned values are not valid hexadecimal.

    :repo: a callable supporting git commands, e.g. repo("status")
    :start: a reference that log will understand
    :returns: a list of strings corresponding to commits from 'start' to here
    """
    hashes = repo("log", start + "..", "--format=%H").split()
    if not all(c in string.hexdigits for s in hashes for c in s):
        raise ValueError(
            "phlgit_log__getRangeToHereHashes() invalid hashes\n" + str(hashes))
    hashes.reverse()
    return hashes
41a8795f489d1646ba48366c791308a6d1435b3e
697,926
import re


def check_token(token):
    """ Checks if the given token is a valid UUID."""
    valid = re.compile(r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-"
                       r"[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$")
    return valid.match(token)
be675b2fa04aac96b006ab94d8c0f65d8c02132f
697,929
import requests
from bs4 import BeautifulSoup


def url_to_soup(url):
    """url -> soup"""
    html = requests.get(url)
    return BeautifulSoup(html.text, 'html.parser')
8b3e6a67c2123ca134d581631321cb8a8daf132a
697,931
def is_vowel(char):
    """Check if the character is a vowel."""
    return char in ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
97f747d96e08cd15fe7a1b742b36f5ff6cc2f3fc
697,932
from typing import Iterable
from typing import List


def filter_wikipathways_files(file_names: Iterable[str]) -> List[str]:
    """Keep only file names that start with 'WP' and have the '.ttl' extension."""
    return [
        file_name
        for file_name in file_names
        if file_name.startswith('WP') and file_name.endswith('.ttl')
    ]
abac5be763309986b951d574a72c0f2652b2a981
697,934
def attr_dict(cls: type, data: dict):
    """
    Removes keys from the passed dict ``data`` which don't exist on ``cls``
    (thus would get rejected as kwargs), then create and return an instance
    of ``cls``, passing the filtered data as keyword args.

    Ensures that any keys in your dictionary which don't exist on ``cls`` are
    automatically filtered out, instead of causing an error due to unexpected
    keyword arguments.

    Example::

        >>> data = dict(timestamp="2019-01-01Z00:00", producer='eosio', block_num=1234, example='hello')
        >>> my_block = attr_dict(EOSBlock, data)

    :param cls:
    :param data:
    :return:
    """
    if hasattr(cls, '__attrs_attrs__'):
        cls_keys = [atr.name for atr in cls.__attrs_attrs__]
    else:
        cls_keys = [k for k in cls.__dict__.keys() if k[0] != '_']

    clean_data = {x: y for x, y in data.items() if x in cls_keys}
    return cls(**clean_data)
c764cda2ad772de8b239fe34d39ef3ac92e658e5
697,935
from typing import List


def sbd_3_bitwise(ids: List):
    """
    Solution: Use bitwise XOR on a single variable for each element.

    Complexity:
        Time: O(n)
        Space: O(1)
    """
    unique_id = 0
    for item in ids:
        unique_id ^= item
    return unique_id
d964fe78b2d1cf8de9e4d1cc165ea03a38eb6387
697,937
def process_spatial(geo):
    """Build a GeoJSON polygon from a bounding box so it can be added to the
    spatial metadata.

    Parameters
    ----------
    geo : list
        [minLon, maxLon, minLat, maxLat]

    Returns
    -------
    polygon : dict(list(list))
        Dictionary following GeoJSON polygon format
    """
    polygon = {
        "type": "Polygon",
        "coordinates": [
            [
                [geo[0], geo[2]],
                [geo[0], geo[3]],
                [geo[1], geo[3]],
                [geo[1], geo[2]]
            ]
        ]}
    return polygon
4ff7dea7d7dcefd5b6bfb1605dd50345fc379527
697,938
def int32(x):
    """Force conversion of x to 32-bit signed integer"""
    x = int(x)
    maxint = int(2**31 - 1)
    minint = int(-2**31)
    if x > maxint:
        x = maxint
    if x < minint:
        x = minint
    return x
854338b7fd748ba5ddbbbd8c697288c6331995c4
697,941
def get_type(transaction):
    """
    :return: the type of the transaction
    """
    return transaction['type']
4f66830f7c1e3bdc5d6b2c4ccc5db493014b9e5f
697,943
def format_bytes(n):
    """Format bytes as text

    Copied from dask to avoid dependency.
    """
    if n > 1e15:
        return "%0.2f PB" % (n / 1e15)
    if n > 1e12:
        return "%0.2f TB" % (n / 1e12)
    if n > 1e9:
        return "%0.2f GB" % (n / 1e9)
    if n > 1e6:
        return "%0.2f MB" % (n / 1e6)
    if n > 1e3:
        return "%0.2f kB" % (n / 1000)
    return "%d B" % n
f82a135f7f5308a04bf53bd3b8d1a05ef5d96de0
697,946
def semideviation(r):
    """
    Returns the semi-deviation (negative deviation) of r.
    r must be a Series or DataFrame.
    """
    is_negative = r < 0
    return r[is_negative].std(ddof=0)
78eb4092f2c7ed5439ef4ee44b5ddef898bd53fa
697,948
def _change_galician_cc_token_before_subtree(sentence, token):
    """Determine the token directly preceding a subtree in a sentence.

    Args:
        sentence (`Sentence`): The sentence.
        token (`Token`): The root token of the subtree.

    Returns:
        str: The ID of the token directly preceding the root token and all of
        its dependents.
    """
    # Collect the IDs of the root token and all of its (transitive) dependents
    tokens = [token.id]
    changed = True
    while changed:
        changed = False
        for t in sentence:
            if t.head in tokens and t.id not in tokens:
                tokens.append(t.id)
                changed = True
    tokens = sorted(tokens)
    return str(int(tokens[0]) - 1)
21eebf0ba9ec02af4a7c65e8631a95e83b68372e
697,954
def turn(p1, p2, p3):
    """
     0 if the points are colinear
     1 if the points define a left-turn
    -1 if the points define a right-turn
    """
    # Compute the z-coordinate of the vectorial product p1p2 x p2p3
    z = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])
    return 0 if z == 0 else int(z / abs(z))
2b8dccbe0111d9cd655f4a6a8bc2131b73f3a3ee
697,956
def worker_number(worker_id):
    """Get the current worker number."""
    id_as_str = "".join(ch for ch in worker_id if ch.isdigit())
    if len(id_as_str) == 0:
        return 0
    return int(id_as_str)
3cd6ca7e665fefda9427476e39de5ea23f843f72
697,960
def get_reference_value_from_spec(openapi_spec: dict, reference_path: str) -> dict:
    """Follows the reference path passed in and returns the object at the end of the path

    Args:
        openapi_spec (dict): The openapi.json specification object
        reference_path (str): a path formatted as "#/foo/bar/baz"

    Returns:
        dict: The object found by following the path
    """
    path_elements = reference_path.split("/")
    reference_val = openapi_spec
    for elem in path_elements:
        if elem == "#":
            continue
        else:
            reference_val = reference_val.get(elem, {})
    return reference_val
c32962a3b008825de4c5108d8d1a3ad5db179b17
697,964
def hide_keys(dictionary, keys_to_hide, new_value="xxx"):
    """
    Return a copy of the given dictionary on which specified keys will be
    replaced by the new_value word (or 'xxx').

    :param dictionary: a dictionary
    :param keys_to_hide: keys to hide in the output dictionary
    :param new_value: replacement string for keys to hide
    :return: the new dictionary with hidden items
    """
    _new_dict = {}
    for key, value in dictionary.items():
        _new_dict[key] = new_value if key in keys_to_hide else value
    return _new_dict
d6b8bc7958c637212899c048a9958ac3d2755894
697,965
def get_other_dims(da, dims_exclude):
    """
    Returns all dimensions in provided dataset excluding dim_exclude

    | Author: Dougie Squire
    | Date: 22/04/2018

    Parameters
    ----------
    da : xarray DataArray
        Array to retrieve dimensions from
    dims_exclude : str or sequence of str
        Dimensions to exclude

    Returns
    -------
    dims : str or sequence of str
        Dimensions of input array, excluding dims_exclude

    Examples
    --------
    >>> A = xr.DataArray(np.random.normal(size=(3,2)), coords=[('x', np.arange(3)),
    ...                                                        ('y', np.arange(2))])
    >>> doppyo.utils.get_other_dims(A, 'y')
    'x'
    """
    dims = da.dims

    if dims_exclude is None:
        other_dims = dims
    else:
        if isinstance(dims, str):
            dims = [dims]
        if isinstance(dims_exclude, str):
            dims_exclude = [dims_exclude]
        other_dims = set(dims) - set(dims_exclude)

    return tuple([o for o in dims if o in other_dims])
506a8b7193ea95480c83e3d69a31774f77d4fbcc
697,969
def standard_split(text, row_length):
    """text = abcdefghijklmnopqrstuvwxyz and row_length = 5

    abcde
    fghij
    klmno
    pqrst
    uvwxy
    z

    returns ['afkpuz', 'bglqv', 'chmrw', 'dinsx', 'ejoty']
    """
    output = []
    text_length = len(text)
    # Takes output column by index in turn, taking e.g. the 0th, 5th, 10th ... char
    # for the 0th column, then the 1st, 6th, 11th ... char for the 1st column etc.
    for num in range(row_length):
        count = num
        column = []
        while count < text_length:
            column.append(text[count])
            count += row_length
        # Join the collected characters so each column is returned as a string,
        # matching the docstring example
        output.append("".join(column))
    return output
3a4732433b58777dcf08529b98b90f890e680a29
697,970
def median(x):
    """Return the median of a list of values."""
    m, r = divmod(len(x), 2)
    if r:
        return sorted(x)[m]
    return sum(sorted(x)[m - 1:m + 1]) / 2
337584e40c61bd198496355e38f4a8d4da9310fa
697,971
def _get_args(tp):
    """Backport of typing.get_args for Python 3.6"""
    return getattr(tp, "__args__", ())
f265cb6d8d85601d9ee6f20be8f78c8d9819c920
697,972
def bangbang_compressor(bangbang_protocol):
    """Compresses the bang bang protocol.

    Merges contiguous bangbang chunks into a Tuple of duration (in number of
    chunks) and which Hamiltonian to apply.

    Args:
        bangbang_protocol: List of HamiltonianType values, determines which
            Hamiltonian should be applied at the corresponding chunk.

    Returns:
        List of Tuples containing the number of chunks and the Hamiltonian
        type to apply for that many chunks.
    """
    current_mode = None
    compressed_protocol = []
    chunk_counter = 0
    for protocol_mode in bangbang_protocol:
        if current_mode is None:
            current_mode = protocol_mode
            chunk_counter = 1
        elif current_mode == protocol_mode:
            chunk_counter += 1
        else:
            compressed_protocol.append((chunk_counter, current_mode))
            current_mode = protocol_mode
            chunk_counter = 1

    # Append what's left over
    if chunk_counter > 0:
        compressed_protocol.append((chunk_counter, current_mode))

    return compressed_protocol
7e3b6e0e5678e705c54c39e31561a908900d9b08
697,976
import yaml


def load_config(filename):
    """Load and return a config file."""
    with open(filename, 'r') as f:
        config = yaml.safe_load(f)
    return config
5c981d7a5aa7846486062c14c940cdb01cb0b5af
697,980
def argmin_except_c(x, p, c):
    """
    argmin_except_c(x, p, c)

    Ensure that p is the argmin, i.e. the position of the minimum value in x,
    ignoring any occurrence of the value c.

    Note:
    - If there are many minimum values then argmin_except_c(x, p, c) will find
      all these values.
    - We assume that there is at least one value != c.
    """
    n = len(x)
    constraints = [x[p] != c]
    for i in range(n):
        constraints += [(p != i).implies((x[i] == c) | (x[p] < x[i]))]
    return constraints
8046dddc15113ccb5c75fb62ce9ff5434104811e
697,984
def compute(n: int) -> float:
    """
    Compute the partial sum
    ``s = 1 / (1**2) + 1 / (2**2) + ... + 1 / (n**2)``

    >>> compute(0)
    0
    >>> compute(4)
    1.4236111111111112
    """
    if n == 0:
        return 0
    return sum(1 / (i ** 2) for i in range(1, n + 1))
974506e3523f1c0cfe646b1fdf54846ff7f96cc4
697,987
from typing import Dict
from typing import Any


async def refresh_tokens_controller_mock() -> Dict[str, Any]:
    """Mock refresh tokens controller."""
    return {
        "access_token": "test",
        "refresh_token": "test",
        "expires_at": 0,
    }
9431f1311a1c0fa45a3e538ee70f66f0ef9de477
697,988
def _instance_name_from_url(instance_url):
    """Extract instance name from url."""
    return instance_url.split('/')[-1]
e64bc076be9480a04821e9a9a79f13e25fadad35
697,990
def calculateGC(seq):
    """Take a sequence as input and calculate the GC %"""
    gc = round((seq.count("G") + seq.count("C")) / len(seq) * 100, 2)
    return gc
ed0c863c31214de970d0d0efd81206084b4bfbcf
697,992
import re


def strip_html_comments(text):
    """Strip html comments from text (which doesn't need to be valid HTML)"""
    return re.sub(r"<!--(.|\s|\n)*?-->", "", text)
4ac4c2061520a8ecdafe77884a1bae9196bc4e21
697,993
import click


def cli_option_quiet(func):
    """
    Decorator for adding a reusable CLI option `--quiet`/`-q`.
    """

    # noinspection PyUnusedLocal
    def _callback(ctx: click.Context, param: click.Option, value: bool):
        ctx_obj = ctx.ensure_object(dict)
        ctx_obj["quiet"] = value
        return value

    return click.option(
        '--quiet', '-q',
        is_flag=True,
        help="Disable output of log messages to the console entirely."
             " Note, this will also suppress error and warning messages.",
        callback=_callback
    )(func)
4249c71b38d24693b0064157dc313d5798d1529b
697,995
import re


def indent_string(s, indent=12):
    """
    Add the given number of space characters to the beginning of every
    non-blank line in `s`, and return the result.
    """
    # This regexp matches the start of non-blank lines:
    return re.sub('(?m)^(?!$)', indent * ' ', s)
cf9603be231749f55e4a29c92c90b9139515894c
697,998
def rename(**kwargs):
    """Rename one or more columns, leaving other columns unchanged

    Example usage:
        diamonds >> rename(new_name=old_name)
    """
    def rename_columns(df):
        column_assignments = {old_name_later._name: new_name
                              for new_name, old_name_later in kwargs.items()}
        return df.rename(columns=column_assignments)
    return rename_columns
0f4c87795f663a9cab8867c4b856ef75e59e2102
697,999
import optparse


def comma_separated_callback(*, is_valid_func=lambda v: True,
                             error_msg="{invalid} is not an allowed value"):
    """
    Return an optparse callback for comma-separated args.
    Default value is not processed.

    Usage::

        my_callback = comma_separated_callback(
            is_valid_func=lambda v: v in {'foo', 'bar'},
            error_msg="{invalid} is not an allowed value for --option-name")
        op.add_option("--option-name",
                      default=[],
                      action='callback', type='string',
                      callback=my_callback)
    """
    def callback(option, opt, value, parser):
        """ optparse callback for comma-separated args """
        values = value.split(',')
        for v in values:
            if not is_valid_func(v):
                msg = error_msg.format(value=value, invalid=v)
                raise optparse.OptionValueError(msg)
        setattr(parser.values, option.dest, values)
    return callback
3b7c1de63a7a29f0fc2f4a6ae7445d6e05075d32
698,001
from pathlib import Path


def csv_path(csv_dir: str) -> Path:
    """Build csv Path object & mkdir"""
    pwd = Path.cwd()
    base = pwd.joinpath(csv_dir)
    base.mkdir(exist_ok=True)
    return base
52d894ca30b3dc9f81c65b09d3739d18eafee7ba
698,002
def addstrp(arg1, arg2):
    """
    Like addstr() above, but adds a space between the two if necessary.

    Args:
        arg1: whatever, will be translated via str()
        arg2: whatever, will be translated via str()
    """
    a = str(arg1)
    b = str(arg2)
    return u'{}{}{}'.format(a, ' ' if a and b else '', b)
fc161017d135daaa744bd9154a9669a8c7c634b1
698,005
def autofill(field, value):
    """
    Return a bcm dictionary with a command to automatically fill the
    corresponding "field" with "value"
    """
    return {'mode': 'autofill', 'field': field, 'value': value}
7b96ccd0e9756cdd6aab9919996b183be1478f8f
698,011
def valid_vlan_id(vlan_id, extended=True):
    """Validates a VLAN ID.

    Args:
        vlan_id (integer): VLAN ID to validate. If passed as ``str``, it will
            be cast to ``int``.
        extended (bool): If the VLAN ID range should be considered extended
            for Virtual Fabrics.

    Returns:
        bool: ``True`` if it is a valid VLAN ID. ``False`` if not.

    Raises:
        None

    Examples:
        >>> import pyswitch.utilities
        >>> vlan = '565'
        >>> pyswitch.utilities.valid_vlan_id(vlan)
        True
        >>> extended = False
        >>> vlan = '6789'
        >>> pyswitch.os.base.utilities.valid_vlan_id(vlan, extended=extended)
        False
        >>> pyswitch.os.base.utilities.valid_vlan_id(vlan)
        True
    """
    minimum_vlan_id = 1
    maximum_vlan_id = 4095
    if extended:
        maximum_vlan_id = 8191
    return minimum_vlan_id <= int(vlan_id) <= maximum_vlan_id
c046bf6c1e558eb679c9c53fa4e091213a1b7d46
698,012
import asyncio


async def open_port(host, port):
    """Check whether a port on a host is open, using a 2 second connection timeout

    Parameters
    ----------
    host : str
        Host IP address or hostname
    port : int
        Port number

    Returns
    -------
    awaitable bool
    """
    try:
        _reader, writer = await asyncio.wait_for(
            asyncio.open_connection(host, port), timeout=2
        )
        writer.close()
        await writer.wait_closed()
        return True
    except Exception:
        pass
    return False
9642cf112c2c3bb83d578c522d70880be064c827
698,017
from typing import Tuple


def is_event(item: Tuple[str, str]) -> bool:
    """Check whether an item from the labelled list of folders is an event folder.

    Args:
        item: item from labelled list of folders

    Returns:
        True if folder in tuple is event-type
    """
    i_type = item[1]
    return i_type == "event"
2307813e7777955b11a5d84c7dbc114a8a690562
698,024
def _parse_arg_line(line):
    """
    pull out the arg names from a line of CLI help text introducing args

    >>> _parse_arg_line('  -s, --sudo          run operations with sudo (nopasswd) (deprecated, use')
    ['-s', '--sudo']
    """
    # the option names are separated from the help text by a run of two or more spaces
    return [
        part.strip().split(' ')[0].split('=')[0]
        for part in line.strip().split('  ')[0].split(',')
    ]
5d6bbddc43792ee974d0dfec4637046aff602052
698,025
def pyiterator_iter(this):
    """
    Returns `this`.

    https://docs.python.org/3/library/stdtypes.html#typeiter says an iterator
    should have an __iter__ method.
    """
    return this
5a44e38f6219f9ab0b629be817b0e2877c464149
698,026
def calculate_binomial_coefficient(n: int, k: int) -> int:
    """Calculate the binomial coefficient (n over k)."""
    if n < 0:
        raise ValueError('`n` must not be negative!')
    if k < 0:
        raise ValueError('`k` must not be negative!')
    if k > n:
        return 0
    binomial_coefficient = 1
    for i in range(k):
        # Multiply before dividing so every intermediate value stays an exact integer
        binomial_coefficient = binomial_coefficient * (n - i) // (1 + i)
    return binomial_coefficient
31942eecaa10cb7a13e9cd38dd2daed8d0618b40
698,033
def isAnalysisJob(trf):
    """ Determine whether the job is an analysis job or not """
    if trf.startswith('https://') or trf.startswith('http://'):
        analysisJob = True
    else:
        analysisJob = False
    return analysisJob
75ccaf711dd04dc99aca266533fee7303fad3e85
698,038
def powset(S):
    """In : S (set)
       Out: List of lists representing powerset.

       Since sets/lists are unhashable, we convert the set to a list,
       perform the powerset operations, leaving the result as a list
       (can't convert back to a set).

       Example:
       S = {'ab', 'bc'}
       powset(S) -> [['ab', 'bc'], ['bc'], ['ab'], []]
    """
    L = list(S)
    if L == []:
        return [[]]
    else:
        pow_rest0 = powset(L[1:])
        pow_rest1 = list(map(lambda Ls: [L[0]] + Ls, pow_rest0))
        return pow_rest0 + pow_rest1
8d5cbc09f595d81b4a486994cfbce2a0cf97cf15
698,039
def slice_dict(original_dict, key_string):
    """
    :param original_dict: Original (and larger) dictionary with all entries
    :param key_string: key string (desired starting string)
    :return: A new dictionary with all the entries of the original that start
        with key_string
    """
    newdict = {}
    for key in original_dict:
        if key.startswith(key_string):
            newdict[key] = original_dict[key]
    return newdict
812b5a45115127f71cd5f2ebc02ea35c2582c81f
698,040
def unique_name(container, name, ext):
    """Generate unique name for container file."""
    filename = '{}.{}'.format(name, ext)
    i = 0
    while filename in container:
        i += 1
        filename = '{}.{}.{}'.format(name, i, ext)
    return filename
f2403611d58551fff65d996d426e458efd03ec80
698,042
import six


def _is_sequence(seq):
    """Returns true if its input is a `tuple`, `list`, or `range`.

    Args:
        seq: an input sequence.

    Returns:
        True if the sequence is a `tuple`, `list`, or `range`.
    """
    return isinstance(seq, (tuple, list, six.moves.range))
e94094c314cff5bf9bd7525453d7906ca55d7261
698,043
import re


def AutoscalersForMigs(migs, autoscalers):
    """Finds Autoscalers with target amongst given IGMs.

    Args:
        migs: List of triples (IGM name, scope type, location reference).
        autoscalers: A list of Autoscalers to search among.

    Returns:
        A list of all Autoscalers with target on mig_names list.
    """
    igm_url_regexes = []
    for (name, scope_type, location) in migs:
        igm_url_regexes.append(
            '/projects/{project}/{scopeType}/{scopeName}/'
            'instanceGroupManagers/{name}$'
            .format(project=location.project,
                    scopeType=(scope_type + 's'),
                    scopeName=getattr(location, scope_type),
                    name=name))
    igm_url_regex = re.compile('(' + ')|('.join(igm_url_regexes) + ')')
    result = [
        autoscaler for autoscaler in autoscalers
        if igm_url_regex.search(autoscaler.target)
    ]
    return result
52f809794395ba6ed31b94bed36dc9f4de8ce919
698,047
def normalize_mode(mode):
    """
    Return a mode value, normalized to a string and containing a leading zero
    if it does not have one.

    Allow "keep" as a valid mode (used by file state/module to preserve mode
    from the Salt fileserver in file states).
    """
    if mode is None:
        return None
    if not isinstance(mode, str):
        mode = str(mode)
    mode = mode.replace("0o", "0")
    # Strip any quotes and any initial zeroes, then zero-pad it up to 4 digits.
    # This ensures that something like '00644' is normalized to '0644'
    return mode.strip('"').strip("'").lstrip("0").zfill(4)
66d58de58eb9f0e77e2fa75253cfeef3ee1abe2c
698,048
import math


def spherical_index_lm(k):
    """ returns the degree l and the order m from the mode k """
    l = int(math.floor(math.sqrt(k)))
    return l, k - l * (l + 1)
de00ebda16ad2480b32a5d827351bb43ec858e42
698,049
def from_camel_case(name):
    """Convert camel case to snake case.

    Function and variable names are usually written in camel case in C++ and
    in snake case in Python.
    """
    new_name = str(name)
    i = 0
    while i < len(new_name):
        if new_name[i].isupper() and i > 0:
            new_name = new_name[:i] + "_" + new_name[i:]
            i += 1
        i += 1
    return new_name.lower()
c6e7184598252a6db1bcaee5d5375969c5c9bd39
698,053
def PatternListToStr(pattern):
    """Return a pattern string for the given list of integers.

    PatternListToStr([5,3,1]) -> '531'
    """
    return ''.join([chr(p) for p in pattern])
d42f06204f3b4c6fa3badb45d5755614554b3f9b
698,056
import re


def search_by_regex(nodes: list, option: int, regex: str) -> list:
    """
    Return all :param regex matched values from :param nodes at :param option index.

    :param nodes: list of nodes.
    :param option: Index in the nodes list (check constants at the start of this file).
    :param regex: Pattern to be found.
    :return: Return list of matched values as list.
    """
    answers = []
    for item in nodes:
        if re.search(regex, item[option]):
            answers.append(item[option])
    return answers
3a045c324dd333157a6de036ed7affd11a91b6a0
698,059
def get_retention_policy(interval, retention_policies):
    """Get appropriate retention policy for interval provided

    :param interval: Interval of query in seconds
    :type interval: int
    :param retention_policies: Retention policy configuration
    :type retention_policies: dict(max time range of interval in seconds: retention policy name)
    :rtype: ``str`` or ``None``
    """
    if not retention_policies:
        return
    for retention_interval in sorted(retention_policies.keys()):
        if interval <= retention_interval:
            return retention_policies[retention_interval]
    # In the case that desired interval is beyond configured interval range,
    # return policy for max interval
    return retention_policies[max(sorted(retention_policies.keys()))]
dc1e2358d715cadbcd5275240203fe20a50c1cc9
698,060
import warnings


def extract_cube_at_time(cubes, time, time_extract):
    """
    Extract a single cube at a given time from a cubelist.

    Args:
        cubes (iris.cube.CubeList):
            CubeList of a given diagnostic over several times.
        time (datetime.datetime object):
            Time at which forecast data is needed.
        time_extract (iris.Constraint):
            Iris constraint for the desired time.

    Returns:
        cube (iris.cube.Cube):
            Cube of data at the desired time, or None (with a warning) if the
            desired time is not available within the cubelist.
    """
    try:
        cube_in, = cubes.extract(time_extract)
        return cube_in
    except ValueError:
        msg = ('Forecast time {} not found within data cubes.'.format(
            time.strftime("%Y-%m-%d:%H:%M")))
        warnings.warn(msg)
        return None
24f6019f8a01a8d4b9c8f66c18c94eaee4f3077e
698,062
def kw2re(x):
    """Convert a list of keywords to a regex."""
    return r'(%s)' % '|'.join(sorted(list(set(x))))
13127d8b0c6d1772ebd4be58aca0f9e3f160544a
698,063
def get_boundary_locations(size, sector_size, stride):
    """Get a list of 1D sector boundary positions.

    Args:
        size: length of the full domain.
        sector_size: length of the sector.
        stride: how far each sector moves to the right

    Returns:
        boundaries: a list of 1D sector boundary positions
    """
    boundaries = []
    sector_l, sector_r = 0, sector_size  # left and right pos of the sector
    while sector_l < size:
        if sector_l < size and sector_r > size:
            boundaries.append((size - sector_size, size))
            break
        else:
            boundaries.append((sector_l, sector_r))
            if (sector_l, sector_r) == (size - sector_size, size):
                break
        sector_l += stride
        sector_r += stride
    return boundaries
ae6a2b6461f2c823f0129e7f691d63830834cbe7
698,067
def from_dero(value_in_dero):
    """Convert number in dero to smallest unit"""
    return int(value_in_dero * 10**12)
6a27469721cbd9851312f73a971caf279c95ffa8
698,072
import hashlib


def sha256_hash(data):
    """Compute SHA-256 of data and return hash as hex encoded value."""
    data = data or b""
    hasher = hashlib.sha256()
    hasher.update(data.encode() if isinstance(data, str) else data)
    sha256sum = hasher.hexdigest()
    return sha256sum.decode() if isinstance(sha256sum, bytes) else sha256sum
88443a9a62080b9e17c6a4eb4bf68ca7b8f62e6c
698,074
import json


def load_fixture_json(name):
    """Load fixture from json file."""
    with open(f"tests/fixtures/{name}.json", encoding="UTF-8") as json_file:
        data = json.load(json_file)
    return data
31f064da134974380ee12341199aef4d05bdba99
698,075
def fetch_token_mock(self,
                     token_url=None,
                     code=None,
                     authorization_response=None,
                     body='',
                     auth=None,
                     username=None,
                     password=None,
                     method='POST',
                     timeout=None,
                     headers=None,
                     verify=True,
                     proxies=None,
                     **kwargs):
    """Mock token fetching api call."""
    token = {
        "orcid": "123",
        "name": "ros",
        "access_token": "xyz",
        "refresh_token": "xyz",
        "scope": ["/activities/update", "/read/limited"],
        "expires_in": "12121"
    }
    return token
8c6902272140ffe498ac52c69564d351c6257b2f
698,079
import math


def valid(f):
    """Checks if the given coordinate is valid

    Args:
        f (float): Coordinate of a HeightCoordinate

    Returns:
        boolean: whether or not the coordinate is valid
    """
    return math.isfinite(f)
38753057630cb2581ec1752d672e9a5c76162066
698,082
def _pointed_type(tp):
    """Return the pointed type if this is a pointer, otherwise None."""
    tp_nam = tp.__name__
    if tp_nam.startswith("LP_"):
        if tp_nam.startswith("LP_c_") or tp_nam.startswith("LP_4_") or tp_nam.startswith("LP_8_"):
            return tp_nam[5:]
        return None
    if tp_nam == "c_void_p":
        return "void"
    return None
bcd92c3b824016c552c77627cd9ec8b411179a26
698,085
def make_image_carousel_column(image_url=None, image_resource_id=None, action=None,
                               i18n_image_urls=None, i18n_image_resource_ids=None):
    """
    Create an image carousel column object.

    reference - https://developers.worksmobile.com/jp/document/100500809?lang=en

    :return: carousel column
    """
    column_data = {}
    if image_url is not None:
        column_data["imageUrl"] = image_url
    if image_resource_id is not None:
        column_data["imageResourceId"] = image_resource_id
    if action is not None:
        column_data["action"] = action
    if i18n_image_urls is not None:
        column_data["i18nImageUrls"] = i18n_image_urls
    if i18n_image_resource_ids is not None:
        column_data["i18nImageResourceIds"] = i18n_image_resource_ids
    return column_data
7abf182f2255d0486136dcbe64ea8de4a63146c9
698,086
import hashlib


def md5_(value: str) -> str:
    """
    A function to return the md5 hash of the given string.

    :param value: The string to hash.
    :return: The hashed string.
    """
    return str(hashlib.md5(value.encode()).hexdigest())
e86553ab1c57f48f5e972df1e7abb5237f4d3972
698,089
def idFormat(id_num):
    """Format a numeric id into 5-digit string.

    Parameters
    ----------
    id_num: str
        A unique string number assigned to a User or Request.
    """
    if len(id_num) == 1:
        id_num = "0000" + id_num
    elif len(id_num) == 2:
        id_num = "000" + id_num
    elif len(id_num) == 3:
        id_num = "00" + id_num
    elif len(id_num) == 4:
        id_num = "0" + id_num
    return id_num
a3afb52dd552fe7eb6e971bd3f889bb114ee505c
698,093
def next_multiple(query, multiple):
    """Get the next multiple

    Args:
        query (int): To test
        multiple (int): Divider

    Returns:
        int: Next multiple of divider
    """
    result = query
    while result % multiple:
        result += 1
    return result
e76e59e94e7ac72dbcf86a6c99a17229acc7a15d
698,094
def change_dict_structure(dict_list):
    """Takes list of dicts from db_query and changes to dict with key=id,
    value=text (used for metrics).

    Args:
        dict_list (list): List of dictionaries from db_query.

    Returns:
        texts (dictionary): Dictionary with document IDs as keys and document
        text as values.
    """
    texts = {}
    for entry in dict_list:  # avoid shadowing the built-in `dict`
        doc_id = entry.get('document_id')
        text = entry.get('fulltext_cleaned')
        texts.update({doc_id: text})
    return texts
1e31055606a692f4e743a61f8a394134b9e72126
698,096