Dataset schema: content (string, 39 to 9.28k chars) · sha1 (string, 40 chars) · id (int64, 8 to 710k). Each record below is a Python snippet (content) followed by its sha1 and id.
def addv3s(vec, scalar):
    """Add scalar to elements of 3-vector."""
    return (vec[0] + scalar, vec[1] + scalar, vec[2] + scalar)
bd14f04079af113a4e9e900457b50b60f7fc7d00
502,121
def get_rod(da_peak_values, da_peak_times, da_eos_values, da_eos_times):
    """
    Takes four xarray DataArrays containing the peak season values (either
    pos or mos) and times (day of year), and end of season (eos) values and
    times (day of year). The rate of decrease (rod) is calculated as the
    ratio of the difference between peak and eos vege values to the
    difference between peak and eos times (day of year), per pixel.

    Parameters
    ----------
    da_peak_values: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the vege value detected at either the peak (pos) or middle
        (mos) of season.
    da_peak_times: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the time (day of year) value detected at either the peak
        (pos) or middle (mos) of season.
    da_eos_values: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the vege value detected at end of season (eos).
    da_eos_times: xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the time (day of year) detected at end of season (eos).

    Returns
    -------
    da_rod_values : xarray DataArray
        An xarray DataArray type with an x and y dimension (no time). Each
        pixel is the rate of decrease value detected between the eos and
        peak values/times across the timeseries at each pixel. The values
        in rod represent the rate of vege decline.
    """
    # notify user
    print('Beginning calculation of rate of decrease (rod) values (times not possible).')

    # get abs ratio between the difference in peak and eos values and times
    print('> Calculating rate of decrease (rod) values.')
    da_rod_values = abs((da_eos_values - da_peak_values) / (da_eos_times - da_peak_times))

    # convert type
    da_rod_values = da_rod_values.astype('float32')

    # rename vars
    da_rod_values = da_rod_values.rename('rod_values')

    # notify user
    print('> Success!\n')
    return da_rod_values
78e5b4e6119f193a59127cb8c3e6c36245c37bed
426,597
def line_count(file):
    """Utility function for getting number of lines in a text file."""
    count = 0
    with open(file, encoding="utf-8") as f:
        for _ in f:
            count += 1
    return count
e6ee52e5458b9a4c354b3a0f498613392a00b54d
607,592
def unique_elements(values):
    """List the unique elements of a list."""
    return list(set(values))
d272d49f5d93326c6a79cfc49b4650acf4d85332
240,261
import warnings


def _bin_phydim(dr, dim_name, bin_bound, bin_center, stacked_dim_name):
    """Binning over a physical dimension, weighted by area.

    Internal function for taking a zonal or meridional average.

    Parameters
    ----------
    dr : xarray DataArray
        Must contain (tile,y,x) as dimensions. Can have arbitrary
        additional dimensions.
    dim_name : str
        Supports 'lat' or 'lon'.
    bin_bound : 1D array (in degrees)
        The boundaries of bins in longitude or latitude.
    bin_center : 1D array (in degrees)
        The centers of bins in longitude or latitude.
    stacked_dim_name : str
        Will be 'stacked_tile_y_x' if dimension names are (tile,y,x).

    Returns
    -------
    dr_binned : xarray DataArray
    """
    # use multi-dimensional groupby, with 1D bins
    group = dr.groupby_bins(dim_name, bin_bound, labels=bin_center)

    # select the algorithm depending on the existence of 'area'
    if 'area' in dr:
        # Take a weighted average: a MapReduce algorithm that minimizes the
        # "Reduction" operations. That's the fastest way I can find for
        # taking a weighted average; it is 10x faster than the algorithm in
        # the xarray online docs:
        # http://xarray.pydata.org/en/stable/examples/monthly-means.html
        def mean_weighted_by_area(dr):
            weights = dr['area'] / dr['area'].sum()
            dr_mean = (dr * weights).sum(dim=stacked_dim_name)
            return dr_mean

        dr_binned = group.apply(mean_weighted_by_area)
    else:
        # simply take an unweighted average
        warnings.warn("Using uniform weights because the input DataArray "
                      "does not have area as a coordinate variable. "
                      "The result could be inaccurate because "
                      "cubed-sphere box size varies.")
        dr_binned = group.mean(dim=stacked_dim_name)

    return dr_binned
43a8c2aa101ec97953820bc4230cf2a6b8b47baa
394,398
def abs_to_rel_xpath(xpath, new_root):
    """
    Convert a given xpath to be relative from a tag appearing in the
    original xpath.

    :param xpath: str of the xpath to convert
    :param new_root: str of the tag from which the new xpath should be relative
    :returns: str of the relative xpath
    """
    if new_root in xpath:
        xpath = xpath + '/'
        xpath_to_root = '/'.join(xpath.split(new_root + '/')[:-1]) + new_root
        xpath = xpath.replace(xpath_to_root, '.')
        xpath = xpath.rstrip('/')
    else:
        raise ValueError(f'New root element {new_root} does not appear in xpath {xpath}')
    return xpath
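A quick usage sketch (hypothetical xpath values; assumes the function above is in scope):

# Everything up to and including the 'b' tag collapses to '.'
assert abs_to_rel_xpath('/a/b/c/d', 'b') == './c/d'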
4b23e49e58ec854f758744bc625546642394e3ba
214,967
import time


def slowadd(a, b):
    """Add two numbers, poorly."""
    time.sleep(1)
    return a + b
b6a440b9054f1808a9656b3badc3905519e5565a
437,695
import copy


def _instances_by_namespace(data):
    """Rebuild instance data so we can look it up by namespace.

    Note that the `representation` is added into the instance's data with a
    `representation` key.

    Args:
        data (dict): scene build data

    Returns:
        dict
    """
    result = {}

    # Add new assets
    for representation_id, instances in data.items():
        # Ensure we leave the source data unaltered
        instances = copy.deepcopy(instances)
        for instance in instances:
            instance['representation'] = representation_id
            result[instance['namespace']] = instance

    return result
094fdb39011688cf7b7c5e6e3608f46c615c4de6
391,375
def get_all_coords_in_image(length_x, length_y):
    """
    Get a list of all possible coordinates, based on two numbers
    representing a rectangular grid.

    :param length_x: horizontal size of field
    :param length_y: vertical size of field
    :returns: list of all coordinate pairs in a field of size
        length_x x length_y, or False if either dimension is zero
    """
    if length_x and length_y:
        new_coord_list = []
        for spot_y in range(length_y):
            for spot_x in range(length_x):
                new_coord_list.append((spot_x, spot_y))
        return new_coord_list
    else:
        return False
8ee98706cf38e53f7519cfa75df800713e8bc02e
547,538
def clean_antibody_name(name):
    """Get clean antibody name from FASTA identifier

    - Remove chain type suffix such as "_VH" or "_VL"
    """
    for suffix in ['_VH', '_VL', '_HC', '_LC']:
        if name.endswith(suffix):
            name = name[:-len(suffix)]
    return name
2a9fbf186a419bca52c8222b09689ca8bd17a9c2
124,080
import json


def read_json(json_path: str):
    """Read a json file

    :param json_path: path to json file
    :return: json content
    """
    with open(json_path) as f:
        return json.load(f)
19449679799ccfcec2e5f6fcaecf1f9ca8d10785
95,832
def n_triples(g, n=None):
    """Prints the number of triples in graph g."""
    if n is None:
        print(' Triples: ' + str(len(g)))
    else:
        print(' Triples: +' + str(len(g) - n))
    return len(g)
42cd1bb9e5e737a01900ca272c3a46ee7eedceed
563,727
def get_valid_colors() -> list:
    """Returns list of valid colors (BGR) for markers and frames."""
    colors = [
        (0, 0, 200),
        (0, 200, 0),
        (200, 0, 0),
        (200, 130, 0),
        (200, 0, 200),
        (0, 200, 200),
    ]
    return colors
860a378e38b54a8b897ce4228035a35e5170d056
554,474
def strip_name_amount(arg: str):
    """
    Strip the name and the last position integer.

    Args:
        arg: string

    Returns:
        string and integer with the default value 1
    """
    strings = arg.split()
    try:
        first = ' '.join(strings[:-1])
        second = int(strings[-1])
    except (ValueError, IndexError):
        first = ' '.join(strings)
        second = 1
    return first, second
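A quick usage sketch (hypothetical inputs; assumes the function above is in scope):

assert strip_name_amount('green apples 3') == ('green apples', 3)
assert strip_name_amount('banana') == ('banana', 1)  # no trailing integer -> default 1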
034ca8c780b9e837f6c3c5f2a1bf0d58dbc77e9b
87,050
def tensor2scalar(x):
    """Convert torch.Tensor to a scalar value.

    Args:
        x (torch.Tensor):

    Returns:
        scalar
    """
    if isinstance(x, float):
        return x
    return x.cpu().detach().item()
37052da89febc677aa93559e4aa167751ea65792
168,363
def create_default_config(schema):
    """Create a configuration dictionary from a schema dictionary.

    The schema defines the valid configuration keys and their default
    values. Each element of ``schema`` should be a tuple/list containing
    (default value, docstring, type) or a dict containing a nested schema.
    """
    o = {}
    for key, item in schema.items():
        if isinstance(item, dict):
            o[key] = create_default_config(item)
        elif isinstance(item, tuple):
            value, comment, item_type = item
            if isinstance(item_type, tuple):
                item_type = item_type[0]
            if value is None and (item_type == list or item_type == dict):
                value = item_type()
            if key in o:
                raise KeyError('Duplicate key in schema.')
            o[key] = value
        else:
            raise TypeError('Unrecognized type for schema dict element: %s %s'
                            % (key, type(item)))
    return o
06f51a864cf25c9c4028d8355095173a27732da2
606,818
def event_asset_object_factory(event_id, asset_id):
    """Cook up a fake eventasset json object from given ids."""
    eventasset = {
        'event_id': event_id,
        'asset_id': asset_id
    }
    return eventasset
547ee889edece073ee7d34b8926add327121a1a1
197,352
def JoinDisjointDicts(dict_a, dict_b):
    """Joins dictionaries with no conflicting keys.

    Enforces the constraint that the two key sets must be disjoint, and
    then merges the two dictionaries in a new dictionary that is returned
    to the caller.

    @type dict_a: dict
    @param dict_a: the first dictionary
    @type dict_b: dict
    @param dict_b: the second dictionary
    @rtype: dict
    @return: a new dictionary containing all the key/value pairs contained
        in the two dictionaries.
    """
    assert not (set(dict_a) & set(dict_b)), \
        ("Duplicate keys found while joining %s and %s" % (dict_a, dict_b))
    result = dict_a.copy()
    result.update(dict_b)
    return result
d42600de9646b8eda5f94cce5961754eb72daa2d
678,742
def inverse(phi, e):
    """
    Use Bézout's identity to calculate the inverse of e modulo phi.

    :param phi: the modulus
    :param e: the value to invert
    :return: an integer d such that d*e = 1 (mod phi); note the result may
        be negative, so reduce it mod phi if a positive value is needed
    """
    s, t, sn, tn, r = 1, 0, 0, 1, 1
    while r != 0:
        q = phi // e
        r = phi - q * e
        st, tt = sn * (-q) + s, tn * (-q) + t
        s, t = sn, tn
        sn, tn = st, tt
        phi = e
        e = r
    return t
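A worked check (assumes the function above is in scope): with phi = 40 and e = 7 the loop ends with t = -17, and 7 * (-17) = -119 = -3*40 + 1, so reducing mod 40 gives the positive inverse 23.

d = inverse(40, 7)         # -17
assert (d * 7) % 40 == 1   # d % 40 == 23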
a77ff16c5aa9c3ce2d76e6427fd0e399b08b8d7f
522,307
def exc_repr(e):
    """
    Return a string representation of an exception together with the
    exception name.
    """
    return "{}: {}".format(type(e).__name__, str(e))
16c1bdf4ec2cdab6681faced2205331a247d9f56
532,535
def elevation_change(elevation):
    """
    :param elevation: A list of elevation points (floating point format)

    Description: Calculates and returns the differences in elevation
    between sequential pairs of points in the list. Returns deltaElev,
    a list of elevation changes with length len(elevation) - 1.
    """
    deltaElev = []
    for i in range(0, len(elevation) - 1):
        deltaElev.append(elevation[i + 1] - elevation[i])
    return deltaElev
fad93636979098f492355da595a4972579ee95b5
571,391
import math


def roundUp(number):
    """
    Round up an integer to the nearest power of ten of the same degree.

    Example: 35 -> 40, 540 -> 600, 1250 -> 2000, etc.
    """
    nearest = math.pow(10, len(str(number)) - 1)
    return int(math.ceil(number / nearest) * nearest)
94405e4e40e05a33b1d3e48efdf9574355b53ea3
284,983
def find_perimeter(height: int, width: int) -> int:
    """Find the perimeter of a rectangle."""
    return (height + width) * 2
75913101034c873743aefb53540d5c0884d162f1
22,441
def err_func(chromosome, dataset, rbfn):
    """Calculate the error function for each chromosome.

    This function is specially designed to be picklable for
    multiprocessing.

    Args:
        chromosome (list of floats): The chromosome which is the parameters
            of the RBFN model.
        dataset (list of TrainingData): The training dataset.
        rbfn (RBFN): The RBFN model which must be deep copied for different
            parameters in output calculation.

    Returns:
        float: The result of the fitting function.
    """
    rbfn.load_model(chromosome)
    res = sum(abs(d.o - rbfn.output(d.i, antinorm=True)) for d in dataset)
    return res / len(dataset)
680bba374836ff9da0614e193cdf5d14e6ccf1c6
167,004
from typing import Any, Iterable


def filter_array(iterable: Iterable[Any], *types) -> Iterable[Any]:
    """Return the iterable with only the select types inside.

    Parameters
    ----------
    iterable : Iterable[Any]
        The iterable to filter.
    types : tuple
        The types to filter for.

    Returns
    -------
    list
        The filtered iterable.
    """
    return list(filter(lambda x: type(x) in types, iterable))
71a9b6736507be2cc235768e8114e95bb66c867d
157,982
import tempfile
from typing import Any


def _write_temp_file(data: Any) -> tempfile._TemporaryFileWrapper:  # type: ignore
    """
    Writes a named temporary file with the given data.

    Returns:
        [TemporaryFileWrapper] -- a file handle to the created temp file
    """
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.write(data)
    tmp.close()
    return tmp
a47d75b016e94eb2f296c5999826f37b1a8ddaf6
346,200
import torch


def batch_svd(A):
    """Wrapper around torch.svd that works when the input is a batch of
    matrices."""
    U_list = []
    S_list = []
    V_list = []
    for i in range(A.shape[0]):
        U, S, V = torch.svd(A[i])
        U_list.append(U)
        S_list.append(S)
        V_list.append(V)
    U = torch.stack(U_list, dim=0)
    S = torch.stack(S_list, dim=0)
    V = torch.stack(V_list, dim=0)
    return U, S, V
650472f2616a65c0bd0774dc4aa527b82292d163
480,589
def select_backbone(node):
    """Returns True if `node` is in a protein backbone."""
    return node.get('atomname') == 'BB'
d4924ce67b251bb39b7df5097d0e0175c24ff7cf
365,953
def sqlfstr(s: str):
    """
    Returns the string <s> as string format used for SQL parameter input.

    :param s: String to reformat.
    :return: str
    """
    return s.replace('"', '""')
70382cb58c5fa4433619d938076a57cf408af9f3
494,271
def _same_cluster(site_a, site_b, clusters):
    """Check if sites are in the same cluster."""
    a_index = [site_a in cluster for cluster in clusters].index(True)
    b_index = [site_b in cluster for cluster in clusters].index(True)
    return a_index == b_index
5f47b68148e4c7209989971a80ff4cb1d6718161
597,999
def required(sfx=''):
    """Load the requirements from the requirements.txt file."""
    with open(f"requirements{sfx}.txt") as f:
        return [ln.strip() for ln in f.readlines()
                if not ln.startswith('-')
                and not ln.startswith('#')
                and ln.strip() != '']
2a6ad70acc1012f0cf7dea21e7752819e0230323
431,796
import hashlib


def calculate_identicon(user_id: str) -> str:
    """
    Calculate an identicon hash string based on a user name.

    :param user_id: the user name
    :return: an identicon string
    """
    return hashlib.sha256(user_id.encode()).hexdigest()
e22e817da8a38ab289e4c623f8cbcba370317223
26,685
def _endpoint_from_view_func(view_func):
    """Internal helper that returns the default endpoint for a given
    function. This always is the function name.
    """
    assert view_func is not None, ('expected view func if endpoint '
                                   'is not provided.')
    return view_func.__name__
abba226b1279fc3b14fb22e2ea52980ac63f5f8a
341,040
def score_by_accuracy(truth, predictions, cutoff=0.5, binder_weight=0.5):
    """
    Score a set of predictions by their accuracy.

    Parameters
    ----------
    cutoff : float
        the value separating 'binder' predictions and 'nonbinder'
        predictions (defaults to 0.5) - Predictions are considered accurate
        if they land on the same side of the cutoff value as the truth.
    binder_weight : float
        the fraction that the binder score contributes to the overall score
        - The prediction accuracy for binders and nonbinders is considered
        separately and then combined according to this weight.

    Returns
    -------
    float
        a score between 0 and 1
    """
    correctness = [(t > cutoff) == (p > cutoff)
                   for (t, p) in zip(truth, predictions)]

    def score_for_truth(truth_value):
        filtered = [c for (c, t) in zip(correctness, truth)
                    if t == truth_value]
        return sum(1 for c in filtered if c) / len(filtered) if filtered else 0

    return (score_for_truth(1) * binder_weight
            + score_for_truth(0) * (1 - binder_weight))
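A worked number (hypothetical data; assumes the function above is in scope): with truth [1, 1, 0, 0] and predictions [0.9, 0.2, 0.1, 0.8], half the binders and half the nonbinders land on the correct side of 0.5, so the default weighting returns 0.5 * 0.5 + 0.5 * 0.5 = 0.5.

score = score_by_accuracy([1, 1, 0, 0], [0.9, 0.2, 0.1, 0.8])
assert score == 0.5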
39f19eb0adedf9e754012e9efa7c924b6d3b3bcf
205,533
def get_completed_only(mxml_df):
    """Filter only completed activities."""
    complete = mxml_df[mxml_df.EventType == "complete"].rename({
        "Timestamp": "Timestamp_Complete"
    }, axis=1).set_index(["TraceId", "WorkflowModelElement"])
    return complete.drop(["Originator", "EventType"],
                         errors='ignore', axis=1).reset_index()
8da852d6cbd1bb915581a53d05501b495e50faaf
628,560
import csv


def csv_readline_to_list(csv_file):
    """
    Read the CSV content by line.

    Example:
        csv content:
            |name|age|
            | a  |21 |
            | b  |22 |
            | c  |23 |
            | d  |24 |
            | e  |25 |

        ret = csv_readline_to_list("d/demo.csv")

    Return:
        [['name', 'age'], ['a', '21'], ['b', '22'], ['c', '23'],
         ['d', '24'], ['e', '25']]
    """
    try:
        with open(csv_file, 'r', encoding="utf-8") as csv_fp:
            reader = csv.reader(csv_fp)
            rows = [row for row in reader]
    except UnicodeDecodeError:
        with open(csv_file, 'r', encoding="gbk") as csv_fp:
            reader = csv.reader(csv_fp)
            rows = [row for row in reader]
    except IOError as ex:
        raise Exception(ex)
    return rows
7e6359df416174eb501868845edfbbcb5fe00e57
661,598
from typing import List


def intersects(list1: List[object], list2: List[object]) -> bool:
    """
    Returns True if two lists of objects intersect.

    :param list1: the first list.
    :param list2: the second list.
    :return: True iff. they intersect.
    """
    for e1 in list1:
        if e1 in list2:
            return True
    return False
bdc7dfd81e646f5242cb756781a0673d7e122ab3
423,631
def first_not_none_param(params, default):
    """
    Given a list of `params`, use the first param in the list that is
    not None. If all are None, fall back to `default`.
    """
    for param in params:
        if param is not None:
            return param
    return default
c219e5b3606683dbc84ee6fe1124833304e6fa9b
621,217
import re


def __extract_url_from_text(plain_text):
    """Extract the first occurrence of a URL in a plain text."""
    if plain_text:
        results = re.findall(r'(https?://[^\s]+)', plain_text)
        if results and len(results) > 0:
            return results[0]
        else:
            return ""
    else:
        return ""
b128bdd86aa6648cf64e6c43d05f6897983ca164
226,490
def melt(
    frame,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
    ignore_index: bool = True,
):  # noqa: PR01, RT01, D200
    """
    Unpivot a DataFrame from wide to long format, optionally leaving
    identifiers set.
    """
    return frame.melt(
        id_vars=id_vars,
        value_vars=value_vars,
        var_name=var_name,
        value_name=value_name,
        col_level=col_level,
        ignore_index=ignore_index,
    )
fed5581a8c0fde4fef8a178cffb8545b0d928108
549,619
def vol_format(x):
    """
    Formats stock volume number to millions of shares.

    Params:
        x (numeric, like int or float): the number to be formatted

    Example:
        vol_format(10000000)  -> '10.0M'
        vol_format(3390000)   -> '3.4M'
    """
    return "{:.1f}M".format(x / 1000000)
8da20a24c6c6373f271e6f0e94cf8205cb576cfc
118,852
def is_binary_palindrome(num):
    """Return True if num is binary palindromic."""
    num = bin(num)
    num = num[2:]
    return num == num[::-1]
7af06b050063fc5b4c2fcfd576f2603a8487067d
195,678
def get_file_type(file_name: str) -> str:
    """
    Returns "Test" if file_name contains the string "test" or "Test",
    else returns "Production".
    """
    result = "Production"
    if "test" in file_name or "Test" in file_name:
        return "Test"
    return result
376ffc276d1bbd7548a34f60c39e9f079927c4ad
336,486
def match_indices(shortened_list, primary_list):
    """
    Returns the 'primary_list's indices that correspond to the matching
    values in the 'shortened_list'. Assumes all values are unique. (For use
    in the External CTE monitor, matches are between RA and Decs and for a
    given source we can assume uniqueness.)

    Parameters:
        shortened_list : list of anything
            A shortened 'primary_list' of whose values you want the
            'primary_list's indices. Assumes all values are unique.
        primary_list : list of anything
            The original list of which the 'shortened_list' is a subset.
            Assumes all values are unique.

    Returns:
        matched_indices : list of ints
            The 'primary_list's indices that correspond to the matching
            values in the 'shortened_list'.

    Outputs: nothing
    """
    matched_indices = [i for i, x in enumerate(primary_list)
                       if x in shortened_list]
    return matched_indices
899a04ba5fa68ed2485b158d54ea5558a9a507e1
507,510
import requests
from bs4 import BeautifulSoup


def request_page_data_soup(page_url):
    """
    Method that uses the requests and BeautifulSoup libraries to get data
    from the page at the given page_url.

    :param string page_url: given page url.
    :return BeautifulSoup: returns BeautifulSoup object with data from the
        collected page.
    """
    page = requests.get(page_url)
    page_soup = BeautifulSoup(page.content, 'html.parser',
                              multi_valued_attributes=None)
    return page_soup
4818a8f5b9849500bc540a76a9886c681d95e4f0
213,575
def create_model(model_name, gpu=False):
    """
    Creates a model object from the model_name.

    :param model_name: (str) the name of the model (corresponding exactly
        to the name of the class).
    :param gpu: (bool) if True a gpu is used.
    :return: (Module) the model object
    """
    try:
        model = eval(model_name)()
    except NameError:
        raise NotImplementedError(
            'The model wanted %s has not been implemented.' % model_name)
    if gpu:
        model.cuda()
    else:
        model.cpu()
    return model
b2692929fa34facf0a7add35161cd0133ebbd8e3
122,195
def fetch_sessions(job_config, data_bucket, data_dir, course,
                   fetch_holdout_session_only=False,
                   fetch_all_sessions=False):
    """
    Fetch course sessions in data_bucket/data_dir. By default, fetches only
    training sessions (not the holdout session).

    :param job_config: MorfJobConfig object.
    :param data_bucket: name of bucket containing data; s3 should have
        read/copy access to this bucket.
    :param data_dir: path to directory in data_bucket that contains
        course-level directories of raw data.
    :param course: string; name of course (should match course-level
        directory name in s3 directory tree).
    :param fetch_holdout_session_only: logical; return only holdout (final)
        session.
    :param fetch_all_sessions: logical; return all sessions (training and
        holdout).
    :return: tuple of session numbers as strings.
    """
    assert not (fetch_holdout_session_only & fetch_all_sessions), \
        "choose one - fetch holdout sessions or fetch all sessions"
    s3 = job_config.initialize_s3()
    if not data_dir.endswith("/"):
        data_dir = data_dir + "/"
    course_bucket_objects = s3.list_objects(
        Bucket=data_bucket,
        Prefix="".join([data_dir, course, "/"]),
        Delimiter="/")
    sessions = [item.get("Prefix").split("/")[2]
                for item in course_bucket_objects.get("CommonPrefixes")]
    # handles session numbers like "2012-001" by keeping leading digits
    # before "-" but only sorting on the last 3 digits
    sessions = sorted(sessions, key=lambda x: x[-3:])
    if fetch_all_sessions:
        # return complete list of sessions
        result = sessions
    else:
        holdout_session = sessions.pop(-1)
        if fetch_holdout_session_only:
            # return only the holdout session, but as a list, so the type
            # is consistent
            result = [holdout_session]
        else:
            # return list of sessions without the holdout session
            result = sessions
    return tuple(result)
e1fb2cb74f466b1b886d019ea1c6d4f15a6617cd
165,961
import torch


def _collate_fn_tensor(x):
    """
    Collate function for tensors.

    Parameters
    ----------
    x : `List` of `torch.Tensor`
        Tensors to be stacked.

    Returns
    -------
    x : `torch.Tensor`
        Output tensor.
    """
    return torch.stack(x)
418aefa9e5057624489a0e8e874c40ca403cb7d1
128,449
def check_input_stream_count(expected_number_of_streams):
    """
    Decorator for Tool._execute that checks the number of input streams.

    :param expected_number_of_streams: The expected number of streams
    :return: the decorator
    """
    def stream_count_decorator(func):
        def func_wrapper(*args, **kwargs):
            self = args[0]
            sources = kwargs['sources'] if 'sources' in kwargs else args[1]
            if expected_number_of_streams == 0:
                if sources:
                    raise ValueError("No input streams expected")
            else:
                given_number_of_streams = len(sources) if sources else 0
                if given_number_of_streams != expected_number_of_streams:
                    raise ValueError(
                        "{} tool takes {} stream(s) as input ({} given)".format(
                            self.__class__.__name__,
                            expected_number_of_streams,
                            given_number_of_streams))
            return func(*args, **kwargs)
        return func_wrapper
    return stream_count_decorator
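A minimal usage sketch (hypothetical Tool subclass; assumes the decorator above is in scope):

class Merger:
    @check_input_stream_count(2)
    def _execute(self, sources, sink):
        pass  # ... combine the two input streams into the sink ...

Merger()._execute(['s1', 's2'], None)  # passes the stream-count check
Merger()._execute(['s1'], None)        # ValueError: Merger tool takes 2 stream(s) as input (1 given)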
96dfdc8f85d70dee1ac44f01f95dd07eb3725261
20,508
import re


def _clean(text):
    """Cleans the text: lowercasing, trimming, removing non-alphanumeric."""
    return " ".join(re.findall(r'\w+', text, flags=re.UNICODE)).lower()
ff3496a0fcae32ed1ecb7ceb106bbe60c1da25ea
652,016
def str2num(text: str) -> float:
    """
    Convert string to number.

    Keyword arguments:
        text -- number in string format (must satisfy str.isnumeric, so
                decimal points and negative signs are rejected)

    Output:
        number -- number converted
    """
    assert text.isnumeric(), "Error: The string can not be converted to a number"
    number = float(text)
    return number
22db18949633092f12e1cafc7470f07e821fd4a4
642,032
def parse_error(bad_token):
    """Returns an error message and the token causing it."""
    return {"error": f"parsing error, invalid token [{bad_token}] found"}
035875fd3949425c14f67ac98246fad263b1593c
560,977
def create_dicts_by_chain(keys_chain: list):
    """
    Create nested dicts by keys chain

    >>> create_dicts_by_chain(['some', 'keys'])
    {'some': {'keys': {}}}
    """
    result = {}
    current_dict = result
    for key in keys_chain:
        current_dict[key] = {}
        current_dict = current_dict[key]
    return result
03ab93fbe62a59e539fd150ab1277c4377fd6739
663,590
def is_threatening(x1: int, y1: int, x2: int, y2: int) -> bool:
    """
    Check if the positions are threatening each other.

    Examples
    --------
    >>> is_threatening(0, 1, 1, 0)
    True
    """
    same_row = x1 == x2
    same_col = y1 == y2

    # Walk each position back along the major diagonal until it hits an
    # axis; equal anchor points mean the positions share that diagonal.
    delta1 = min(x1, y1)
    major_coords1 = (x1 - delta1, y1 - delta1)
    delta2 = min(x2, y2)
    major_coords2 = (x2 - delta2, y2 - delta2)
    same_diagonal_major = major_coords1 == major_coords2

    # Same idea for the minor diagonal: project each position to x == 0.
    delta1 = x1
    delta2 = x2
    minor_coords1 = (x1 - delta1, y1 + delta1)
    minor_coords2 = (x2 - delta2, y2 + delta2)
    same_diagonal_minor = minor_coords1 == minor_coords2

    same_diagonal = same_diagonal_major or same_diagonal_minor
    return same_row or same_col or same_diagonal
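Two more checks (assumes the function above is in scope); this is the standard row/column/diagonal test, e.g. for queens on a board:

assert is_threatening(2, 2, 4, 4) is True    # same major diagonal
assert is_threatening(0, 2, 1, 0) is False   # no shared row, column, or diagonal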
90779ad3268aeb5eeb1962ac3623904694f5d4bb
127,707
import csv


def load_environment_data(filename):
    """
    Loads the environment boundaries and obstacles from a text file.

    :param filename: path and name to the file with robot state information
    :return: lists of tuples of coordinates for the environment boundaries
        and obstacles
    """
    environment_bounds = list()
    obstacles = list()
    with open(filename, 'r', encoding='utf8') as fin:
        reader = csv.reader(fin, skipinitialspace=True, delimiter=',')
        raw_bounds = next(reader)
        while raw_bounds:
            x_coordinate = int(raw_bounds.pop(0))
            y_coordinate = int(raw_bounds.pop(0))
            coordinate = (x_coordinate, y_coordinate)
            environment_bounds.append(coordinate)
        for raw_obstacle in reader:
            temporary_obstacle = list()
            while raw_obstacle:
                x_coordinate = float(raw_obstacle.pop(0))
                y_coordinate = float(raw_obstacle.pop(0))
                coordinate = (x_coordinate, y_coordinate)
                temporary_obstacle.append(coordinate)
            obstacles.append(temporary_obstacle)
    return environment_bounds, obstacles
aae82786b2c2a274cb52f9b025442eaa889d61e1
152,636
import json


def load_int_key_json2dict(f):
    """Read JSON file to dict, converting keys to int."""
    d = {}
    with open(f, 'r', encoding='UTF-8') as json_file:
        d = {int(k): v for k, v in json.load(json_file).items()}
    return d
b37a9e045b677eeb62a6a91f94d7e2143d30683f
156,584
import torch


def local_response_normalization(x, eps=1e-8):
    """
    Implements the variant of LRN used in ProGAN
    https://arxiv.org/pdf/1710.10196.pdf

    :param eps: Epsilon is a small number added to the divisor to avoid
        division by zero
    :param x: Output of convolutional layer (or any other tensor with
        channels on axis 1)
    :return: Normalized x
    """
    divisor = (torch.pow(x, 2).mean(dim=1, keepdim=True) + eps).sqrt()
    b = x / divisor
    return b
5d2fc78a9d3360a7e97b1c256965069d2422f392
387,401
import inspect


def get_arg_spec(function, follow_wrapped=False) -> inspect.FullArgSpec:
    """
    Get the arg spec for a function.

    :param function: A function.
    :param follow_wrapped: Follow `__wrapped__`, defaults to False.
    :return: A :class:`inspect.FullArgSpec`
    """
    if follow_wrapped:
        function = inspect.unwrap(function)
    return inspect.getfullargspec(function)
e3e397ddbcfc341c0e02c06d693fa04ea65e6cb5
119,378
def getShiftedString(s, leftShifts, rightShifts):
    """
    Generate the string after the following operations:
        1. Left Circle Shift
        2. Right Circle Shift

    :type s: string
    :type leftShifts: int
    :type rightShifts: int

    Examples:
    >>> getShiftedString("abc", 1, 0)
    'bca'
    >>> getShiftedString("abc", 1, 7)
    'abc'
    """
    # Limit huge shifts
    sLen = len(s)
    leftShifts = leftShifts % sLen
    rightShifts = rightShifts % sLen
    # Generate left-shifted string
    leftS = s[leftShifts:] + s[:leftShifts]
    # Generate right-shifted string
    rightS = leftS[-rightShifts:] + leftS[0:-rightShifts]
    return rightS
151e5d622d1677c672bd084c9e6e6ec2f61f3d32
440,631
def fill(character, data_qubits):
    """
    Apply a specific gate to all data qubits.

    Args:
        character: The QASM gate to apply
        data_qubits: The number of data qubits

    Returns:
        Valid QASM to append to the program
    """
    # create a list of the qubit indices that need the gate applied
    indices = ",".join(map(str, range(data_qubits)))
    return "{} q[{}]\n".format(character, indices)
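A quick usage sketch (hypothetical gate name; assumes the function above is in scope):

assert fill('h', 3) == 'h q[0,1,2]\n'  # apply 'h' to all three data qubits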
fbdfd994e7511f7f8ff4a0a1adb21f1a5742b5b3
560,411
def unpack_score(score, **kwargs):
    """
    Pulls a specific model score out of the score json. If Perspective
    can't score the text, 0 is used.

    Args:
        score (dict): complete score json for a tweet
        kwargs: name of the model to unpack the score for

    Returns:
        model_score: integer that represents the percentage score for a
        given model
    """
    model_name = kwargs.get('model_name')
    if 'attributeScores' in score:
        model_score = round(
            score['attributeScores'][model_name]['summaryScore']['value'] * 100)
    else:
        model_score = 0
    return model_score
ef550019b1000687deee0274bab876973c07207c
452,457
def _cmp_dispatcher(other_method_name):
    """
    Dispatch comparisons to a method of the *other* object.

    Returns a new *rich comparison* method which dispatches calls to the
    method `other_method_name` of the *other* object. If there is no such
    method in the object, ``False`` is returned.

    This is part of the implementation of a double dispatch pattern.
    """
    def dispatched_cmp(self, other):
        try:
            other_method = getattr(other, other_method_name)
        except AttributeError:
            return False
        return other_method(self)
    return dispatched_cmp
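A minimal sketch of the double dispatch (hypothetical classes; assumes _cmp_dispatcher above is in scope):

class Always:
    def _other_lt(self, other):
        return True  # decides 'other < self'

class Thing:
    __lt__ = _cmp_dispatcher('_other_lt')

assert (Thing() < Always()) is True    # dispatched to Always()._other_lt(...)
assert (Thing() < object()) is False   # object() lacks _other_lt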
925cfea1c328365d2a91f92b2c3fecebc06ce7a5
334,547
def close_to_obstacle(state):
    """
    Return True or False depending on whether we're within a certain
    distance of an obstacle.
    """
    cutoff_dist = 0.5
    closest_obstacle = min(state[0])
    if closest_obstacle < cutoff_dist:
        return True
    return False
1fb7abfcd58d4d228c37cec5acfbd6d6882cbd93
205,138
def convert_band(old_band):
    """
    Convert an old band string into a new string.

    NOTE: Only exists to support old data formats.
    """
    if old_band:
        old_band = old_band.strip()  # Strip off superfluous white space
    if old_band == 'A':
        return 'SHORT'
    elif old_band == 'B':
        return 'MEDIUM'
    elif old_band == 'C':
        return 'LONG'
    else:
        # For any other value, return the same string
        return old_band
c36c55daceb91ed201e45e9b4cce7cd7b8c8d085
246,794
def xor(a, b):
    """Compute the boolean xor of two values."""
    return bool(a) != bool(b)
6679f67a939fb0f18c40085fc9ef1d478082c670
403,170
def get_strains(output_file):
    """
    Returns a dictionary that maps cell id to strain. Takes Biocellion
    output as the input file.
    """
    strain_map = {}
    with open(output_file, 'r') as f:
        for line in f:
            if line.startswith("Cell:"):
                tokens = line.split(',')
                cell = int(tokens[0].split(':')[1])
                strain = int(tokens[1].split(':')[1])
                strain_map[cell] = strain
    return strain_map
a4ce24cd0f4cb213ee42e611f178b1415a5506be
682,758
from typing import Any


def merge_dict(*dicts: dict[str, Any]) -> dict[str, list[str]]:
    """
    Merge dicts and return a dictionary mapping key to list of values.

    Order of the values corresponds to the order of the original dicts.
    """
    ret: dict[str, list[str]] = {}
    for dict_ in dicts:
        for key, val in dict_.items():
            ret.setdefault(key, []).append(val)
    return ret
3b0fca7160915610a39777237ac794fce0734d3f
329,269
import re


def clean_name(name: str) -> str:
    """
    Bring the name into a standard format by collapsing multiple spaces
    and replacing characters specific to the German language.
    """
    result = re.sub(r"\s+", " ", name)
    return (
        result.replace("ß", "ss")
        .lower()
        .replace("ä", "ae")
        .replace("ü", "ue")
        .replace("ö", "oe")
    )
8cb8ba45fcec1dcc0e04ccfcd4263ae3e82e9fb5
28,370
def recursive_update(original_dict: dict, new_dict: dict) -> dict:
    """Recursively update original_dict with new_dict."""
    for new_key, new_value in new_dict.items():
        if isinstance(new_value, dict):
            original_dict[new_key] = recursive_update(
                original_dict.get(new_key, {}), new_value
            )
        else:
            original_dict[new_key] = new_value
    return original_dict
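A quick check (assumes the function above is in scope): nested keys are merged rather than overwritten wholesale.

base = {'db': {'host': 'localhost', 'port': 5432}}
recursive_update(base, {'db': {'port': 6432}, 'debug': True})
assert base == {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}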
5c85e5fc14571fdffb88f4c4822b7e369b790bfa
693,328
def vertical_unfold(A):
    """For a 3D tensor A(a,i,b), we unfold like: A(ia,b)."""
    S = A.shape
    return A.permute(1, 0, 2).reshape(S[0] * S[1], S[2])
30e469715fb79e42420e344f91cac0b9ab791497
327,817
def check_response(resp_dict):
    """
    Analyzes the dictionary response from an API call, 'resp_dict'; if the
    response indicates an error, raise an appropriate Exception, otherwise
    return 'resp_dict'.
    """
    if resp_dict['status'] == 'success':
        return resp_dict
    elif resp_dict['status'] == 'fail':
        raise ValueError(f"Bad Argument: {resp_dict['data']}")
    else:
        raise RuntimeError(f"Error at server: {resp_dict['message']}")
ad63f0fa3064ab3edb00a18848c92dbcfc91d2aa
224,286
def chop(seq, size):
    """Chop a sequence into chunks of the given size."""
    chunk = lambda ii: seq[ii:ii + size]
    return map(chunk, range(0, len(seq), size))
958e6523fba9ec8097c242fbe8f7c419c1dce8b8
183,807
def Rmax_Q11(Vmax):
    """
    Estimation of the radius of maximum wind according to the formula
    proposed by Quiring et al. (2011); Vmax and Rmax are in nautical
    miles. The expression herein is converted to km.
    """
    Vm = Vmax * 0.5399568
    Rmax = (49.67 - 0.24 * Vm) * 1.852
    return Rmax
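A worked number (assumes Vmax arrives in km/h, which the 0.5399568 factor converts to knots): Vmax = 150 gives Vm ≈ 81.0 kt, so Rmax ≈ (49.67 - 0.24 * 81.0) * 1.852 ≈ 56 km.

print(Rmax_Q11(150))  # ≈ 55.99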
e320acfd64abc9e7ae30ca70979cf057239bae09
699,855
def get_words_by_start_time(transcript):
    """
    Merges punctuation with standard words since they don't have a start
    time, returns them in a handy map of start_time to word and confidence.

    Args:
        transcript: Amazon Transcript JSON

    Returns:
        (dict): a map of start_time to word and confidence
    """
    merged_words = {}
    items = transcript["results"]["items"]
    for i, item in enumerate(items):
        # Only save pronunciations... may not be necessary, or may need
        # other types
        if item["type"] == "pronunciation":
            word = item["alternatives"][0]["content"]
            confidence = item["alternatives"][0]["confidence"]
            # If the next item in the transcript is a punctuation, merge
            # it with the current word
            if i < len(items) - 1 and items[i + 1]["type"] == "punctuation":
                word += items[i + 1]["alternatives"][0]["content"]
            # Add the word to the map at its start time
            merged_words[item["start_time"]] = {
                "content": word,
                "confidence": confidence,
            }
    return merged_words
20e779944bcbde8b7281cd5f807ad6cdb6660332
121,117
def concat_key_values(dicts, keys, new_key, join_string):
    """Concatenate multiple dict fields into a new field."""
    print("concat keys {}".format(keys))
    for d in dicts:
        concat = []
        for key in keys:
            if key not in d:
                continue
            concat.append(str(d[key]))
        d[new_key] = join_string.join(concat)
    return dicts
434cd6792fe8e963f888c64d5b4f199cf9b302e9
411,999
def write_out_file(contents, filename, mode='w'):
    """Write contents to outfile directly. Default mode is truncate/create
    new file; pass mode='a' to append to an existing file.
    """
    try:
        with open(filename, mode) as f:
            f.write(contents)
    except Exception as e:
        print(f'Error: {e}')
        return None
    else:
        return filename
0b86e0dbb480fe8c11c1fb446831d7221d9d0dc5
407,915
def create_search_url(base_url, keyword_list):
    """
    Create a Google search URL for each keyword in keyword_list.

    Args:
        base_url (str): Google's base search url
        keyword_list (list): list of strings that contain the search
            keywords

    Returns:
        list: Google search urls like https://www.google.com/search?q=pizza
    """
    search_kw = [kw.replace(" ", "+") for kw in keyword_list]  # replace spaces with '+'
    search_query = [base_url + sq for sq in search_kw]
    return search_query
f50b4c3545bf7f2efcf5a7640c4fe8bc98f031df
524,015
def split_field(split_on):
    """Returns a function that splits input text, based on given parameter."""
    def _splitter(text):
        return map(str.strip, text.split(split_on))
    return _splitter
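A quick usage sketch (assumes the function above is in scope); note the inner function returns a lazy map object:

comma_split = split_field(',')
assert list(comma_split('a, b ,c')) == ['a', 'b', 'c']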
de65043111af1de782074ff9b3e5619d28911208
508,025
def Npart(request):
    """
    Fixture for returning the number of particles in a dataset.

    Needed because indirect=True is used for loading the datasets.
    """
    return request.param
206de0a906dcabde28dd7611754a86c65b1211f5
144,272
def remove_url(tweet):
    """
    Removes '<url>' tags from a tweet.

    INPUT:
        tweet: original tweet as a string
    OUTPUT:
        tweet with <url> tags removed
    """
    return tweet.replace('<url>', '')
dc0a5696d2e21721004fee6e1de1a13be50a8bf3
303,208
def compute_avna(prediction, ground_truths):
    """Compute answer vs. no-answer accuracy."""
    return float(bool(prediction) == bool(ground_truths))
9a19da6de0eab7b2993ef32f9d5754bd1c73eb04
448,295
import math


def calculate_tc(
        max_flow_length,  # units of meters
        mean_slope,       # percent slope
        const_a=0.000325,
        const_b=0.77,
        const_c=-0.385):
    """
    Calculate time of concentration (hourly).

    Inputs:
        - max_flow_length: maximum flow length of a catchment area,
          derived from the DEM for the catchment area.
        - mean_slope: average slope, from the DEM *for just the catchment
          area*. This must be percent slope, provided as an integer
          (e.g., 23, not 0.23).

    Outputs:
        tc_hr: time of concentration (hourly)
    """
    if not mean_slope:
        mean_slope = 0.00001
    return (
        const_a
        * math.pow(max_flow_length, const_b)
        * math.pow((mean_slope / 100), const_c)
    )
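A worked number (hypothetical catchment; assumes the defaults above): max_flow_length = 1000 m and mean_slope = 5 (percent) give tc ≈ 0.000325 * 1000**0.77 * 0.05**-0.385 ≈ 0.21 hours, roughly 12.6 minutes.

print(calculate_tc(1000, 5))  # ≈ 0.21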
bf0976466441b65b836693b65982e7f2a238ce96
290,092
from datetime import datetime


def values(csv_row: dict, timestamp=None):
    """
    Return a dictionary of the telemetry values from the row.

    If timestamp is None, add current timestamp in seconds.
    """
    if timestamp is None:
        timestamp = datetime.now().timestamp()

    def valid_key(key):
        return key not in ('date', 'time')

    return dict(((k, v) for k, v in csv_row.items() if valid_key(k)),
                timestamp=timestamp)
f7f942ee8b626aad7fe088c07f89d4e746cf7635
208,133
def normalize_houndsfield(data_):
    """Normalizes Hounsfield values ranging from -1024 to ~+4000 to (0, 1)."""
    cpy = data_ + 1024
    cpy /= 3000
    return cpy
74bd07b6e315de9c256f447b7b45387943660526
536,390
def _detects_peaks(ecg_integrated, sample_rate):
    """
    Detects peaks from local maxima.

    Parameters
    ----------
    ecg_integrated : ndarray
        Array that contains the samples of the integrated signal.
    sample_rate : int
        Sampling rate at which the acquisition took place.

    Returns
    -------
    chosen_peaks : list
        List of local maxima that pass the first stage of conditions
        needed to be considered as an R peak.
    possible_peaks : list
        List with all the local maxima in the signal.
    """
    # Minimum RR interval = 200 ms
    min_rr = (sample_rate / 1000) * 200

    # Computes all possible peaks and their amplitudes (starting at index
    # 1 so that i-1 does not wrap around to the end of the array)
    possible_peaks = [i for i in range(1, len(ecg_integrated) - 1)
                      if ecg_integrated[i - 1] < ecg_integrated[i]
                      and ecg_integrated[i] > ecg_integrated[i + 1]]
    possible_amplitudes = [ecg_integrated[k] for k in possible_peaks]
    chosen_peaks = []

    # Starts with first peak
    if not possible_peaks:
        raise Exception("No Peaks Detected.")
    peak_candidate_i = possible_peaks[0]
    peak_candidate_amp = possible_amplitudes[0]
    for peak_i, peak_amp in zip(possible_peaks, possible_amplitudes):
        if peak_i - peak_candidate_i <= min_rr and peak_amp > peak_candidate_amp:
            peak_candidate_i = peak_i
            peak_candidate_amp = peak_amp
        elif peak_i - peak_candidate_i > min_rr:
            chosen_peaks += [peak_candidate_i - 6]  # Delay of 6 samples
            peak_candidate_i = peak_i
            peak_candidate_amp = peak_amp
    return chosen_peaks, possible_peaks
ab9dd461f65095f048942ab8b8c069c456ea4933
11,554
def mususpension(temperature, concentration):
    """
    Returns the dynamic viscosity of the suspension. Regression using data
    from Petkov, G. D. (1996). See Appendix X.

    Inputs:
        temperature: Temperature of the medium in the PBR; °C
        concentration: Biomass concentration; g.L-1

    Outputs:
        visco: Dynamic viscosity of the suspension; mPa.s
    """
    # See regression for non-excreting strains
    visco = 1.507027 - 0.019466 * temperature + 0.035762 * concentration
    return visco
30445aaa7ca6cbd7a3f732214b66971e1a930f20
403,313
def min_exval(v1, v2):
    """Return the smaller one between two extended values."""
    if v1 <= v2:
        return v1
    return v2
ab9e1e41211feaa5fcb824c5fce83097c80f31fa
489,982
def undo_squash_to_unit_interval(x: float, constant: float) -> float:
    """Computes the input value of squash_to_unit_interval given the output."""
    if constant <= 0:
        raise ValueError('Squash constant must be greater than zero.')
    # The original guard `0 > x >= 1` could never trigger (a chained
    # comparison requires both conditions); the intended domain check is:
    if x < 0 or x >= 1:
        raise ValueError('Undo squash can only be performed on a value in [0, 1).')
    return (x * constant) / (1 - x)
e421e6ca05c2af1d323f855dc3e9d8fd430a1eb0
143,434
def _get_display_name(name, display_name):
    """Returns display_name from display_name and name."""
    if display_name is None:
        return name
    return display_name
bb14d4f1719dbd1abe1543d48ab7a8cf8c9e9609
207,173
def hex_reverse(integer, size):
    """Reverse a hex string, bf99 -> 99bf."""
    string = '{0:0{1}x}'.format(integer, size)
    return ''.join([string[i - 2:i] for i in range(len(string), 0, -2)])
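A quick check (assumes the function above is in scope): the value is zero-padded to `size` hex digits, then two-character groups are emitted in reverse order.

assert hex_reverse(0xbf99, 4) == '99bf'
assert hex_reverse(0x1234abcd, 8) == 'cdab3412'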
822a7e1be806ac551288295533f2d86d4689f2a0
151,030
from typing import Any, Dict, List


def assert_typing(
    input_text_word_predictions: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
    """
    This is only to ensure correct typing; it does not actually change
    anything.

    Args:
        input_text_word_predictions: e.g. [
            {"char_start": 0, "char_end": 7, "token": "example", "tag": "O"},
            ..
        ]

    Returns:
        input_text_word_predictions_str: e.g. [
            {"char_start": "0", "char_end": "7", "token": "example", "tag": "O"},
            ..
        ]
    """
    return [
        {k: str(v) for k, v in input_text_word_prediction.items()}
        for input_text_word_prediction in input_text_word_predictions
    ]
0835bad510241eeb2ee1f69ac8abeca711ebbf53
2,323
def _category_and_fandom(soup):
    """
    .. versionadded:: 0.3.0

    Returns the FanFiction category and fandom from the soup.

    * Category is one of nine possible categories from ``['Anime/Manga',
      'Books', 'Cartoons', 'Comics', 'Games', 'Misc', 'Movies',
      'Plays/Musicals', 'TV']``
    * Fandom is the specific sub-category; whereas the category may be
      ``Plays/Musicals``, the fandom could be ``RENT``, ``Wicked``, etc.

    :param soup: Soup containing a page from FanFiction.Net
    :type soup: bs4.BeautifulSoup class
    :returns: Tuple where the first item is the category and the second
        item is the fandom.
    :rtype: tuple.

    .. code-block:: python

        from ffscraper.fanfic.story import _category_and_fandom
        from bs4 import BeautifulSoup as bs
        import requests

        r = requests.get('https://www.fanfiction.net/s/123')
        html = r.text
        soup = bs(html, 'html.parser')

        print(_category_and_fandom(soup))

    .. code-block:: bash

        ('Plays/Musicals', 'Wicked')
    """
    c_f = soup.find('div', {'id': 'pre_story_links'}).find_all('a', href=True)
    return c_f[0].text, c_f[1].text
807c4c3e3928726820f3075a9f896e3231b66814
237,142
def title_case(sentence):
    """
    Converts an entered string into title case.

    Parameters
    ----------
    sentence : string
        string to be converted into title case

    Returns
    -------
    title_case_sentence : string
        string in Title Case

    Example
    -------
    >>> title_case('ThiS iS a StRING')
    'This Is A String'
    """
    # Check that input is a string
    if not isinstance(sentence, str):
        raise TypeError('Invalid input {} - Input must be of type string'.format(sentence))
    # Error if empty string
    if len(sentence) == 0:
        raise ValueError('Cannot apply title case to empty string')
    title_case_sentence = ""
    list_of_words = sentence.split(' ')
    for words in list_of_words:
        if not words:
            continue  # skip empty tokens produced by consecutive spaces
        words = words[0].upper() + words[1:].lower()
        title_case_sentence = title_case_sentence + " " + words
    return title_case_sentence.strip()
8cef40ec1f14f78fa4211aaf896b9b2426a0682e
665,133
def get_model_identifier(filename: str) -> str:
    """Returns the model identifier: the final underscore-separated chunk
    of the filename with its 3-character suffix stripped."""
    return filename.split("_")[-1][:-3]
089dfa9f137ba3d95fc554d009a97e7ed6244fb9
384,056
def binary_or(a: int, b: int):
    """
    Take in 2 integers, convert them to binary, and return a binary number
    that is the result of a binary or operation on the integers provided.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    >>> binary_or(21, 30)
    '0b11111'
    >>> binary_or(58, 73)
    '0b1111011'
    >>> binary_or(0, 255)
    '0b11111111'
    >>> binary_or(0, 256)
    '0b100000000'
    >>> binary_or(0, -1)
    Traceback (most recent call last):
        ...
    ValueError: the value of both input must be positive
    >>> binary_or(0, 1.1)
    Traceback (most recent call last):
        ...
    TypeError: 'float' object cannot be interpreted as an integer
    >>> binary_or("0", "1")
    Traceback (most recent call last):
        ...
    TypeError: '<' not supported between instances of 'str' and 'int'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both input must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len),
                                  b_binary.zfill(max_len))
    )
514fa4a02b778dfa91c4097bb8916522339cda33
705,570
def c_equals(value_1, value_2):
    """The intrinsic conditional function Fn::Equals.

    Compares if two values are equal. Returns true if the two values are
    equal or false if they aren't.
    """
    return {'Fn::Equals': [value_1, value_2]}
9ce9b066837e67c0a7c5f18796d294dd0b9a90e5
363,545
from datetime import datetime
from typing import Union


def parse_nanotimestamp(s: str) -> Union[int, float]:
    """
    Parse a datetime string with nanoseconds.

    Args:
        s (str): datetime string

    Returns:
        Union[int, float]: Unix timestamp in seconds
    """
    tz = ""
    if s[-1] == "Z":
        # Add explicit UTC timezone
        tz = "Z+0000"
    # Truncate to millisecond precision and convert to a unix timestamp
    return datetime.strptime(s[0:23] + tz, "%Y-%m-%dT%H:%M:%S.%fZ%z").timestamp()
5e472ac10a7948dba02ad12bba186ddb3796f927
311,459
def num_tags(tree, tag):
    """
    Returns the total number of tags in the tree with name tag.

    Example, count planets: num_tags(tree, 'planet')

    :param tree: lxml etree
    :param tag: string containing <tag> name.
    :returns: integer representing number of tags found.
    """
    return int(tree.xpath("count(.//" + tag + ")"))
2ac01dbf99891f6ab966728fa20d60b2e3e1517d
286,891
def isbatch(line):
    """
    Batches are wrapped in BHS / BTS or have more than one message.

    BHS = batch header segment
    BTS = batch trailer segment
    """
    return line and (
        line.strip()[:3] == "BHS"
        or (line.count("MSH") > 1 and line.strip()[:3] != "FHS")
    )
8aaab051ddd3b8e67f8da80ba2e055312c82f473
168,023
def find_aligned_codons(aln):
    """Returns the column indices of the alignment that represent aligned
    codons."""
    # throw out codons with non mod 3 gaps
    ind2 = []
    for i in range(0, aln.alignlen(), 3):
        bad = False
        for key, val in aln.iteritems():
            codon = val[i:i + 3]
            if "-" in codon and codon != "---":
                bad = True
                break
        if not bad:
            ind2.extend([i, i + 1, i + 2])
    return ind2
ff04f1db152a15d60112a792d25a4e521cca4470
415,948