Columns:
content: string (lengths 39 to 9.28k)
sha1: string (length 40)
id: int64 (values 8 to 710k)
def delete_package_versions(client, domain, domainOwner, repository, format, namespace, package, list_of_versions):
    """Delete package versions from CodeArtifact repository"""
    response = client.delete_package_versions(
        domain=domain,
        domainOwner=domainOwner,
        repository=repository,
        format=format,
        namespace=namespace,
        package=package,
        versions=list_of_versions
    )
    return response
96acf0d34d03e979b294a0bc8c819ac6e0b11343
121,937
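A minimal usage sketch for the CodeArtifact helper above. The client creation is standard boto3; the domain, repository, package and version values are purely illustrative.

import boto3

client = boto3.client("codeartifact", region_name="us-east-1")
# Hypothetical coordinates; substitute your own domain/repository/package.
result = delete_package_versions(
    client,
    domain="my-domain",
    domainOwner="123456789012",
    repository="my-repo",
    format="npm",
    namespace="my-scope",
    package="my-package",
    list_of_versions=["1.0.0", "1.0.1"],
)
print(result.get("successfulVersions", {}))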
def create_label_from_filename(filename):
    """
    Takes a filename and turns it into a list with a single lowercase string
    containing the pet label that is in the filename
    (ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')
    Parameters:
        filename - The filename (string)
    Returns:
        List with filename turned into lowercase string stripped of numbers and
        extensions (e.g. 'Boston_terrier_02259.jpg' => ['boston terrier'])
    """
    name_list = filename.split("_")[:-1]
    for idx in range(0, len(name_list)):
        name_list[idx] = name_list[idx].lower()
    return [" ".join(name_list)]
fde10583608fd70471dd9f8effe4535f56bc0560
498,166
def open_mode(request):
    """Pytest fixture parameterized over multiple file open modes."""
    return request.param
4c2d3c0c35389559e7ab893d0a432ef13209114a
517,674
def is_distribution(distribution):
    """
    Checks if the given mapping is a probability distribution (sums up to 1).

    Parameters
    ==========
    distribution: a mapping from integers to probabilities

    Returns
    =======
    True if values in `distribution` sum up to 1.
    """
    probabilities = distribution.values()
    return round(sum(probabilities), 8) == 1
8049c6e032b2673d40d4d411970536cff4d0ad60
406,485
def _run_callable_with_postamble(postamble, callable_, *args, **kwargs):
    """Returns a callable of no args that invokes callable_ (with the specified
    args and kwargs) and then invokes postamble (with no args).

    The callable returns the result of (or exception thrown by) callable_.
    """
    def fn():
        try:
            return callable_(*args, **kwargs)
        finally:
            postamble()
    return fn
521b9a88c95b7cc0120b85c87138fc77cc852fbf
527,050
def readable_file_size(file_bytes, precision):
    """
    Get a human-readable size for a file or folder.
    :param file_bytes: file size in bytes
    :param precision: number of decimal places, may be 0
    :return:
    """
    for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']:
        if abs(file_bytes) < 1024.0:
            return '%s %s' % (format(file_bytes, '.%df' % precision), unit)
        file_bytes /= 1024.0
    return '%s %s' % (format(file_bytes, '.%df' % precision), 'Yi')
cf87d2317290a3a1b68d3367303c29391f1bc177
284,914
def _call_signature(callable, *args, **kwargs):
    """
    Generate a human-friendly call signature

    From recipe http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/307970
    """
    argv = [repr(arg) for arg in args] + ["%s=%r" % x for x in kwargs.items()]
    return "%s(%s)" % (callable.__name__, ", ".join(argv))
24ae6a7ac4cb1285c2d9c1de11698da1f10266fb
621,129
from functools import reduce
import operator


def prod(iterable):
    """Take product of iterable like."""
    return reduce(operator.mul, iterable, 1)
b0534333e870723e88eec01dd7e765185381b6a7
678,425
def format_suffix(altradio=None, radioversion=None, core=False):
    """
    Formulate suffix for hybrid autoloaders.

    :param altradio: If a hybrid autoloader is being made.
    :type altradio: bool

    :param radioversion: The hybrid radio version, if applicable.
    :type radioversion: str

    :param core: If we're using a core OS image. Default is false.
    :type core: bool
    """
    suffix = "_R{0}".format(radioversion) if altradio and radioversion else ""
    if core:
        suffix += "_CORE"
    return suffix
9237d7f7dd32091165d0d51bee174b908a030381
526,385
import sqlite3


def initialize_db(db):
    """
    Initializes the database and creates the table if necessary
    :param db: (string) The database to use
    :return:
    """
    conn = sqlite3.connect(db)
    c = conn.cursor()
    c.execute("CREATE TABLE IF NOT EXISTS `Assignments` ( "
              "`GlobalID` TEXT UNIQUE, "
              "PRIMARY KEY(`GlobalID`) )")
    conn.commit()
    return conn
b3186ecaadbd62fce0c587bd4fe510e3e12a0cfb
282,520
async def index(request):
    """
    This is the view handler for the "/" url.

    :param request: the request object see
        http://aiohttp.readthedocs.io/en/stable/web_reference.html#request
    :return: context for the template.
    """
    # Note: we return a dict not a response because of the @template decorator
    return {
        'title': request.app['settings'].name,
        'intro': "Success! you've setup a basic aiohttp app.",
    }
254a1a741efb3c7bf3df57b046bacb94294c397f
653,817
def singleton(cls):
    """
    Decorator to generate a singleton, prevents from creating multiple
    instances of a class
    """
    instance = None

    def get_instance(*args, **kwargs):
        nonlocal instance
        if instance is None:
            instance = cls(*args, **kwargs)
        return instance

    return get_instance
7686381de243185fd90c3aa9e158332f91f7ec89
379,974
def edge_to_GeoJSON(edge, graph, color="#F5A207"):
    """Convert edge to GeoJSON string."""
    start = edge[0]
    end = edge[1]
    start_lon = graph.nodes[start]['lon']
    start_lat = graph.nodes[start]['lat']
    end_lon = graph.nodes[end]['lon']
    end_lat = graph.nodes[end]['lat']
    edge_string = ''
    edge_string += '{ "type" : "Feature",\n'
    edge_string += '"geometry" : {"type": "LineString", '
    edge_string += '"coordinates": [[%f,%f], [%f,%f]]},\n' % (
        start_lon, start_lat, end_lon, end_lat)
    edge_string += '"properties": {"marker-color": "%s"}\n' % (color)
    edge_string += '}\n'
    return edge_string
13ce331fd8ec281d0b4a04b671738f36ae943828
227,804
import json


def putTableAll(obj):
    """
    Returns table as string showing standings of league with all data

    Parameters:
    -----------
    obj: dict
        JSON object of league standings obtained from API/cache

    Returns:
    --------
    str
        Standings as a text code block (to get monospaced text) showing all data
    """
    try:
        assert(type(obj) == dict)
        fin = open('source/teamcodes.json', 'r')
        mapper = json.load(fin)
        str_re = '```\nLEAGUE: ' + str(obj['competition']['name']) +\
            ' ' * (45 - 2 - 8 - 10 - len(str(obj['competition']['name']))) +\
            'MATCHDAY: ' + str(obj['season']['currentMatchday']) + '\n'
        str_re += '╔════╀══════╀════╀════╀════╀════╀═════╀═════╗\n'
        str_re += 'β•‘ SN β”‚ TEAM β”‚ M β”‚ W β”‚ D β”‚ L β”‚ PTS β”‚ GD β•‘\n'
        str_re += '╠════β•ͺ══════β•ͺ════β•ͺ════β•ͺ════β•ͺ════β•ͺ═════β•ͺ═════╣\n'
        for team in obj['standings'][0]['table']:
            text = 'β•‘ %-2d β”‚ %-4s β”‚ %-2d β”‚ %-2d β”‚ %-2d β”‚ %-2d β”‚ %-3d β”‚ %+-3d β•‘\n'\
                % (team['position'],
                   mapper.get(team['team']['name'], team['team']['name'][:4])[:4],
                   team['playedGames'], team['won'], team['draw'],
                   team['lost'], team['points'], team['goalDifference'])
            str_re += text
        str_re += 'β•šβ•β•β•β•β•§β•β•β•β•β•β•β•§β•β•β•β•β•§β•β•β•β•β•§β•β•β•β•β•§β•β•β•β•β•§β•β•β•β•β•β•§β•β•β•β•β•β•```'
        fin.close()
        return str_re
    except AssertionError:
        return 'Error!'
ae46f33be6200363ab2876fd4d95a1217d719305
39,098
import calendar


def get_year_for_first_weekday(weekday=0):
    """Get the year that starts on 'weekday', eg. Monday=0."""
    if weekday > 6:
        raise ValueError("weekday must be between 0 and 6")
    year = 2020
    not_found = True
    while not_found:
        firstday = calendar.weekday(year, 1, 1)
        if firstday == weekday and not calendar.isleap(year):
            not_found = False
        else:
            year = year - 1
    return year
21bc137686b1cdc046fabdc0a58cfd0a5a147682
572,671
import math


def cosine_rule(v_original: float, v_target: float, angle_dif: int) -> float:
    """Apply the cosine rule to compute the Delta-V needed to transfer from one
    velocity to another with a difference in angle.

    Args:
        v_original: the original velocity.
        v_target: the target velocity.
        angle_dif: the angle at which the 2 velocities differ in degrees.

    Returns:
        the length of the velocity vector connecting the 2 ends of v_original
        and v_target."""
    return math.sqrt(((v_original ** 2) + (v_target ** 2))
                     - (2 * v_original * v_target * math.cos(math.radians(angle_dif))))
3d0274e4ae98ff076c75341f709c785cb6430007
43,811
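A quick worked check of cosine_rule above, with illustrative numbers only: equal speeds separated by 60 degrees form an equilateral triangle, so the required Delta-V equals the original speed, and a 90 degree separation reduces to Pythagoras.

print(round(cosine_rule(7.8, 7.8, 60), 3))  # 7.8
print(round(cosine_rule(3.0, 4.0, 90), 3))  # 5.0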
def smash_candies(total_candies, friend_count=3):
    """
    Function for Candy-sharing friends Alice, Bob and Carol.
    They have some candies which they plan to split evenly among themselves.
    For the sake of their friendship, any candies left over would be smashed.
    For example, if they collectively bring home 91 candies, they'll take 30
    each and smash 1.
    Also will calculate the number of candies to smash for *any* number of
    total candies.
    Modifying it so that it optionally takes a second argument representing
    the number of friends the candies are being split between. If no second
    argument is provided, it should assume 3 friends, as before.
    -End of Doc String for the function-
    """
    return total_candies % friend_count
685521e615374bfc629b4764b013bc9f89fe80ce
57,566
def int_check(self, value, key):
    """
    Cast a value as an integer

    :param value: The value to cast as int
    :param key: The value name
    :return: The value as an integer, otherwise an error message
    """
    try:
        int(value)
        return int(value), ''
    except:
        return None, 'Error: %s value must be an integer' % (key)
4529cc80104dd075539f6e846dddd8814756e116
31,873
import json


def grab_titles(filename):
    # this one's functioning properly!
    """
    Grabs all the buzzes' titles in the provided json file and loads them
    into a list
    """
    title_list = []  # initializing an empty list where the titles will go
    f = open(str(filename), 'r')  # opening the file
    jsonfile = json.loads(f.read())  # taking a peek inside and assuming we can understand it as a json
    for item in jsonfile['buzzes']:
        title_list.append(item['title'])  # grabbing all the titles we can find
    return title_list
b8df23afff0fd42342674cefa5047cd95536a805
308,619
def _label(label: str) -> str:
    """
    Returns a query term matching a label.

    Args:
        label: The label the message must have applied.

    Returns:
        The query string.
    """
    return f'label:{label}'
10472e6850923d2f35bdff1fb3603f82293a3d15
691,613
def reduce_abbr(string):
    r"""Convert `&` to LaTeX `\&`.
    """
    string = string.strip().replace("&", r"\&")
    return string
e651fccd1291b3aacdcbd728750f5c994d945d18
172,746
def _get_trailing_metadata_from_interceptor_exception(exception):
    """Retrieves trailing metadata from an exception object.

    Args:
        exception: an instance of grpc.Call.

    Returns:
        A tuple of trailing metadata key value pairs.
    """
    try:
        # GoogleAdsFailure exceptions will contain trailing metadata on the
        # error attribute.
        return exception.error.trailing_metadata()
    except AttributeError:
        try:
            # Transport failures, i.e. issues at the gRPC layer, will contain
            # trailing metadata on the exception itself.
            return exception.trailing_metadata()
        except AttributeError:
            # if trailing metadata is not found in either location then
            # return an empty tuple
            return tuple()
f9e8f9d1a7ddc660bf24117127ca8ede5e979465
172,773
def generate_linkage(dist_mat, max_dist):
    """
    Create a nested dictionary linking genomes if their distance is within a
    certain threshold.
    :param dist_mat: pandas DataFrame with distances
    :param max_dist: maximum allowed distance to link genomes
    :return: a nested dictionary
    """
    linkage = {}
    for i in range(len(dist_mat.index)):
        g1 = dist_mat.index[i]
        if g1 not in linkage:
            linkage[g1] = {}
        for j in range(i + 1, len(dist_mat.columns)):
            g2 = dist_mat.columns[j]
            if g2 not in linkage:
                linkage[g2] = {}
            distance = dist_mat.iloc[i, j]
            if distance <= max_dist:
                linkage[g1][g2] = ""
                linkage[g2][g1] = ""
    return linkage
16084a616c7e65bee410de6702fb13ba4a938e30
509,637
def iterate(source, *keys):
    """Iterate a nested dict based on list of keys.

    :param source: nested dict
    :param keys: list of keys
    :returns: value
    """
    d = source
    for k in keys:
        if type(d) is list:
            d = d[int(k)]
        else:
            if k not in d:
                d[k] = {}
            d = d[k]
    return d
8ff40d970f6a41b21415c53158f323dc8df73006
600,943
def normalize_baseuri(baseuri: str) -> str:
    """Normalize a baseuri

    If it doesn't end in a slash, add one.
    """
    if baseuri[-1] != "/":
        return baseuri + "/"
    return baseuri
9e3938b84e99b49512d85f54bef6f26b3e8796e9
101,208
def parseOneDigit(n):
    """Given a single digit 1-9, return its name in a word"""
    if n == 1:
        return "One "
    elif n == 2:
        return "Two "
    elif n == 3:
        return "Three "
    elif n == 4:
        return "Four "
    elif n == 5:
        return "Five "
    elif n == 6:
        return "Six "
    elif n == 7:
        return "Seven "
    elif n == 8:
        return "Eight "
    elif n == 9:
        return "Nine "
    return ""
98eb1eda9bb4b428d0f73095dc21c65997516e00
370,349
def get_labels(G):
    """Gets all the various labels of a graph

    Args:
        G (Graph): graph

    Returns:
        [set]: Set of labels
    """
    labels = set()
    for g in G:
        for i in range(g.number_of_nodes()):
            labels.add(g.nodes[i]["labels"][0])
    return labels
7a39fbfcc68a073c1ed08f638c8e1b3bdc591dbf
129,031
import re


def parse_input_numbers(s: str):
    """
    Parse an input string for numbers and ranges.

    Supports strings like '0 1 2', '0, 1, 2' as well as ranges such as '0-2'.
    """
    options = []
    for option in re.split(", | ", s):
        match = re.search(r"(\d+)-(\d+)", option)
        if match:
            # Sort numerically so multi-digit ranges expand correctly.
            lower, upper = sorted([int(match.group(1)), int(match.group(2))])
            options = options + list(range(lower, upper + 1))
        else:
            try:
                options.append(int(option))
            except ValueError:
                pass
    return options
e307862bd25da251e0a11757b9b52eae47c8a1c0
165,394
import requests


def get_genome_sequence_ensembl(chrom, start, end):
    """
    API described here http://rest.ensembl.org/documentation/info/sequence_region
    """
    url = 'https://rest.ensembl.org/sequence/region/human/{0}:{1}..{2}:1?content-type=application/json'.format(chrom, start, end)
    r = requests.get(url, headers={"Content-Type": "application/json"}, timeout=10.000)
    if not r.ok:
        print("REST Request FAILED")
        decoded = r.json()
        print(decoded['error'])
        return
    else:
        print("REST Request OK")
    decoded = r.json()
    return decoded['seq']
a15ac669a454ed3b5d0672f9a429f93fa4709bac
306,494
def add(x: int, y: int):
    """A function to add stuff.

    :param x: A number, x
    :param y: A number, y
    :return: A number, x + y
    """
    return x + y
3358800af03e094463b22296b393f6e935bf154c
25,752
import array


def find_compensation(payload, secret):
    """Finds the u16bit word to append in the secret in order to obtain the
    same checksum as in the payload."""
    if len(payload) % 2 == 1:
        payload += b"\0"
    if len(secret) % 2 == 1:
        secret += b"\0"
    sp = sum(array.array("H", payload))  # sum payload 16bit words
    ss = sum(array.array("H", secret))   # sum secret 16bit words
    val = sp - ss                        # subtract sums
    val = (val >> 16) + (val & 0xffff)   # shift and mask 16bit for carry
    val += val >> 16                     # make it unsigned
    return val
8eab72aac2b3aeb69b4c7c56fd74724417957db5
161,970
def distance_greater(vec1, vec2, length):
    """Return whether the distance between two vectors is greater than the given length."""
    return ((vec1[0] - vec2[0])**2 + (vec1[1] - vec2[1])**2) > length**2
86be8f0b474e8a07d5dbb736597b80a0f4d84355
266,678
def interval_data_from_time_series(data, use_left_endpoint=False):
    """
    This function converts time series data to piecewise constant interval data.
    A series of N time points and values yields N-1 intervals. By default, each
    interval takes the value of its right endpoint.

    In: ([t0, ...], {str(cuid): [value0, ...], },)
    Out: {str(cuid): {(t0, t1): value0 or value1, ...},}

    Arguments
    ---------
    data: tuple
        First entry is a list of time points, second entry is a dict mapping
        names each to a list of values at the corresponding time point
    use_left_endpoint: bool
        Optional. Indicates whether each interval should take the value of its
        left endpoint. Default is False, i.e. each interval takes the value of
        its right endpoint.

    Returns
    -------
    dict of dicts
        Maps names to a dict that maps interval-tuples each to the value over
        that interval
    """
    time, value_dict = data
    n_t = len(time)
    if n_t == 1:
        t0 = time[0]
        return {
            name: {(t0, t0): values[0]} for name, values in value_dict.items()
        }
    else:
        # This covers the case of n_t > 1 and n_t == 0
        interval_data = {}
        intervals = [(time[i-1], time[i]) for i in range(1, n_t)]
        for name, values in value_dict.items():
            interval_values = [
                values[i-1] if use_left_endpoint else values[i]
                for i in range(1, n_t)
            ]
            interval_data[name] = dict(zip(intervals, interval_values))
        return interval_data
e4f815393f799d0f27798bb18a5ca6d6c5dc2110
351,773
import math


def dq0_to_abc(ud, uq, u0, wt=2*math.pi):
    """Convert to abc."""
    ua = ud*math.cos(wt) - uq*math.sin(wt) + u0
    ub = ud*math.cos(wt-(2/3)*math.pi) - uq*math.sin(wt-(2/3)*math.pi) + u0
    uc = ud*math.cos(wt+(2/3)*math.pi) - uq*math.sin(wt+(2/3)*math.pi) + u0
    return ua, ub, uc
8f1964137dd11016e853460abd7fbfc8413cef6d
478,692
import typing


def extract_time_limit(time_limit: str) -> typing.Tuple[int, int]:
    """Extract hour and minutes from string.

    >>> extract_time_limit("09:00")
    (9, 0)
    >>> extract_time_limit("14:30")
    (14, 30)
    """
    data = time_limit.split(":")
    parts_count = len(data)
    if parts_count == 1:
        print(f"Could not find minutes in {time_limit}, defaulting to 0")
        return int(data[0]), 0
    return int(data[0]), int(data[1])
06b577d955ba05174f5096a00cb576c1c3648bc2
523,486
def mostrar_criterio(critStr: str):
    """
    Shows the specified criterion in a readable, natural-language format so
    the user can enter the expected values. Needed for specific "Filtros"
    values.

    Args:
        critStr (str): Criterion identified by its string ("barrios")

    Returns:
        str: Text string with improved info for the user
    """
    if critStr == "fromfecha":
        return "*desde una fecha* concreta (Introduce *mΓ‘s tarde* en formato: _DD/MM/AAAA_)"
    elif critStr == "tofecha":
        return "*hasta una fecha* concreta (Introduce *mΓ‘s tarde* en formato: _DD/MM/AAAA_)"
    else:
        return "por " + critStr
5af06d553732bfbf11001615a55f97920ca3629c
501,403
def square_n(n):
    """Returns the square of a number"""
    return int(n * n)
7d6e647b4e06758deec474b4355151f79afc15b3
578,039
def split_line(line):
    """
    :param line: from an input data file.
    :return: lower-cased words split by whitespaces.
    """
    return line.split('\t')[3].strip().lower().split(' ')
9932450bc1da3ec8e2313ba2fb7c2b42cefe4827
126,150
def get_pagination_readable_message(header: str, limit: int, page: int) -> str:
    """
    Generate pagination commands readable message.

    Args:
        header (str): Message header
        limit (int): Number of elements to retrieve.
        page (int): Page number.

    Returns:
        str: Readable message.
    """
    readable_message = f'{header}\n Current page size: {limit}\n Showing page {page} out of others that may exist.'
    return readable_message
a347f1993044bc5bb766d56b515bfb00357e1c6d
525,486
def remove_users(users, minNumRatings=10):
    """
    Remove users with fewer ratings than minNumRatings.

    Parameters:
    -----------
    users : Pandas DataFrame
        the dataframe corresponding to user/anime pairs (and the rating given)
    minNumRatings : int
        minimum number of anime a user must have rated to not be removed.

    Returns:
    --------
    users : Pandas dataframe
        the dataframe corresponding to user/anime pairs (and the rating given)
    """
    # Remove users with fewer than minNumRatings views
    vc = users.user_id.value_counts()
    low_ratings = vc[vc.map(lambda x: x < minNumRatings)].index
    users.drop(users[users.user_id.map(lambda x: x in low_ratings)].index, inplace=True)
    return users
7d218ffbf537996f62f7f379861d9a635675cf29
240,790
from typing import Optional


def _get_pipeline_runtime_type(pipeline_definition: dict) -> Optional[str]:
    """Return the runtime type name associated with the given pipeline"""
    return pipeline_definition.get("pipelines", [{}])[0].get("app_data", {}).get("runtime_type")
0b7af9c7a85547b159d3fb563daa780c2f3d3b85
346,446
def narrow_seat_position(
    char: str, lower_char: str, upper_char: str, lower_limit: int, upper_limit: int
) -> tuple[int, int]:
    """Halve the range of the seat position ([lower_limit, upper_limit]) given the value of char."""
    half_range = (upper_limit - lower_limit + 1) / 2
    if char == lower_char:
        return lower_limit, int(upper_limit - half_range)
    elif char == upper_char:
        return int(lower_limit + half_range), upper_limit
    else:
        raise ValueError(
            f"unexpected char ({char}) with lower_char ({lower_char}) and upper_char ({upper_char})"
        )
be332a78ac7e6dc2c6d86168723f8098a99e9ec1
578,874
def calib_max_pulse(joint, angle):
    """
    Utility for modifying maximum pulse length sent to servos.

    Inputs:
        joint: joint object
        angle: joint angle for which to modify pulse
    Returns:
        updated pulse value for input angle (int)
    """
    print(f'Current {joint.name} {angle}Β° pulse: {joint.max_pulse}. '
          'Enter new value, or press "enter" to proceed: ')
    pulse = joint.max_pulse
    while True:
        joint.set_angle(angle)
        new_val = input(': ')
        if not new_val:
            return int(pulse)
        try:
            pulse = int(new_val)
            joint.set_pulses(joint.min_pulse, pulse)
        except (ValueError, TypeError):
            print('Invalid input. Please enter a positive integer.')
1a698012c1053e391de120c8d9dec8064646d0ad
554,325
def has_context(json_ld: str) -> bool:
    """
    Returns True if '"@context"' is in ``json_ld`` and False otherwise.
    """
    return '"@context"' in json_ld
ca581ae5982c13a375d769078e89d95290d2c347
286,770
import logging


def get_logger(name: str) -> logging.Logger:
    """Gets you a descendant of the discord logger.

    That way it's easier when reading the log file to see at a glance where
    the info came from.
    """
    return logging.getLogger(f"discord.{name}")
8a24a81ca124a3465031892cdb2ae741ba077ee4
409,509
def minimum_int(num: float, min_num=1):
    """Given a number make it at least min_num and ensure it is an int."""
    num = int(num)
    if num < min_num:
        return min_num
    else:
        return num
d9dfb245a88bc207ebedb5becf2885819fb706bc
136,443
def lcamel(n: str) -> str:
    """Convert a string in upper or lower camel case to lower camel case"""
    return n[0].lower() + n[1:]
989785bc9767318b53818d9dcc1247cd86cf31ee
419,122
def compute_epsg(lon, lat):
    """
    Compute the EPSG code of the UTM zone which contains the point with given
    longitude and latitude

    Args:
        lon (float): longitude of the point
        lat (float): latitude of the point

    Returns:
        int: EPSG code
    """
    # UTM zone number starts from 1 at longitude -180,
    # and increments by 1 every 6 degrees of longitude
    zone = int((lon + 180) // 6 + 1)
    # EPSG = CONST + ZONE where CONST is
    # - 32600 for positive latitudes
    # - 32700 for negative latitudes
    const = 32600 if lat > 0 else 32700
    return const + zone
cad69a5811d0900547ff0649c4054f44b9dd0ce2
433,778
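A quick sanity check of compute_epsg with illustrative coordinates: Paris falls in UTM zone 31 north, Sydney in zone 56 south.

print(compute_epsg(2.35, 48.85))    # 32631 (UTM 31N)
print(compute_epsg(151.2, -33.87))  # 32756 (UTM 56S)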
def get_sub_event_id(subscription, use_charge=None):
    """
    Returns a formatted id for a subscription (or a use_charge on that
    subscription) that can be used as `event_id` in a `Transaction`.
    """
    substr = "sub_%d/" % subscription.id
    if use_charge:
        substr += "%d/" % use_charge.id
    return substr
5d9452fab7b1ab7c82d8894a4e584d1363007062
518,934
def weight_to_line(w):
    """
    - Takes weight vector w
    - Returns slope m and intercept b of line

    We know that w^T x = 0 for points (x1,x2) on the line, so
        w0 * 1 + w1 * x1 + w2 * x2 = 0
        w0 + w1 * x1 + w2 * x2 = 0
    Let's solve for x2:
        w2 * x2 = -w0 - w1*x1
        => x2 = -(w0/w2) + (-w1/w2) * x1
    This is the equation of a line x2 = b + m*x1.
    The intercept is b = -(w0/w2), the slope is m = -w1/w2
    Let's return both
    """
    b = -(w[0] / w[2])
    m = -(w[1] / w[2])
    return b, m
293d744be409a7ed8dfc5ec1929196adf7f64741
524,605
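A short numeric check of weight_to_line above, using an illustrative weight vector w = [6, 3, 2]:

b, m = weight_to_line([6, 3, 2])
print(b, m)  # -3.0 -1.5
# Any point on the returned line satisfies w0 + w1*x1 + w2*x2 = 0,
# e.g. x1 = 2 gives x2 = b + m*2 = -6.0 and 6 + 3*2 + 2*(-6.0) == 0.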
def catch_input(default_value, desired_type, input_message="Input: ",
                failure_message="Invalid input.", default_message="Default value used.",
                num_attempts=3):
    """
    Function to better catch type errors in user input.

    If the input can be parsed using desired_type, then this value is returned.
    Otherwise, the user will be asked again a number of times (up to
    num_attempts) and if the input still results as an error, then the
    default_value is returned.

    Parameters
    ----------
    default_value : str
        Value returned if all inputs fail. Must be able to be parsed by desired_type.
    desired_type : type
        Desired type of the input (e.g. str, int).
    input_message : str, optional
        Prompt to user for input. The default is "Input: ".
    failure_message : str, optional
        Message to print when input fails. The default is "Invalid input.".
    default_message : str, optional
        Message to print when default_value used. The default is "Default value used.".
    num_attempts : int, optional
        Number of times to attempt to prompt for input. The default is 3.

    Returns
    -------
    type as specified by desired_type
        Value of input if successful, or default_value otherwise.
    """
    attempt = 0
    while attempt < num_attempts:
        try:
            return desired_type(input(input_message))
        except:
            print(failure_message)
            attempt += 1
            continue
    else:
        print(default_message)
        return desired_type(default_value)
c3177bfabff11c9875e6ef80c22f06d251b2447a
646,203
def is_marker_blue(marker):
    """
    Check color of marker
    :param marker:
    :return: True if marker == blue
    """
    # type: (Marker) -> bool
    return marker.color.r == 0.0 and marker.color.g == 0.0 and marker.color.b == 1.0
47dc897b89993da48f6a311eb02c0e3bbd354e29
546,994
def is_special_char(astring):
    """ (str) -> Boolean

    returns True if astring contains a special character: !, @, #, $, %, ^, &
    else return False.

    >>> is_special_char('CIS122')
    False
    >>> is_special_char('CIS-122')
    False
    >>> is_special_char('CIS122!')
    True
    """
    special = '!@#$%^&'
    for c in astring:
        if c in special:
            return True
    return False
db322ed7258347a7ca25e98ed0eebbf174013742
421,874
def keys_volume_type_get(volume_type_id, **kwargs):
    """
    Return extra specs of the specified volume type.
    """
    url = "/types/{volume_type_id}/extra_specs".format(
        volume_type_id=volume_type_id)
    return url, {}
2f698f87aeddcf62e8dfd79a90db8506e2f683bb
529,474
import hashlib


def hash_generator(file, hash_algorithm):
    """
    Generate a hash for a file based on a given hash algorithm

    Parameters
    ----------
    file : bytes
        File to be run through the hash algorithm
    hash_algorithm : str
        Hash algorithm to use

    Returns
    -------
    String of the file hash generated by the given hash algorithm.
    """
    h = hashlib.new(hash_algorithm)
    h.update(file)
    return h.hexdigest()
57031cdd17a3781562b50c759d4a4c2b58313348
234,781
import math
import random


def random_word_fix(tokens, vocab_range, mask, default_num=10):
    """
    Masking some random tokens for Language Model task with probabilities as
    in the original BERT paper.
    :param tokens: list of int, tokenized sentence.
    :param vocab_range: for choosing a random word
    :return: (list of int, list of int), masked tokens and related labels for
        LM prediction
    """
    total_len = len(tokens)
    mask_len = math.ceil(default_num * 0.15)
    mask_num = random.sample([_ for _ in range(total_len)], mask_len)
    output_label = [-1 for _ in range(total_len)]
    for mask_index in mask_num:
        token = tokens[mask_index]
        tokens[mask_index] = mask
        output_label[mask_index] = token
    return tokens, output_label
fc6450dc58d2b4bd63f8260a9e63b43ed945b1bd
107,238
import torch


def get_optimizer(parameters, lr, weight_decay):
    """
    Initiate Adam optimizer with fixed parameters

    Args:
        parameters: filter, parameters to optimize
        lr: float, initial learning rate
        weight_decay: float, between 0.0 and 1.0

    Return:
        a torch.optim.Adam optimizer
    """
    return torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
54b4c6a4cd02672ebfc8ff9c850f0d601fc6510f
14,165
def filter_fuzzers(experiment_df, included_fuzzers):
    """Returns table with only rows where fuzzer is in |included_fuzzers|."""
    return experiment_df[experiment_df['fuzzer'].isin(included_fuzzers)]
1768494a1cd2a01da18babd52cc507be2b048c2c
451,720
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Compute max metric between prediction and each ground truth."""
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)
89538e53df32b991e3dbef180b91376af3d93caa
536,709
from typing import List


def uniform_knot_vector(n: int, order: int) -> List[float]:
    """
    Returns an uniform knot vector for a B-spline of `order` and `n` control
    points.

    `order` = degree + 1

    Args:
        n: count of control points
        order: spline order
    """
    return [float(knot_value) for knot_value in range(0, n + order)]
12fa2e988579ae423e2b1f33fd88a5a6baf77111
556,456
def null_query(x):
    """
    Returns false regardless of the document passed to the function.
    """
    return False
cd64cb092a03991ee45aa0a58c9bbfd708b19b5b
566,514
def _is_auth_required(entity, includes):
    """ Some calls require authentication. This returns
    True if a call does, False otherwise
    """
    if "user-tags" in includes or "user-ratings" in includes:
        return True
    elif entity.startswith("collection"):
        return True
    else:
        return False
8a32a7a51b168779bf836f8ccf231cbbe15f9698
170,396
from typing import List


def add_tag(new_tags: List, tag_collection: dict, image_label: str):
    """
    This function accepts the new tags, the collection to store the tag, the
    label of the image and returns the updated collection by adding the tags
    and image label to the collection.

    :param new_tags (List): The List of tags to be added
    :param tag_collection (Dict): The collection containing the tags
    :param image_label (str): The label of the image
    :return tag_collection (Dict): The collection containing the updated tags
    """
    for tag in new_tags:
        if tag not in tag_collection:
            tag_collection[tag] = []
        if image_label not in tag_collection[tag]:
            tag_collection[tag].append(image_label)
    return tag_collection
7e59953833335feac5665d08832aae90ce1a4c90
167,535
def gt_miss(g):
    """
    Return True if sample genotype is missing.

    Parameters
    ----------
    g : str
        Sample genotype.

    Returns
    -------
    bool
        True if sample genotype is missing.

    Examples
    --------
    >>> from fuc import pyvcf
    >>> pyvcf.gt_miss('0')
    False
    >>> pyvcf.gt_miss('0/0')
    False
    >>> pyvcf.gt_miss('0/1')
    False
    >>> pyvcf.gt_miss('0|0:48:1:51,51')
    False
    >>> pyvcf.gt_miss('./.:.:.')
    True
    >>> pyvcf.gt_miss('.:.')
    True
    >>> pyvcf.gt_miss('.')
    True
    >>> pyvcf.gt_miss('./.:13,3:16:41:41,0,402')
    True
    """
    return '.' in g.split(':')[0]
ea68704f144eca949aacc03c15a5de3106416e56
510,087
def get_contact_info_keys(status_update):
    """Returns the contact info method keys (email, sms) used to send a
    notification for a status update if the notification exists.

    Returns [] if there is no notification
    """
    if hasattr(status_update, 'notification'):
        return list(status_update.notification.contact_info.keys())
    else:
        return []
020a9742df99cd65be1433165823c4f364009d85
15,879
def load_rob_ephemeris(fname):
    """
    Use to grab the ephemeris points in one of Rob's simulation data files

    ***PARAMS***
        fname [string]: name (including path) of file containing ephemeris pts

    ***RETURNS***
        Roblons [list of floats]: longitudes
        Roblats [list of floats]: latitudes
        Robalts [list of floats]: altitudes

    * This function was written for parsing Rob Gillies' simulation's files. *
    """
    f = open(fname)
    Roblons = []
    Roblats = []
    Robalts = []
    for line in f:
        l = line.split()
        try:
            Roblons.append(float(l[1]))
            Roblats.append(float(l[2]))
            Robalts.append(float(l[3]))
        except:
            continue
    return Roblons, Roblats, Robalts
8788dbd9472113bd2e40f6fe9bd75040ad593de0
162,766
import aiohttp


async def dog_image():
    """
    Fetches an URL of a random dog image.
    """
    async with aiohttp.ClientSession() as session:
        async with session.get("https://dog.ceo/api/breeds/image/random") as r:
            return (await r.json())['message']
7dacf206cca2d8f19f93f15d3b038b1db9cb5d60
153,947
def _format_host_port_alias(host, port, alias):
    """Format a host, port, and alias so it can be used for comparison or display."""
    if alias:
        return f"{alias}@{host}:{port}"
    return f"{host}:{port}"
adaaee9a063df78e35a9df521173478c170e75c5
625,167
def getAllDifferentValues(clustersPerEpoch):
    """
    Get all the different values occurring during a simulation

    :param clustersPerEpoch: List with dictionaries for all epochs. The
        dictionary has the set of different values (according to column) and
        their number
    :type clustersPerEpoch: list

    :returns: set -- Set containing all values occurring during a simulation
    """
    allValues = set()
    for epochSummary in clustersPerEpoch:
        for value in epochSummary:
            allValues.update([value])
    return allValues
99f5d48d0b555118af38660848b6392e8024d6e9
486,133
import logging
import requests


def post_to_slack(webhook_url, payload, headers):
    """
    Purpose:
        Post Payload (JSON) to Slack Channel
    Args:
        webhook_url (String): URL for Slack Webhook accepting requests
        payload (JSON Obj): JSON Object payload to send Slack Webhook
        headers (Dict): Dictionary of headers to send Slack Webhook
    Returns:
        response (Requests Response Object): Response object from Slack Webhook
    """
    logging.info(f"Sending Alert in Slack: {payload}")

    response = requests.post(
        webhook_url,
        data=payload,
        headers=headers,
    )

    if response.status_code != 200:
        error_msg = f"Slack Message Failed to Send: code = {response.status_code}, "\
            f"message = {response.text}"
        logging.error(error_msg)
        raise Exception(error_msg)

    return response
2f8b277f3e43e7f4dbcf396e2f643ec187b4061e
129,935
def getattrchain(obj, chain, default=None):
    """Like getattr, but the attr may contain multiple parts separated by '.'"""
    for part in chain.split('.'):
        if hasattr(obj, part):
            obj = getattr(obj, part, None)
        else:
            return default
    return obj
eb3ee6cc78d5cbed29a87da1e5065112bfe3adf3
493,968
import csv


def get_data_from_csv(csv_map):
    """Return the data of a csv file."""
    with open('tests/' + csv_map, encoding='utf-8') as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        result = []
        for row in readCSV:
            result.append(row)
    return result
6279c1e76dfbf4223e2e0ed3a3abc744b201d645
160,778
def get_multiplier(factor):
    """
    Convert the factor into a number.
    :param factor: the string 'mb', 'm', or 'k'
    :return: 10000000, 1000000, 1000 or 1
    """
    if factor.lower() == 'mb':
        return 10000000
    elif factor.lower() == 'm':
        return 1000000
    elif factor.lower() == 'k':
        return 1000
    return 1
ec3467eae3d8c285188c65768c05056fe0b5e7eb
113,366
def gdx_isnan(val, gdxf):
    """
    Utility function for equating the GDX special values that map to None or
    NaN (which are indistinguishable in pandas).

    Parameters
    ----------
    val : numeric
        value to test
    gdxf : GdxFile
        GDX file containing the value. Provides np_to_gdx_svs map.

    Returns
    -------
    bool
        True if val is a GDX encoded special value that maps to None or
        numpy.nan; False otherwise
    """
    return val in [gdxf.np_to_gdx_svs[0], gdxf.np_to_gdx_svs[1]]
915baa0f7d00bc8b4121005a92519e0d5a052f91
598,531
import math


def cie76(c1, c2):
    """
    Color comparison using CIE76 algorithm.
    Returns a float value where 0 is a perfect match and 100 is opposing colors.
    Note that the range can be larger than 100.

    http://zschuessler.github.io/DeltaE/learn/
    LAB Delta E - version CIE76
    https://en.wikipedia.org/wiki/Color_difference

    E* = 2.3 corresponds to a JND (just noticeable difference)
    """
    l = c2[0] - c1[0]
    a = c2[1] - c1[1]
    b = c2[2] - c1[2]
    return math.sqrt((l * l) + (a * a) + (b * b))
9470b66231252decd8be7f07af2591ddf1278edc
701,963
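A small illustrative check of cie76: two Lab colors differing only by 2.3 on the a* channel sit exactly at the JND threshold mentioned in the docstring. The Lab triples are hypothetical values.

print(round(cie76((50.0, 0.0, 0.0), (50.0, 2.3, 0.0)), 3))  # 2.3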
def create_cart(skus):
    """
    Create the cart
    :param skus: list of items
    :return: A dictionary representing the cart
    """
    distinct_skus = list(set(skus))
    _cart = {}
    for sku in distinct_skus:
        _cart[sku] = skus.count(sku)
    return _cart
b6fc7f5e36b113f37eadd03b44efd1a9b14dbed2
392,663
def copy_event_attributes(ev1, ev2):
    """Copy all attributes from one roxar event to another.

    Args:
        ev1: roxar event to copy into
        ev2: roxar event to copy attributes from

    Returns:
        An updated version of ev1. Unaltered if the two events are not of same type.
    """
    if ev1.type == ev2.type:
        for key in ev1.attribute_keys:
            ev1[key] = ev2[key]
    return ev1
27f2c8539d832a6d641cf0b91b67c348ff12a7c8
683,561
from typing import Dict
import re


def isis3_to_dict(instr: str) -> Dict:
    """
    Given a stdout string from ISIS3, return a Dict version

    :param instr:
    :return: dictionary of isis output
    """
    groups = re.findall(r'Group([\S\s]*?)End_Group', instr)
    out = {}
    for group in groups:
        lines = [x.replace('=', '').split() for x in group.split('\\n')]
        group_name = lines[0][0]
        out[group_name] = {t[0]: t[1] for t in lines[1:-1]}
    return out
de2b9947058bb0d5a83018ad2522bfc9daeb8907
509,321
import torch


def apply_distance_bound(data_region_i, data_region_i_orig, args):
    """
    Input:
        data_region_i: (S,3) tensor, current region i points
        data_region_i_orig: (S,3) tensor, original region i points
    Return:
        data_region_i: modified data_region_i
        count: number of points that exceed distance bound
    """
    with torch.no_grad():
        region_i_diff = data_region_i - data_region_i_orig  # (S,3)
        region_i_diff_distance = torch.norm(region_i_diff, dim=1)  # (S,)
        total_points = region_i_diff_distance.shape[0]
        count = 0
        for i in range(total_points):
            # check bound
            if region_i_diff_distance[i] > args.dist_threshold:
                count += 1
                data_region_i[i].data = data_region_i_orig[i].data + args.dist_threshold * region_i_diff[i] / region_i_diff_distance[i]
    return data_region_i, count
0cf40bfbc83ad3c2104c969bb5faaace6b88d5db
689,419
import math


def convert_timeval(seconds_since_epoch):
    """Convert time into C style timeval."""
    frac, whole = math.modf(seconds_since_epoch)
    microseconds = math.floor(frac * 1000000)
    seconds = math.floor(whole)
    return seconds, microseconds
204bed6494250045c2648e0f6826fbf0f30936ce
244,902
import torch


def cumavg(a: torch.FloatTensor):
    """
    Parameters
    ----------
    a : torch.tensor
        Shape: l,b,e

    Returns
    -------
    Cumulative average : torch.tensor
        Shape : l,b,e
        avg over l dimension
    """
    # l,b,e
    cumsum = torch.cumsum(a, 0)
    batch_size = cumsum.shape[1]
    # Note that for the shorter sequences the cum avg is not correct
    # after the last time step of that sequence.
    # This is corrected in the loss calculation.
    lengths = torch.arange(1, 1 + cumsum.shape[0], device=a.device)
    return cumsum / lengths.reshape(cumsum.shape[0], 1, 1)
5f4b3cc45a25911c8c586b34dd21baa27c9f4105
603,014
def get_geometry_type(gi):
    """ Return the geometry type from a __geo_interface__ dictionary """
    if gi["type"] == "Feature":
        return get_geometry_type(gi["geometry"])
    elif gi["type"] in ("FeatureCollection", "GeometryCollection"):
        return get_geometry_type(gi["geometries"][0])
    else:
        return gi["type"]
6e1292863dd45933c59e84cc465d3ade53248c08
34,102
def get_ff_par(atom_name, ff_parameters):
    """
    Get sigma and epsilon values for given atom name and force field parameters dictionary.
    """
    atom_index = ff_parameters['atom'].index(atom_name)
    sigma = ff_parameters['sigma'][atom_index]
    epsilon = ff_parameters['epsilon'][atom_index]
    return sigma, epsilon
a57548f91c833cd690f8a53202bb3fe564ddfb14
150,640
def get_metric(mod, obs, fun, dim='time', verbose=False):
    """Calculate a metric along a dimension.

    Metrics implemented:
        * correlation                  > corr
        * mse                          > mse
        * rmse                         > rmse
        * mean percentage error        > mpe
        * standard deviation ratio     > stdratio
        * bias                         > bias
        * bias squared                 > bias2
        * phaseerr                     > phaseerr
        * varerr                       > varerr
        * robust modeling efficiency   > nse_rob
        * modeling efficiency          > nse

    Only values present in both datasets are used to calculate metrics.

    Parameters
    ----------
    mod: xarray.DataArray
        Modelled data.
    obs: xarray.DataArray
        Observed data.
    fun: Callable
        A function that takes three arguments: Modelled (xarray.DataArray),
        observed (xarray.DataArray) and the dimension along which the metric
        is calculated.
    dim: str
        The dimension name along which the metric is calculated, default is
        `time`.

    Returns
    ----------
    xarray.Dataset
    """
    return fun(mod, obs, dim)
f70caba77a464f8ed55da0daa2c4b4eb5a45a4e6
646,536
def get_unique(list_):
    """Returns a list where each value occurs only once.
    """
    return list(set(list_))
b1380522fa407157d03b4d5b87daed73eed70b8d
26,117
import re


def is_countable_discussions_uri(uri):
    """
    Determines if the URI should be counted at all.

    Some URIs, like static css, need to be skipped.

    Arguments:
        uri (string): The URI of the request.

    Returns:
        True if the request should be counted, False otherwise.
    """
    return re.search('/static/css/discussion', uri) is None
88a4b8f3ea544653cdb7873ebdffb4e165989bbc
345,397
def find_missing_int_using_sum(complete, incomplete):
    """
    Problem: There are distinct integers in list `complete`. The same integers
    are in list `incomplete`, except for one.

    Task: Find the one integer which is missing from the incomplete list.

    Complexity: O(n) time, O(1) space
    """
    s = 0
    for a, b in zip(complete[:-1], incomplete):
        s += a
        s -= b
    return s + complete[len(complete) - 1]
6136b77ac7b2b3482b6258873fc6429082dc9ba2
268,048
def get_reads_in_interval(sam, chrom, start, end):
    """
    Given an open pysam.AlignmentFile, return only the reads that overlap the
    provided interval. Note that this means there may be reads that extend
    beyond the bounds of the interval.
    """
    iterator = sam.fetch(chrom, start, end)
    reads = [x for x in iterator]
    return reads
9fa6338ee9ed8ad0524e045c5cd42450dbb6bcec
540,227
import itertools


def concat(iterables):
    """Return iterables concatenated into one iterable.

    This is just a shortcut to itertools.chain.from_iterable. If you're only
    using this library for concat, consider using the itertools version
    instead.

    >>> list(concat([[1, 2, 3], [4, 5, 6]]))
    [1, 2, 3, 4, 5, 6]
    """
    return itertools.chain.from_iterable(iterables)
c8b0bc987e353d97c6c045479671e9d685cdc427
377,414
def _extended_gcd(a, b):
    """Returns (g, x, y) such that a*x + b*y = g = gcd(a, b)"""
    x, x_old, y, y_old = 0, 1, 1, 0
    while a != 0:
        (q, a), b = divmod(b, a), a
        y, y_old = y_old, y - q * y_old
        x, x_old = x_old, x - q * x_old
    return b, x, y
e96a65990cc9e6165867ccfd7756dcf0ae2b33d8
695,681
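A worked check of the extended Euclidean helper above, with illustrative inputs: gcd(240, 46) = 2, and the returned Bezout coefficients satisfy 240*(-9) + 46*47 = 2.

g, x, y = _extended_gcd(240, 46)
print(g, x, y)           # 2 -9 47
print(240 * x + 46 * y)  # 2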
def interpolate_weather_features(dataset):
    """
    Linear time-based interpolate of temperature and humidity features.
    """
    dataset = dataset.copy()
    for feature in ["temp", "humidity"]:
        dataset[feature] = dataset[feature].interpolate(
            method="time", limit_direction="both"
        )
    return dataset
c29d53a0ec41df9175891f359cbe848b5cc5263e
506,518
def mapRangeParam(u, min_u, max_u, min_v, max_v):
    """
    Linearly map a floating point value u in the range [min_u, max_u] to a
    value v in the range defined by [min_v, max_v].

    Args:
        See description.

    Return:
        Floating point value v
    """
    assert type(u) is float
    assert type(min_u) is float
    assert type(max_u) is float
    assert type(min_v) is float
    assert type(max_v) is float
    return ((max_v - min_v) * (u - min_u)) / float(max_u - min_u) + min_v
946c13f19e34b6f3007004b871b515652c96dd9e
600,104
from typing import List


def min_candies(n: int, ratings: List[int]) -> int:
    """
    Return the minimum amount of candies to give to students.

    Candies are assigned using the following rules:
        1. Every student gets at least 1 candy
        2. If 2 students are next to each other, the one with the higher
           rating must receive at least one more candy.
        3. The candies given should be minimized.

    :time: O(n) where n is the amount of students
    :space: O(n)
    """
    candies = [1] * n
    for i, (r1, r2) in enumerate(zip(ratings, ratings[1:])):
        if r2 > r1:
            candies[i + 1] = candies[i] + 1
    ratings, candies = ratings[::-1], candies[::-1]
    for i, (r1, r2) in enumerate(zip(ratings, ratings[1:])):
        if r2 > r1 and candies[i + 1] <= candies[i]:
            candies[i + 1] = candies[i] + 1
    return sum(candies)
656b4a8fd1754d51795b4e80cca0a42a7dab46b7
71,836
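A quick check of the two-pass candy allocation above, using illustrative ratings:

print(min_candies(3, [1, 0, 2]))  # 5 (candies 2, 1, 2)
print(min_candies(3, [1, 2, 2]))  # 4 (candies 1, 2, 1)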
def bpe_postprocess(string, bpe_type="subword-nmt") -> str:
    """
    Post-processor for BPE output. Recombines BPE-split tokens.

    :param string:
    :param bpe_type: one of {"sentencepiece", "subword-nmt"}
    :return: post-processed string
    """
    if bpe_type == "sentencepiece":
        ret = string.replace(" ", "").replace("▁", " ").strip()
    elif bpe_type == "subword-nmt":
        ret = string.replace("@@ ", "").strip()
    else:
        ret = string.strip()
    return ret
e52f3fcec60f6c21df3f7181b3f00b35cf4d6229
517,420
def sumar_lista(lista):
    """Sums a set of values in a list.
    """
    suma = 0
    for numero in lista:
        suma += numero
    return suma
b80dc6ac4fed32f87ddb23b6b1a7cba6a7f443d9
284,516
import importlib
import time


def time_algo(call_string, module_name):
    """
    Times the execution of a python call string.

    :param call_string: str
        string that calls a python module and executes an algorithm
    :param module_name: str
        name of module from which function is called
    :return run_time: float
        time in seconds required to execute python call string
    """
    module = importlib.import_module(module_name)
    start = time.time()
    exec(call_string)
    finish = time.time()
    run_time = finish - start
    return run_time
f24e708c04a765487b3c009b7ef5f9929e4c885b
47,638
def either(f, g):
    """Returns a function that returns True if either f or g returns True."""
    def apply_to(*args, **kwargs):
        return f(*args, **kwargs) or g(*args, **kwargs)
    return apply_to
d7ba8719cb625026e8c0ce21cb7ab904bdb53ebe
449,774
def invalid_timestamps(invalidation_stamps, invalidate_cut):
    """
    Helper function for invalid_timebins

    Return a dataframe with the timestamps, for which all timebins in between
    have to be invalidated
    """
    invalidation_stamps['timediff'] = invalidation_stamps['timestamp'].diff().fillna(0).astype(int)
    return invalidation_stamps[invalidation_stamps['timediff'] > invalidate_cut]
7c1768ae6af657f0e95d0aa40404825c231f1984
458,913
def getKey(word):
    """ Used to sort words by first character """
    return word[0]
d80be6550a1b35e77da3571aa7e24683d361dd76
151,365
def reset_env(env):
    """Resets the pendulum in the safe area."""
    env.reset()
    env.env.state = env.np_random.uniform(low=[-0.1, -0.5], high=[0.1, 0.5])
    env.env.last_u = None
    return env.env._get_obs()
55c987fb8bd9011d5fe16e70828bd4acac2b6be6
18,998