Columns: content (string, lengths 39 to 9.28k) · sha1 (string, length 40) · id (int64, 8 to 710k)
from typing import Any, Dict, List


def set_user_defined_functions(
    fha: Dict[str, Any], functions: List[str]
) -> Dict[str, Any]:
    """Set the user-defined functions for the user-defined calculations.

    .. note:: by default we set the function equal to 0.0.  This prevents
        Sympy errors resulting from empty strings.

    :param fha: the functional hazard assessment dict.
    :param list functions: the list of functions; list items are str.
    :return: fha; the functional hazard assessment dict with updated functions.
    :rtype: dict
    """
    _key = ""
    for _idx in [6, 7, 8, 9, 10]:
        try:
            _key = list(fha.keys())[_idx]
            if str(functions[_idx - 6]) == "":
                fha[_key] = "0.0"
            else:
                fha[_key] = str(functions[_idx - 6])
        except IndexError:
            fha[_key] = "0.0"
    return fha
5078168319e4360f3ac3b3c02087439e5175da07
654,195
import math


def compute_colors(n, cols):
    """Interpolate a list of colors cols to a list of n colors."""
    m = len(cols)
    lst = []
    for i in range(n):
        j = math.floor(i * (m - 1.0) / (n - 1.0))
        k = math.ceil(i * (m - 1.0) / (n - 1.0))
        t = (i * (m - 1.0) / (n - 1.0)) - j
        (r0, g0, b0) = cols[int(j)]
        (r1, g1, b1) = cols[int(k)]
        r = min(255, max(0, int(0.5 + (1.0 - t) * r0 + t * r1)))
        g = min(255, max(0, int(0.5 + (1.0 - t) * g0 + t * g1)))
        b = min(255, max(0, int(0.5 + (1.0 - t) * b0 + t * b1)))
        lst.append((r, g, b))
    return lst
121f2080faa3ca9d574572a47c231fc525ee74fe
651,450
def output_dest_addr(pnr, settings):
    """Output destination address in pnr header."""
    return settings.dest_addr
91d6bd55bbabdc58ce1c5d7770960c40134cac7a
403,713
import torch


def cos_dist_mat(x, y):
    """Cosine distance matrix (1 - cosine similarity).

    :param x: [n1, d]
    :param y: [n2, d]
    :return: [n1, n2]
    """
    n1, d1 = x.shape
    n2, d2 = y.shape
    assert d1 == d2
    if n1 * n2 > 10000:
        # Compute row by row to limit peak memory for large inputs.
        D = torch.cat(
            [
                torch.cosine_similarity(x[i].view(1, 1, d1), y.view(1, n2, d2), -1)
                for i in range(n1)
            ],
            0,
        )
    else:
        D = torch.cosine_similarity(x.reshape(n1, 1, d1), y.reshape(1, n2, d2), -1)
    return 1 - D
148a93e3f55bdfafe99dcca49beb488ae85ca5e8
377,118
import collections


def countSeqs(seqs):
    """Count n-gram statistics on a collection of sequences.

    Parameters
    ----------
    seqs : iterable( iterable(Hashable) )

    Returns
    -------
    bigram_counts : collections.defaultdict((Hashable, Hashable) -> int)
    unigram_counts : collections.defaultdict(Hashable -> int)
    initial_counts : collections.defaultdict(Hashable -> int)
    final_counts : collections.defaultdict(Hashable -> int)
    """
    bigram_counts = collections.defaultdict(int)
    unigram_counts = collections.defaultdict(int)
    initial_counts = collections.defaultdict(int)
    final_counts = collections.defaultdict(int)

    for seq in seqs:
        initial_counts[seq[0]] += 1
        final_counts[seq[-1]] += 1
        for state in seq:
            unigram_counts[state] += 1
        for prev, cur in zip(seq[:-1], seq[1:]):
            bigram_counts[prev, cur] += 1

    return bigram_counts, unigram_counts, initial_counts, final_counts
6c692c25c42e9e381f792f0674a96e0107a11e30
390,062
def check_scenarios(scenes):
    """Make sure all scenarios have unique case insensitive names."""
    assert len(scenes) == len(dict((k.lower(), v) for k, v in scenes))
    return scenes
c9b437d396a4d0ca17c17a85cb99e8574cc78fe3
22,127
def perfect_weighted_distance(reference: dict) -> int:
    """
    The nature of the algorithm causes some double counting to occur when we
    are recording contig lengths. Here we find out what the "length" of a
    completely correct assembly would be, and use that as the max length.

    Args:
        reference: The reference AGP dictionary.

    Returns:
        The maximum possible length from a completely correct assembly.
    """
    contigs, positions = reference
    length = 0
    for index in positions:
        # Get the total length for each edge.
        for contig_name, orientation in positions[index]:
            length += contigs[contig_name]["length"]
    return length
99748e7b321b9474b3bc7be883d981044070fa7e
175,494
from time import sleep


def fatorial(num, show=False):
    """Compute the factorial of a number.

    :param num: the number whose factorial is computed
    :param show: (optional) whether to display the calculation
    :return: the factorial of the number num
    """
    fat = 1
    print('-' * 30)
    if show:
        print('The calculation is: ', end='')
    for c in range(num, 0, -1):
        fat *= c  # Accumulate the factorial
        if show:  # If show is True, display the step-by-step calculation
            print(c, end='')
            sleep(0.5)
            if c != 1:
                print(end=' x ')
            else:
                print(end=' = ')
    return fat
f0ea977f5164a25106ce7768b725962bf4ed9af1
140,716
def is_leap(year: int) -> bool:
    """Returns True if year is a leap year.

    :param: year: Represents a year
    :returns: bool: True if year is a leap year
    """
    # Leap years are years that are divisible by 4, and not by 100,
    # OR are divisible by 400!
    CASE1: bool = year % 4 == 0
    CASE2: bool = year % 100 == 0
    CASE3: bool = year % 400 == 0
    return CASE1 and not CASE2 or CASE3
2a414ebae6a80139c1614853cbd25c1520149a88
58,699
def _dotted_path(segments):
    """Convert a JS object path (``['dir/', 'file/', 'class#',
    'instanceMethod']``) to a dotted style that Sphinx will better index."""
    segments_without_separators = [s[:-1] for s in segments[:-1]]
    segments_without_separators.append(segments[-1])
    return '.'.join(segments_without_separators)
58219b0ff710438401f9c2ef773db0b17a3f3d32
401,354
import math


def get_distance_between_coords(point1, point2):
    """
    Computes the distance between two GPS coordinates.

    :param point1:
    :param point2:
    :return: distance between two GPS coordinates in meters
    """
    R = 6371000  # Earth radius in meters
    lat1 = math.radians(point1[0])
    lat2 = math.radians(point2[0])
    lat = math.radians(point2[0] - point1[0])
    lon = math.radians(point2[1] - point1[1])
    # Haversine formula
    a = (math.sin(lat / 2) * math.sin(lat / 2)
         + math.cos(lat1) * math.cos(lat2) * math.sin(lon / 2) * math.sin(lon / 2))
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    distance = R * c
    return distance
15d78a6f6d52aeb64ce2917befb39bdda550d950
211,068
def check_views(views):
    """Checks which views were selected."""
    if views is None:
        return range(3)

    views = [int(vw) for vw in views]
    out_views = list()
    for vw in views:
        if vw < 0 or vw > 2:
            print('one of the selected views is out of range - skipping it.')
            continue
        out_views.append(vw)

    if len(out_views) < 1:
        raise ValueError(
            'At least one valid view must be selected. '
            'Choose one or more of 0, 1, 2.')

    return out_views
edfd0234cb1c8955176f52f53d74bd0f0cfbdeb6
345,647
def get_mean_intersection_over_union(scores):
    """
    MIOU generated by averaging IOUs of each class, unweighted.

    :param scores: an array of ClassificationScore objects
    :return: 0->1 where 1 implies perfect intersection
    """
    if len(scores) < 1:
        return 0
    iou = 0
    for score in scores:
        iou += score.get_iou()
    return iou / len(scores)
55c469c2a7ddfffb93e89f9cef14d386bd455756
445,812
import socket


def port_is_open(adr, timeout=1, attempts=1):
    """
    Returns `True` if the tcp address *ip*:*port* is reachable.

    Parameters:
        * adr
            The network address tuple of the target.
        * timeout
            Time in seconds waited until a connection attempt is considered
            to be failed.
        * attempts
            Number of port checks done until the *adr* is considered to be
            unreachable.
    """
    for _ in range(attempts):
        s = socket.socket()
        s.settimeout(timeout)
        try:
            s.connect(adr)
        except Exception:
            pass
        else:
            return True
        finally:
            s.close()
    return False
859ae0217056f83870fee49959fc15e8ca3ce10f
549,553
import hashlib
from pathlib import Path


def dir_hash(directory):
    """Return single md5 hash of all filenames and file contents in
    directory, for comparison."""
    hasher = hashlib.md5()
    path = Path(directory)
    if not path.exists():
        raise FileNotFoundError
    for file in sorted(path.glob('**/*')):
        hasher.update(bytes(str(file), 'utf8'))
        if file.is_file():
            hasher.update(file.read_bytes())
    return hasher.hexdigest()
0212555485be8c6c4a7193c5699b0d7bbc1bcf76
161,624
def generate_select(data, id=''):
    """Generates a select box from a tuple of tuples

    .. code-block:: python

        generate_select((
            ('val1', 'Value One'),
            ('val2', 'Value Two'),
            ('val3', 'Value Three')
        ))

    will return:-

    .. code-block:: html

        <select id=''>
            <option value="val1">Value One</option>
            <option value="val2">Value Two</option>
            <option value="val3">Value Three</option>
        </select>

    :param data: This is a tuple of tuples (can also be a list of lists,
        but tuples will behave more efficiently than lists and who likes
        mutation anyway?).
    :rtype: :class:`str`/HTML
    """
    output = []
    out = output.append
    out('<select id="%s">\n' % id)
    for option in data:
        out('\t<option value="%s">%s</option>\n' % (option[0], option[1]))
    out('</select>')
    return ''.join(output)
93018131712ad662ace2c66ba66afbd14bb22364
199,589
def enough(cap: int, on: int, wait: int) -> int:
    """
    The driver wants you to write a simple program telling him if he will
    be able to fit all the passengers. If there is enough space, return 0,
    and if there isn't, return the number of passengers he can't take.

    You have to write a function that accepts three parameters:
        cap is the amount of people the bus can hold excluding the driver.
        on is the number of people on the bus.
        wait is the number of people waiting to get on to the bus.

    :param cap:
    :param on:
    :param wait:
    :return:
    """
    if cap - on < wait:
        return wait - (cap - on)
    return 0
3333d31c1de9d77871ee15ca3960dc83898371e3
268,905
def parse_word_list_file(file_name):
    """Creates a jumble dictionary from a file containing a list of words.

    Args:
        file_name (str): Name of file containing a list of words

    Returns:
        dict[list[str]]: Jumble dictionary, keys are sorted words, values
        are lists of original words
    """
    jumble_dict = {}
    with open(file_name, "r") as word_list:
        for word in word_list:
            word = word.strip()  # Remove leading/trailing whitespace characters
            # Sorting the word, so we can easily find anagrams
            key = "".join(sorted(word.lower()))
            # setdefault searches for a key and returns its value if found.
            # If not found, creates a list on that key
            jumble_dict.setdefault(key, []).append(word)
    return jumble_dict
e72ec50d5cae539a76566063d1f5cde5b4797194
533,052
def _displacements_yaml_lines_type1_info(dataset):
    """Return lines of displacement-pair summary."""
    n_single = len(dataset["first_atoms"])
    n_pair = 0
    n_included = 0
    for d1 in dataset["first_atoms"]:
        n_pair += len(d1["second_atoms"])
        for d2 in d1["second_atoms"]:
            if "included" not in d2:
                n_included += 1
            elif d2["included"]:
                n_included += 1

    lines = []
    lines.append("displacement_pair_info:")
    if "cutoff_distance" in dataset:
        lines.append("  cutoff_pair_distance: %11.8f" % dataset["cutoff_distance"])
    lines.append("  number_of_singles: %d" % n_single)
    lines.append("  number_of_pairs: %d" % n_pair)
    if "cutoff_distance" in dataset:
        lines.append("  number_of_pairs_in_cutoff: %d" % n_included)

    # 'duplicates' is dict, but written as a list of list in yaml.
    # See the docstring of _parse_fc3_dataset for the reason.
    if "duplicates" in dataset and dataset["duplicates"]:
        lines.append("  duplicated_supercell_ids: "
                     "# 0 means perfect supercell")
        # Backward compatibility for dict type
        if type(dataset["duplicates"]) is dict:
            for disp1_id, j in dataset["duplicates"].items():
                lines.append("  - [ %d, %d ]" % (int(disp1_id), j))
        else:
            for (disp1_id, j) in dataset["duplicates"]:
                lines.append("  - [ %d, %d ]" % (disp1_id, j))
    lines.append("")

    return lines
0ca1bdf92be1302b1dbbd9a08ee933c7ff626884
442,653
def split(path, blocksize):
    """
    Split the file into blocks according to the given size.

    Input:
        `path`: video path
        `blocksize`: size of each block
    Output:
        list containing the split blocks
    """
    with open(path, 'rb') as video:
        byte_content = video.read()
    length = len(byte_content)
    splitted = list()
    for i in range(0, length, blocksize):
        splitted.append(byte_content[i: i + blocksize])
    return splitted
cb428b555cc08f31a34ccada2db6b0cb09981456
492,754
def image_transform_crop(img, new_shape=(160, 160)):
    """
    Crops an image to new dimensions (assumes you want to keep the centre).

    Parameters
    ----------
    img : np.ndarray
        Input image
    new_shape : tuple
        Expected new shape for image

    Returns
    -------
    np.ndarray
        Reshaped image
    """
    delt_0 = (img.shape[0] - new_shape[0]) // 2
    delt_1 = (img.shape[1] - new_shape[1]) // 2
    return img[delt_0:img.shape[0] - delt_0, delt_1:img.shape[1] - delt_1]
b7e0f3ab30b813addd5ce2a556b693ee1ab38df8
122,151
def UppercaseMutator(current, value):
    """Uppercase the value."""
    return current.upper()
ee16c6a92cd7e3f4d55859d22ab7a23c0a39af9b
624,764
def calc_gene_length(file_path):
    """Read in a FASTA file and calculate sequence length.

    Assumes a typical one line header for a FASTA file.

    **Parameters**

    file_path : str
        Path to FASTA file

    **Returns**

    seq_len : int
        length of gene
    """
    with open(file_path) as handle:
        handle.readline()  # skip FASTA header
        seq = handle.read()  # read file into single string
        seq = seq.replace('\n', '')  # remove line breaks
        seq_len = len(seq)
    return seq_len
f95504a4aebd8b4a81373aaa7a342a98a5579068
88,726
def make_valid_latex_string(s: str) -> str:
    """Take a string and make it a valid latex math string by wrapping it
    with "$" when necessary. Of course, strings are only wrapped with the
    "$" if they are not already present.

    Args:
        s: The input string.
    Returns:
        The properly formatted string.
    """
    if s == "":
        return s
    if not s.startswith("$"):
        s = "$" + s
    if not s.endswith("$"):
        s = s + "$"
    return s
9f2355cb7a4f59efa47f40a746149804ac5f0229
79,804
def user_query(query: str = 'save?', entry: list = ['y', 'n']):
    """
    user_query(query, entry)

    Args:
        query (str, optional): prompt the user with this. Defaults to 'save?'.
        entry (list, optional): what they type for yay/nay. Defaults to
            ['y', 'n'].

    Returns:
        [bool]: pick, True/False for entry[0, 1] respectively
        [bool]: stop, True indicates the user typed 'exit'
    """
    while True:
        user_save = input(f"{query} {entry[0]}/{entry[1]}: ").lower()
        if user_save == entry[0]:
            pick, stop = (True, False)
        elif user_save == entry[1]:
            pick, stop = (False, False)
        elif user_save == "exit":
            pick, stop = (False, True)
        else:
            print(f"only type {entry[0]} or {entry[1]}. "
                  "You can exit by typing 'exit'")
            continue
        break
    return (pick, stop)
f0141bd6780875d9db02c4a390a04bb1ef34a189
449,964
def filter_dictionary(dict_data, values):
    """
    Create a subset of a dictionary given keys found in a list.

    The incoming dictionary should have keys that point to dictionaries.
    Create a subset of that dictionary by using the same outer keys but
    only using the inner key:val pair if that inner key is found in the
    values list.

    Parameters:
        dict_data (dictionary): a dictionary that has keys which point to
            dictionaries.
        values (list): a list of keys to keep from the inner dictionaries
            of 'dict_data'

    Returns:
        a dictionary
    """
    new_dict = {}
    for key, val in dict_data.items():  # iteritems() is Python 2 only
        new_dict[key] = {}
        for sub_key, sub_val in val.items():
            if sub_key in values:
                new_dict[key][sub_key] = sub_val
    return new_dict
6b5a0266cd9ff2ef484451bc73231d9e7d9a5a1b
356,601
def cli(ctx, dataset_collection_id, maxwait=12000, interval=3,
        proportion_complete=1.0, check=True):
    """Wait until all or a specified proportion of elements of a dataset
    collection are in a terminal state.

    Output:
        Details of the given dataset collection.
    """
    return ctx.gi.dataset_collections.wait_for_dataset_collection(
        dataset_collection_id,
        maxwait=maxwait,
        interval=interval,
        proportion_complete=proportion_complete,
        check=check,
    )
1f19003c2666acbbd2d144bb641433e516a7832a
93,098
def get_ttt(jd: float):
    """
    Get Julian centuries.

    Args:
        jd (float): Julian day number.

    Returns:
        (float): Julian centuries.
    """
    return (jd - 2451545.0) / 36525.0
1a934050e3303f522619f2723fdfa3d5c8dc46b6
508,457
def format_includes(includes):
    """
    Format includes for the api query (to {'include': <foo>,<bar>,<bat>}).

    :param includes: str or list: can be None, related resources to include
    :return: dict: the formatted includes
    """
    result = None
    if isinstance(includes, str):
        result = includes
    elif isinstance(includes, list):
        result = ','.join(includes)
    return {'include': result} if result is not None else {}
9f15ac9b767b6612794bec7b14427b6f90d4a734
39,067
def lerp(a, b, t):
    """Linear interpolation."""
    return t * b + (1.0 - t) * a
d637b00ba383c48c5e588bada9063320ba2ca5d7
176,323
def read_word_list(filename):
    """
    This function reads the file and returns the words read. It expects a
    file where each word is in a line.
    """
    # Initialize words list
    words = []

    # Quick'n'dirty file reading
    with open(filename) as words_file:
        for line in words_file:
            words.append(line.strip())

    # and we're done
    return words
e93af9a9d169fb7f2f066c63eccc29f0bbbbf176
194,404
import hashlib


def hash100(s: str):
    """
    Hash a string into 1~100.

    Useful when you split a dataset into subsets.
    """
    h = hashlib.md5(s.encode())
    return int(h.hexdigest(), base=16) % 100 + 1
0686d599e42c104d487462682340f04bd3fe94b4
34,069
def int_to_bits(int_str, qubit_count):
    """
    Convert a number (possibly in string form) to a readable bit format.

    For example, the result '11', which means both qubits were measured as
    1, is returned by the API as "3". This converts that output to the
    readable version.

    Args:
        int_str: A string or integer in base 10 that represents the
            measurement outcome.
        qubit_count: The number of qubits, used to zero-pad the bit string.

    Returns:
        A string of 0's and 1's
    """
    # convert to an integer, then generate the binary string
    # remove the "0b" prefix from the binary string
    # then pad (using zfill) with 0's
    return bin(int(int_str))[2:].zfill(qubit_count)
1a2d25281c2ec9ff4e885daeba0ff76448bfbf0a
287,691
def Yes_No(value, collection):
    """Map True -> ("Yes", green background), False -> ("No", red
    background), and None -> ("-", no color)."""
    if value:
        return ("Yes", "black; background-color:green;")
    if value is None:
        return ("-", "black")
    return ("No", "black; background-color:red;")
856b729ebc5eae2a6e0d1e8ad5a8abc1a649dab3
438,298
def AddFieldToUpdateMask(field, patch_request):
    """Adds name of field to update mask."""
    if patch_request is None:
        return None
    update_mask = patch_request.updateMask
    if update_mask:
        if update_mask.count(field) == 0:
            patch_request.updateMask = update_mask + ',' + field
    else:
        patch_request.updateMask = field
    return patch_request
46634d00751fb05b19199caf67983d4e6275c21c
282,206
def get_predictor_cost(x, y, rho, sens, spec, cov):
    """
    Calculate the predictor's cost on a point (x, y) based on the rho and
    its sensitivity, specificity and coverage.
    """
    return (x * ((rho * cov * (1 - sens)) + cov - 1)
            + y * (((1 - rho) * cov * (1 - spec)) + cov - 1)
            + 1 - cov)
f3e88b9cd16a26e0de8f5c54ae12fb0800b1e17d
100,651
def extend_to_blank(l):
    """Transform ['', 'a', '', 'b', ''] into ['', 'a', 'a', 'b', 'b']."""
    out, ext = [], ''
    for i in l:
        ext = i or ext
        out.append(ext)
    return out
72cb2e398df24d7360d69e1f382fd8e62915b786
358,817
def tokey(item):
    """Key function for sorting filenames."""
    return int(item.split("_")[-1].split(".")[0])
fd53b1306081e1fc63312f2d491cd4994c5d158a
253,939
def cryostat_losses(Acr, dT=228.0):
    """
    Calculating the cryostat losses according to
    doi: 10.1088/1742-6596/97/1/012318

    :param Acr: the surface of the cryostat  # m2
    :param dT: temperature difference between the maximum outside
        temperature (40 C) and the working temp
    :return: losses of the cryostat
    """
    k_th = 2.0 * 1e-3  # W/(mK)
    d_th = 50.0 * 1e-3  # thermal insulation thickness: 50 mm, in m
    # the windings are considered to work at 65 K -> dT = 293 - 65 = 228
    return round(k_th / d_th * Acr * 1e-6 * dT, 2)
be065f91f2a0ad943b80269bcb55bea4ab30f2d0
376,518
def first(iterable, or_=None):
    """Get the first element of an iterable.

    Just semantic sugar for next(it, None).
    """
    return next(iterable, or_)
ef7f2d9834defe477830ca7dd5851644e904d2e7
64,824
def selection_sort(items):
    """Implementation of selection sort where a given list of items is
    sorted in ascending order and returned."""
    for current_position in range(len(items)):
        # assume the current position is the smallest value's position
        smallest_item_position = current_position
        # iterate through all elements from current position to the end,
        # including current position
        for location in range(current_position, len(items)):
            # check if an item exists whose value is less than the value at
            # the most recent smallest item position
            if items[location] < items[smallest_item_position]:
                smallest_item_position = location
        # Interchange the values of the current position and the smallest
        # value found in the rest of the list
        temporary_item = items[current_position]
        items[current_position] = items[smallest_item_position]
        items[smallest_item_position] = temporary_item
    return items
45d94530d5a43175da7ea60bb0ee22ce24d6bded
431,244
import struct


def _read_entry(f, type):
    """Read a protobuf entry from a pirate log file.

    The pirate stores protobuf messages with the length prepended to the
    message as a uint32_t. This function first reads the uint32_t to
    determine the length of the entry and then the message itself. The
    message is then parsed using the appropriate protobuf class.

    Arguments:
        f    - Input file.
        type - Protobuf entry type.

    Returns:
        Message object on success, None on orderly EOF.

    Exceptions:
        RuntimeError on EOF in the middle of a message.
    """
    # Read the entry length (uint32_t)
    packet_head = f.read(4)
    if len(packet_head) == 0:
        return None
    elif len(packet_head) != 4:
        raise RuntimeError('Unexpected EOF while reading packet length')
    (length, ) = struct.unpack("@I", packet_head)

    pb_message = type()
    packet = f.read(length)
    if len(packet) == 0:
        return None
    elif len(packet) != length:
        raise RuntimeError('Unexpected EOF while reading packet')
    pb_message.ParseFromString(packet)
    return pb_message
bbcc78fae9571cf66dd7b88be8bbc572d57bf98c
232,831
def join_string(value):
    """Can be used to join strings with "-" to make id or class."""
    return value.lower().replace(' ', '-')
43fa0ebc7906d2f3963731cd8f74e24b82223bd2
557,856
import torch


def entropic_loss(pnl) -> torch.Tensor:
    """
    Return entropic loss function, which is a negative of the expected
    exponential utility:

        loss(pnl) = -E[u(pnl)], u(x) = -exp(-x)

    Parameters
    ----------
    pnl : torch.Tensor, shape (*)
        Profit-loss distribution.

    Returns
    -------
    entropic_loss : torch.Tensor, shape (,)

    Examples
    --------
    >>> pnl = -torch.arange(4.0)
    >>> entropic_loss(pnl)
    tensor(7.7982)
    """
    return -torch.mean(-torch.exp(-pnl))
7a8ea92d344fbb3e9effb2c3af9214f28773e12e
457,506
def reconstruct_path_to_destination(prev, end):
    """
    Constructs an in-order sequence of (x, y) coordinates (list of tuples)
    to the end destination using the mapping from nodes to their
    predecessors (prev).
    """
    path = [end]
    curr = end
    while curr in prev:
        curr = prev[curr]
        path.insert(0, curr)
    return path
7a77fae4d438ec1f2a48b371ac16be31cda0e7db
484,798
def get_high_pin_idx_pull_up(pin_input_values):
    """Returns the index of the first high pin value.

    Assumes Pull UP mode, i.e. a stronger force will pull the pin value
    down to 0. If no high pins were found, None is returned."""
    high_pin_idx = (
        pin_input_values.index(False) if False in pin_input_values else None
    )
    return high_pin_idx
92cad383e5bfa7775a800629fc2650c2070177d1
316,169
import csv


def open_metadata_records(fn='nandq_internet_archive.txt'):
    """Open and read metadata records file."""
    with open(fn, 'r') as f:
        # We are going to load the data into a data structure known as a
        # dictionary, or dict. Each item in the dictionary contains several
        # elements as `key:value` pairs. The key matches the column name in
        # the CSV data file, along with the corresponding value in a given
        # item row.

        # Read the data in
        csv_data = csv.DictReader(f)

        # And convert it to a list of data records
        data_records = list(csv_data)

    return data_records
7d75b08f90f4e15636aa21030e065c8cbbe48931
233,853
def get_trial_results(trial):
    """Format results from a `orion.core.worker.trial.Trial` using standard
    structures."""
    results = dict()
    lie = trial.lie
    objective = trial.objective

    if lie:
        results["objective"] = lie.value
    elif objective:
        results["objective"] = objective.value
    else:
        results["objective"] = None

    results["constraint"] = [
        result.value for result in trial.results if result.type == "constraint"
    ]
    grad = trial.gradient
    results["gradient"] = tuple(grad.value) if grad else None

    return results
1569b0b0f77e5c3c5416c6a3a4e1e181b4dadda9
247,141
import math


def _raw_stat(base: int, ev: int, iv: int, level: int,
              nature_multiplier: float) -> int:
    """Converts to raw stat.

    :param base: the base stat
    :param ev: Stat Effort Value (EV)
    :param iv: Stat Individual Values (IV)
    :param level: pokemon level
    :param nature_multiplier: stat multiplier of the nature
        (either 0.9, 1 or 1.1)
    :return: the raw stat
    """
    s = math.floor(
        (5 + math.floor((math.floor(ev / 4) + iv + 2 * base) * level / 100))
        * nature_multiplier
    )
    return int(s)
0f3947ce5b73d629acc830f6f6275013e1de995f
559,850
def _target_ads_in_campaign_to_user_list(
    client, customer_id, campaign_id, user_list_resource_name
):
    """Creates a campaign criterion that targets a user list with a campaign.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a str client customer ID used to create a campaign
            criterion.
        campaign_id: a str ID for a campaign used to create a campaign
            criterion that targets members of a user list.
        user_list_resource_name: a str resource name for a user list.

    Returns:
        a str resource name for a campaign criterion.
    """
    campaign_criterion_operation = client.get_type("CampaignCriterionOperation")
    campaign_criterion = campaign_criterion_operation.create
    campaign_criterion.campaign = client.get_service(
        "CampaignService"
    ).campaign_path(customer_id, campaign_id)
    campaign_criterion.user_list.user_list = user_list_resource_name

    campaign_criterion_service = client.get_service("CampaignCriterionService")
    response = campaign_criterion_service.mutate_campaign_criteria(
        customer_id=customer_id, operations=[campaign_criterion_operation]
    )
    resource_name = response.results[0].resource_name
    print(
        "Successfully created campaign criterion with resource name "
        f"'{resource_name}' targeting user list with resource name "
        f"'{user_list_resource_name}' with campaign with ID {campaign_id}"
    )
    return resource_name
    # [END setup_remarketing_4]
28944ade1b3d2d3e94fc39bf1fffc1887d8be868
82,012
import ipaddress


def cidr_stix_pattern_producer(data):
    """Convert a CIDR from TC to a STIX pattern."""
    if isinstance(ipaddress.ip_network(data.get('summary'), strict=False),
                  ipaddress.IPv6Network):
        return f"[ipv6-addr:value = '{data.get('summary')}']"
    return f"[ipv4-addr:value = '{data.get('summary')}']"
56f7d94fef6d913d6c2bd3afa48d747dc5a4c549
110,115
def raman_intensity(R):
    """
    Scattering intensity.

    Args:
        R (3x3 numpy array): Susceptibility tensor

    Returns:
        float: scattering intensity according to Eq 7,
        Phys Rev B 73, 104304 (2006)
    """
    return (
        4 * (R[0, 0] ** 2 + R[1, 1] ** 2 + R[2, 2] ** 2)
        + 7 * (R[0, 1] ** 2 + R[0, 2] ** 2 + R[1, 2] ** 2)
        + R[0, 0] * R[1, 1]
        + R[0, 0] * R[2, 2]
        + R[1, 1] * R[2, 2]
    )
82e7f4caed5fe38f15c60a585aa31bd33498d30d
602,639
def kilometer2meter(dist):
    """Function that converts km to m."""
    return dist * 1000
508fdf4758353d4a128e95c3933ac30f5fe23988
515,490
def get_lastkey(path, delimiter="/"):
    """Return name of the rightmost fragment in path."""
    return path.split(delimiter)[-1]
0e2287c95974466fae70b15c75aca11d5ed02795
662,580
def find(predicate, array):
    """
    find_.find(list, predicate, [context]) Alias: detect

    Looks through each value in the list, returning the first one that
    passes a truth test (predicate), or undefined if no value passes the
    test. The function returns as soon as it finds an acceptable element,
    and doesn't traverse the entire list.

    var even = _.find([1, 2, 3, 4, 5, 6], function(num){
        return num % 2 == 0;
    });
    => 2
    """
    for x in array:
        if predicate(x):
            return x
    return None
33d12b252336cf32399f707b9bbfb8a82df89287
367,708
def fp_tuple(fp):
    """Build a string that uniquely identifies a key."""
    # An SSH public key is uniquely identified by the tuple [length, hash, type]
    # fp should be a list of results of the `ssh-keygen -l -f` command
    return ' '.join([fp[0], fp[1], fp[-1]])
b282997ab21cf3a10fb3b8b305fc855077f26081
386,600
def rc_to_xy(row, col, rows):
    """
    Convert from (row, col) coordinates (eg: numpy array) to (x, y)
    coordinates (bottom left = 0,0).

    (x, y) convention:
        * (0,0) in bottom left
        * x +ve to the right
        * y +ve up

    (row, col) convention:
        * (0,0) in top left
        * row +ve down
        * col +ve to the right

    Args:
        row (int): row coordinate to be converted
        col (int): col coordinate to be converted
        rows (int): Total number of rows

    Returns:
        tuple: (x, y)
    """
    x = col
    y = rows - row - 1
    return x, y
529f88a99ba5c3143c528df3eb844be834568c20
654,117
def _split_repo_url(url):
    """Split a repository URL into an org / repo combination."""
    if "github.com/" in url:
        end = url.split("github.com/")[-1]
        org, repo = end.split("/")[:2]
    else:
        raise ValueError(
            f"Currently Binder/JupyterHub repositories must be on GitHub, "
            f"got {url}"
        )
    return org, repo
47a7a80cbf4d58b2874472ae337eca3a1d6a12ac
526,987
from typing import Iterable

import six


def is_iterable(arg):
    """
    Returns True if object is an iterable and is not a string,
    False otherwise.
    """
    return isinstance(arg, Iterable) and not isinstance(arg, six.string_types)
07664a14d6ac3fbf754d37ae4ae1057a6bfa31c3
355,414
def calc_temp(Data_ref, Data):
    """
    Calculates the temperature of a data set relative to a reference.
    The reference is assumed to be at 300K.

    Parameters
    ----------
    Data_ref : DataObject
        Reference data set, assumed to be 300K
    Data : DataObject
        Data object to have the temperature calculated for

    Returns
    -------
    T : uncertainties.ufloat
        The temperature of the data set
    """
    T = 300 * ((Data.A * Data_ref.Gamma) / (Data_ref.A * Data.Gamma))
    Data.T = T
    return T
15c4f998c9581db6336648d8176a5ec33874e492
465,300
def time_mirror(clip):
    """
    Returns a clip that plays the current clip backwards.
    The clip must have its ``duration`` attribute set.
    The same effect is applied to the clip's audio and mask if any.
    """
    return clip.time_transform(lambda t: clip.duration - t - 1,
                               keep_duration=True)
4f7283cf53090946ed41fc1c736a7c84f7cfba37
690,306
import posixpath


def normalize_path_posix(path):
    """
    normalize_path_posix(path) -> str

    Turn path into a relative POSIX path with no parent directory
    references.
    """
    return posixpath.relpath(posixpath.normpath(posixpath.join('/', path)),
                             '/')
ca816b6e4902b06c5a76c10a24fc0de64cf1ac3d
112,510
def filter_claims_by_date(claims_data, from_date, to_date):
    """Return claims falling in the specified date range."""
    return [
        claim for claim in claims_data
        if (from_date <= claim.clm_from_dt <= to_date)
    ]
d1568d0fd52382bdb3f1f02414f591d5f4da3596
7,297
import string


def get_filename_from_title(title):
    """Generate simpler file name from title.

    Arguments:
        title {string} -- Simplified title to be used as the markdown
            filename
    """
    printable = set(string.ascii_letters)
    printable.add(' ')
    return (''.join(filter(lambda x: x in printable, title))
            .strip().replace(' ', '_') + '.md')
05917929adebc45c2e7a3b3294ce607e3400ecd3
221,005
def build_fib_iterative(n):
    """
    n: number of elements in the sequence

    Returns a Fibonacci sequence of n elements by iterative method.
    """
    if n == 1:
        return [0]
    elif n == 2:
        return [0, 1]
    else:
        fib = [0, 1]
        count = 2
        while count < n:
            last_elem = fib[-1] + fib[-2]
            fib.append(last_elem)
            count += 1
        return fib
edd2e95db7a7e9039f45dcd70a19521ccbe4c904
213,774
def create_log_message_payload(message: str):
    """Create and return "logMessage" dictionary to send to the Plugin
    Manager.

    As per Stream Deck documentation: Logs are saved to disk per plugin in
    the folder `~/Library/Logs/StreamDeck/` on macOS and
    `%appdata%\\Elgato\\StreamDeck\\logs\\` on Windows. Note that the log
    files are rotated each time the Stream Deck application is relaunched.

    Args:
        message (str): Message to log in Stream Deck log files.

    Returns:
        dict: Dictionary with payload to save message to Stream Deck log
        file.
    """
    return {
        "event": "logMessage",
        "payload": {
            "message": message
        }
    }
bf0659ea3654aca6b82d0f37e3e6cbdb523334f8
321,943
def get_object_name_by_faceid(oEditor, faceid):
    """
    Return the object name corresponding to the given face ID.

    Parameters
    ----------
    oEditor : pywin32 COMObject
        The HFSS editor in which the operation will be performed.
    faceid : int
        The face ID of the given face.

    Returns
    -------
    objname : str
        The name of the object.
    """
    return oEditor.GetObjectNameByFaceID(faceid)
4803b3ca95ca054e1534ea5cc2b0bc563ee57de2
143,934
def exchange_rate_format(data):
    """Return a dict with the exchange rate data formatted for
    serialization."""
    return {
        'provider_1': {
            'name': 'dof',
            'rate': data.dof_rate,
            'date': data.dof_date,
            'last_updated': data.dof_last_updated,
        },
        'provider_2': {
            'name': 'fixer',
            'rate': data.fixer_rate,
            'date': data.fixer_date,
            'last_updated': data.fixer_last_updated,
        },
        'provider_3': {
            'name': 'banxico',
            'rate': data.banxico_rate,
            'date': data.banxico_date,
            'last_updated': data.banxico_last_updated,
        },
        'created': data.created,
    }
47dbc58bf1c49d14372333cb38ef4fc8ea624490
57,600
def _use_static_reflect_func(opt):
    """
    Whether a static reflect function is to be used.
    """
    if opt.algorithm == opt.algo_table_driven:
        return False
    elif opt.reflect_out is not None and opt.algorithm == opt.algo_bit_by_bit_fast:
        return False
    else:
        return True
558914c91d0cec0b6a7483464abe20014c329ad1
552,161
def weak_scaling(timing_stats, scaling_var, data_points):
    """
    Generate data for plotting weak scaling. The data points keep a
    constant amount of work per processor for each data point.

    Args:
        timing_stats: the result of the generate_timing_stats function
        scaling_var: the variable to select from the timing_stats
            dictionary (can be provided in configurations via the
            'scaling_var' key)
        data_points: the list of size and processor counts to use as data
            (can be provided in configurations via the
            'weak_scaling_points' key)

    Returns:
        A dict of the form:
            {'bench' : {'mins' : [], 'means' : [], 'maxs' : []},
             'model' : {'mins' : [], 'means' : [], 'maxs' : []},
             'proc_counts' : []}
    """
    timing_data = dict()
    proc_counts = []
    bench_means = []
    bench_mins = []
    bench_maxs = []
    model_means = []
    model_mins = []
    model_maxs = []

    for point in data_points:
        size = point[0]
        proc = point[1]
        try:
            model_data = timing_stats[size][proc]['model'][scaling_var]
            bench_data = timing_stats[size][proc]['bench'][scaling_var]
        except KeyError:
            continue
        proc_counts.append(proc)
        model_means.append(model_data['mean'])
        model_mins.append(model_data['min'])
        model_maxs.append(model_data['max'])
        bench_means.append(bench_data['mean'])
        bench_mins.append(bench_data['min'])
        bench_maxs.append(bench_data['max'])

    timing_data['bench'] = dict(mins=bench_mins, means=bench_means,
                                maxs=bench_maxs)
    timing_data['model'] = dict(mins=model_mins, means=model_means,
                                maxs=model_maxs)
    timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts]
    return timing_data
aa0e736f505cbc41534d3d94ad97f5fe0ca3c2e7
338,929
def count_digit(k, n):
    """
    Count the number of k's between 0 and n. k can be 0 - 9.

    :param k: given digit
    :type k: int
    :param n: given number
    :type n: int
    :return: number of k's between 0 and n
    :rtype: int
    """
    count = 0
    for i in range(n + 1):
        # treat number as string, and
        # count the digit character in the string
        count += str(i).count(str(k))
    return count
c232eb11a5fd7807ab468cea78f95ffe6d122e47
233,885
def update_parameters(parameters, gradients, learning_rate):
    """
    Performs a gradient descent update.

    :param parameters: weights and bias units
    :param gradients: partial derivatives w.r.t. the weights and the bias
        units
    :param learning_rate: gradient descent step size
    :return: updated parameters
    """
    L = len(parameters) // 2
    for l in range(1, L + 1):
        parameters['W' + str(l)] = (parameters['W' + str(l)]
                                    - learning_rate * gradients['dW' + str(l)])
        parameters['b' + str(l)] = (parameters['b' + str(l)]
                                    - learning_rate * gradients['db' + str(l)])
    return parameters
6b833ab6fe868bea9b3a473d4349286a0c79f1da
550,354
def quote(input_str):
    """Adds single quotes around a string."""
    return "'{}'".format(input_str)
2756faeadffa84a8ff09a8ce8ea4b0b8ba328d24
52,612
def add_linear_gather_for_mode(
        m, Fr, Ft, Fz, exptheta_m, Fr_grid, Ft_grid, Fz_grid,
        iz_lower, iz_upper, ir_lower, ir_upper,
        S_ll, S_lu, S_lg, S_ul, S_uu, S_ug):
    r"""
    Add the contribution of the gathered field from azimuthal mode `m` to
    the fields felt by one macroparticle (`Fr`, `Ft`, `Fz`), using linear
    weights. (The docstring is raw so that ``\theta`` is not read as a tab.)

    Parameters:
    -----------
    m: int
        The index of the azimuthal mode that is added.
    Fr, Ft, Fz: floats
        The fields felt by one macroparticle, which represent either E or B
        (before the contribution of mode `m` has been added)
    exptheta_m: complex
        The complex azimuthal factor $e^{-i m \theta}$ where $\theta$ is
        the azimuthal position of the macroparticle considered.
    Fr_grid, Ft_grid, Fz_grid: 2darrays of complexs
        The fields on the interpolation grid for mode `m`
    iz_lower, iz_upper, ir_lower, ir_upper: ints
        Lower and upper index in z and r from which the macroparticle
        considered should gather the fields (in the arrays F*_grid)
    S_ll, S_lu, S_lg, S_ul, S_uu, S_ug: floats
        The weights with which the fields are gathered, for the
        macroparticle considered. `S_lg` and `S_ug` are used for fields
        gathered from below the axis.

    Returns:
    --------
    Fr, Ft, Fz: floats
        The fields felt by one macroparticle, which represent either E or B
        (after the contribution of mode `m` has been added)
    """
    # Create temporary variables for the "per mode" gathering
    Fr_m = 0.j
    Ft_m = 0.j
    Fz_m = 0.j
    # Lower cell in z, Lower cell in r
    Fr_m += S_ll * Fr_grid[iz_lower, ir_lower]
    Ft_m += S_ll * Ft_grid[iz_lower, ir_lower]
    Fz_m += S_ll * Fz_grid[iz_lower, ir_lower]
    # Lower cell in z, Upper cell in r
    Fr_m += S_lu * Fr_grid[iz_lower, ir_upper]
    Ft_m += S_lu * Ft_grid[iz_lower, ir_upper]
    Fz_m += S_lu * Fz_grid[iz_lower, ir_upper]
    # Upper cell in z, Lower cell in r
    Fr_m += S_ul * Fr_grid[iz_upper, ir_lower]
    Ft_m += S_ul * Ft_grid[iz_upper, ir_lower]
    Fz_m += S_ul * Fz_grid[iz_upper, ir_lower]
    # Upper cell in z, Upper cell in r
    Fr_m += S_uu * Fr_grid[iz_upper, ir_upper]
    Ft_m += S_uu * Ft_grid[iz_upper, ir_upper]
    Fz_m += S_uu * Fz_grid[iz_upper, ir_upper]
    # Add the fields from the guard cells
    if ir_lower == ir_upper == 0:
        flip_factor = (-1.) ** m
        # Lower cell in z
        Fr_m += -flip_factor * S_lg * Fr_grid[iz_lower, 0]
        Ft_m += -flip_factor * S_lg * Ft_grid[iz_lower, 0]
        Fz_m += flip_factor * S_lg * Fz_grid[iz_lower, 0]
        # Upper cell in z
        Fr_m += -flip_factor * S_ug * Fr_grid[iz_upper, 0]
        Ft_m += -flip_factor * S_ug * Ft_grid[iz_upper, 0]
        Fz_m += flip_factor * S_ug * Fz_grid[iz_upper, 0]
    # Add the contribution from mode m to Fr, Ft, Fz
    # (Take into account factor 2 in the definition of azimuthal modes)
    if m == 0:
        factor = 1.
    else:
        factor = 2.
    Fr += factor * (Fr_m * exptheta_m).real
    Ft += factor * (Ft_m * exptheta_m).real
    Fz += factor * (Fz_m * exptheta_m).real
    return (Fr, Ft, Fz)
8ad3a174a8bb4a93e207fea935ddf376009ada25
269,887
def checkForQuote(file):
    """Add quotes if spaces are found in the file name."""
    f = str(file)
    if f.find(" ") >= 0:
        return "'" + f + "'"
    return f
0423e565cde93c4ce1d4afee6beeae8b3176b8fd
199,890
import torch


def SNRPSA(s, s_hat):
    """Computes the SNR_PSA as proposed in [1], with no compression and a
    saturation value of 20.

    References
    ----------
    .. [1] Erdogan, Hakan, and Takuya Yoshioka. "Investigations on Data
        Augmentation and Loss Functions for Deep Learning Based
        Speech-Background Separation." Interspeech. 2018.

    Parameters:
        s: list of targets of any shape, with len(s) = #sources
        s_hat: list of corresponding estimates
    """
    EPS = torch.finfo(s[0].dtype).eps
    den = [x_hat - x for x, x_hat in zip(s, s_hat)]
    prima = [-10 * torch.log10((x ** 2).sum() / (xa ** 2).sum() + EPS)
             for x, xa in zip(s, den)]
    clipping = [20 * torch.tanh(x / 20) for x in prima]
    return torch.stack(clipping).sum() / len(clipping)
9780ff2715fc948b48a0969445dd5b2ef80b91eb
474,822
def is_indexed_foreign_key(constraint):
    """
    Whether or not the given foreign key constraint's columns have been
    indexed.

    :param constraint: ForeignKeyConstraint object to check the indexes
    """
    return any(
        set(column.name for column in index.columns) == set(constraint.columns)
        for index in constraint.table.indexes
    )
41811f8d18fc16951dfc8d5d3306279eab59c416
253,796
def define_activation(df, targets, input_columns, test_blocks,
                      n_samples=None, exclude=None, scale=1):
    """
    Function to build training objects for neural networks from a DataFrame.

    Parameters
    ----------
    df: DataFrame
    targets: list of strings
        list of targets (values in df.target)
    input_columns: list of strings
        columns to include as inputs
    test_blocks: list of numerics
        steps to include
    n_samples: int, optional
        exact number of samples to use
    exclude: int, optional
        exact number of initial samples to exclude
    scale: list of floats, optional
        divisors to scale inputs, one per input column

    Returns
    -------
    inputs: list of lists
        inputs[]: list of numeric input values
    """
    scale = [
        df[input_column].astype(float).max() for input_column in input_columns
    ]
    inputs = []
    num_targets = len(targets)
    df = df[(df.ontarget) & (df.step.isin(test_blocks))].copy()
    for i, target in enumerate(targets):
        sample_n = 0
        for row in df[df.target == target][input_columns].values.tolist():
            if exclude and sample_n < exclude:
                sample_n = sample_n + 1
                continue
            elif (not n_samples) or (sample_n < n_samples):
                inputs.append(
                    [float(num) / float(scale[row_i])
                     for row_i, num in enumerate(row)]
                )
                sample_n = sample_n + 1
    return (inputs)
d87d0379d724b7648f7c6a10442bb790fb1feb53
443,465
def getDirectChildrenWithName(parent, name):
    """
    Fetch *direct* sub-nodes of a `parent` DOM tree. These nodes must have
    a name matching `name`.

    Return a list of DOM Nodes.
    """
    return [
        node for node in parent.childNodes
        if node.nodeType == node.ELEMENT_NODE and node.localName == name
    ]
817518b3ca595238b290e4853f49f5df6cec64a2
303,003
def shpBBoxExpand(box, x, y=None):
    """
    Given a shapely bounding box, return a new one expanded by the given
    amount. If y is not supplied, it is the same as x.
    """
    if y is None:
        y = x
    return (box[0] - x, box[1] - y, box[2] + x, box[3] + y)
c0a308a483b1a3a39dbf853b1d4d1e484667555b
464,883
import textwrap


def dedent(ind, text):
    """
    Dedent text to the specific indentation level.

    :param ind: common indentation level for the resulting text (number of
        spaces to append to every line)
    :param text: text that should be transformed.
    :return: ``text`` with all common indentation removed, and then the
        specified amount of indentation added.
    """
    text2 = textwrap.dedent(text)
    if ind == 0:
        return text2
    indent_str = " " * ind
    return "\n".join(indent_str + line for line in text2.split("\n"))
271b9fd270d78c4bc952af31d3d9be0ff6bdab73
1,301
import re


def split_composite_term(x, joint_re='with'):
    """Break terms that are composites padding several words without space.
    This has been observed in one case study but may not be prevalent.

    Args:
        x (str): the term to split if matching, e.g. 'claywithsand' to
            'clay with sand'
        joint_re (str): regular expression for the word used as fusing
            join, typically 'with'

    Returns:
        split wording (str): tokens split from the joining term.
    """
    return re.sub("([a-z]+)(" + joint_re + ")([a-z]+)", r"\1 \2 \3", x,
                  flags=re.DOTALL)
73292e684b00bba496f8d92871453a90e88e1384
305,415
import re


def prepare_item(item):
    """
    Prepares the items (each one pair from the list of pairs) to be
    compared. Returns the two items as separate strings.
    """
    item = re.split(r'\] {', item)
    item1 = item[0][2:-1]
    item2 = item[1][1:-2]
    return item1, item2
4b2b410b6647bc9c3cbf3bee3ab168ad5e7d3e0f
359,012
def format_prob(prob, n_samples):
    """Translates a probability into a neat string."""
    if prob < 1 / n_samples:
        return "-"
    elif prob > (n_samples - 1) / n_samples:
        return "✓"
    elif prob < 0.001:
        return "<0.1%"
    elif prob > 0.999:
        return ">99.9%"
    else:
        return f"{prob*100:.1f}%"
296ac2cc177cde1ebc4954435c6a041c7e4de22a
491,762
import json


def get_person_person_key(person1_id: str, person2_id: str):
    """
    Get a unique string from the 2 person IDs in a deterministic way.

    :param person1_id: the ID of the first person
    :param person2_id: the ID of the second person
    :return: the derived unique key
    """
    return json.dumps(sorted([person1_id, person2_id]))
cc26c6b6ef632c2ac3cffa085a6aeb6960b047fc
355,160
def localize(value, locale):
    """
    Return the value appropriate for the current locale.

    This is used to retrieve the appropriate localized version of a value
    defined within a unit's dictionary. If 'value' is a dictionary, we
    assume it is a mapping of locale names to values, so we select the
    appropriate dictionary entry based on the locale. Otherwise, we return
    'value' directly, as the value hasn't been localized.
    """
    if isinstance(value, dict):
        return value.get(locale)
    else:
        return value
641b6598aef2486b22846b8b799a344c7ae34e4f
553,316
def _dusort(seq, decorator, reverse=False):
    """Returns a sorted copy of `seq`. The sorting is done over a
    decoration of the form `decorator (p), i, p for (i, p) in
    enumerate (seq)`.

    >>> _dusort ([1, 3, 5, 2, 4], lambda e : -e)
    [5, 4, 3, 2, 1]
    """
    temp = [(decorator(p), i, p) for (i, p) in enumerate(seq)]
    temp.sort()
    result = [p[-1] for p in temp]
    if reverse:
        result.reverse()
    return result
f4d9afab55b7bcceabf96bfe78c8b6d91c89c55c
544,025
def create_n_users(size):
    """Create n users and return as a list."""
    users = []
    for i in range(size):
        users.append({
            "first_name": "First%d" % i,
            "last_name": "First%d" % i,
            "credit_card": i,
            "email": "%dgmai.com" % i,
            "username": "username%d" % i,
            "driver": False,
            "password": "%d" % i
        })
    return users
5828d81ac254f5606d085dbae43ad396ecf077c4
465,555
def get_zero_vector(numBytes):
    """
    Generates a zero vector of a given size.

    :param numBytes:
    :return:
    """
    return bytearray([0] * numBytes).decode('ascii')
b53c021240b060f4eed38bef570b2615a1bc925a
254,990
def fillna(df, cols=None, fill_val='__missing__'):
    """
    A wrapper for the pandas fillna function.

    Args:
        df - a pandas DataFrame
        cols - None or list of column names
            if None, fillna is called on the entire DataFrame
            if a list of columns, fillna is called only on those columns
        fill_val - a value used to fill in any NA elements

    Returns:
        a DataFrame with NA values replaced with fill_val
    """
    if cols is None:
        df = df.fillna(fill_val)
    else:
        df[cols] = df[cols].fillna(fill_val)
    return df
b3e6ddc9de19203aff14008e7a577faf101d0d9d
288,161
def harmonize_name(name, name_lookup):
    """Looks up the harmonized school name.

    Parameters
    ----------
    name : str
        The school name to harmonize.
    name_lookup : iterable of two-value array-likes
        School names where the first value of each item is the harmonized
        name and the second value is a list of different versions of that
        name.

    Returns
    -------
    str
        Harmonized name
    """
    name_harmonized = [k for k, v in name_lookup if name.upper() in v]
    try:
        assert len(name_harmonized) == 1
    except AssertionError:
        raise Exception("Could not harmonize school {}".format(name))
    # Return unpacked harmonized name
    return name_harmonized[0]
cd9d1437850ad7852136dbf8f5713cbeec3217ac
537,436
def calculate_total_votes(poll):
    """Calculate the total number of votes of a poll."""
    total = 0
    for vote in poll.votes:
        total += vote.vote_count
    return total
4a6314a0a4ffb1a80c8e5a927631ceb5fa646a4b
679,652
def annot(xcrd, ycrd, zcrd, txt, xancr='left'):
    """
    Annotation structure function for plotly.

    :param xcrd: x position
    :param ycrd: y position
    :param zcrd: z position
    :param txt: annotation name
    :param xancr: anchor position
    :return: annotation as dict
    """
    annotation = dict(showarrow=False, x=xcrd, y=ycrd, z=zcrd, text=txt,
                      xanchor=xancr, font=dict(color='white', size=12))
    return annotation
4f514af6287cccc028ccd2a2ef98f5881d745c06
97,236
def merge_json(data1, data2):
    """Merge lists in two json data together.

    Args:
        data1 (json or None): first json data
        data2 (json): 2nd json data

    Returns:
        TYPE: merged data
    """
    if not data1:
        return data2
    else:
        for i in data2['list']:
            data1['list'][i] = data2['list'][i]
        return data1
613b931a6d83ef65be9fada1a99d34984892bdd0
38,937
def find_empty(board):
    """Function to find empty cells in game board.

    Args:
        board (list): the current game board.

    Returns:
        (i, j) (tuple): empty position (row, column) if found, otherwise
        None.
    """
    for i in range(len(board)):
        for j in range(len(board[0])):
            if board[i][j] == 0:
                return (i, j)  # row, col
    return None
7ac6d976fea3af469dc94107f4441e00d015f46e
585,418
def get_dictionary(key, resources):
    """Return a new dictionary using the given key and resources (key value).

    Keyword arguments:
        key -- the key to use in the dictionary
        resources -- the resources to use as the key value
    """
    return {
        key: resources,
    }
5f7839834be10d2463c7dd085b76686eee0ebeea
126,167
from functools import reduce
import operator


def bitor(*args):
    """
    BITOR num1 num2
    (BITOR num1 num2 num3 ...)

    outputs the bitwise OR of its inputs, which must be integers.
    """
    return reduce(operator.or_, args)
f8d3ca6861e2af4d92765efa964ad4d3fedfc94d
637,949
from pathlib import Path


def dbt_run_results_file(dbt_artifacts_directory: Path) -> Path:
    """
    Get the path to the dbt run results.

    Parameters
    ----------
    dbt_artifacts_directory : Path
        The dbt artifacts directory.

    Returns
    -------
    out : Path
        The dbt run results file.
    """
    run_results_file = dbt_artifacts_directory / "run_results.json"
    return run_results_file
b163c2d5afeb627df2ba5a61eace496736c04dbc
314,793
import re


def find_meta(meta, meta_file):
    """Extract __*meta*__ from `meta_file`."""
    meta_match = re.search(
        r"^__{meta}__\s+=\s+['\"]([^'\"]*)['\"]".format(meta=meta),
        meta_file, re.M
    )
    if meta_match:
        return meta_match.group(1)
    raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
e58372db8500e129bd4d7550fac015413ac7c42c
622,654
def learning_rate_scheduler(epoch, values=(0.1, 0.01, 0.001),
                            breakpoints=(100, 150)):
    """Piecewise constant schedule for learning rate."""
    idx = sum(1 if epoch > b else 0 for b in breakpoints)
    return values[idx]
a30a3270cfd171d3b1948600b7b9edd98f6997f5
378,540