content: string (35 – 762k characters)
sha1: string (40 characters)
id: int64 (0 – 3.66M)
def arg_return_greetings(name):
    """
    Greeting function that takes a name argument and returns a greeting message.

    :param name: name to include in the greeting
    :return: greeting message
    """
    message = f"hello {name}"
    return message
23bab521832358692c3aa653c6138ffee13c4e7a
6,636
from typing import Any


def basic_usage(card_id: str, parent: Any = None):
    """Basic usage of the application, minus the card recognition bits"""
    data = pull_card_data(card_id)
    qt_window = Card(parent, data)
    qt_window.setWindowTitle("YGO Scanner")
    qt_window.show()
    return qt_window
6960697ef12959a7aaaf47607c3026d0925b0a88
6,637
from typing import Dict from typing import Optional from pathlib import Path import json def get_abi( contract_sources: Dict[str, str], allow_paths: Optional[str] = None, remappings: Optional[list] = None, silent: bool = True, ) -> Dict: """ Generate ABIs from contract interfaces. Arguments --------- contract_sources : dict a dictionary in the form of {'path': "source code"} allow_paths : str, optional Compiler allowed filesystem import path remappings : list, optional List of solidity path remappings silent : bool, optional Disable verbose reporting Returns ------- dict Compiled ABIs in the format `{'contractName': [ABI]}` """ final_output = { Path(k).stem: { "abi": json.loads(v), "contractName": Path(k).stem, "type": "interface", "sha1": sha1(v.encode()).hexdigest(), } for k, v in contract_sources.items() if Path(k).suffix == ".json" } for path, source in [(k, v) for k, v in contract_sources.items() if Path(k).suffix == ".vy"]: input_json = generate_input_json({path: source}, language="Vyper") input_json["settings"]["outputSelection"]["*"] = {"*": ["abi"]} try: output_json = compile_from_input_json(input_json, silent, allow_paths) except Exception: # vyper interfaces do not convert to ABIs # https://github.com/vyperlang/vyper/issues/1944 continue name = Path(path).stem final_output[name] = { "abi": output_json["contracts"][path][name]["abi"], "contractName": name, "type": "interface", "sha1": sha1(contract_sources[path].encode()).hexdigest(), } solc_sources = {k: v for k, v in contract_sources.items() if Path(k).suffix == ".sol"} if solc_sources: compiler_targets = find_solc_versions(solc_sources, install_needed=True, silent=silent) for version, path_list in compiler_targets.items(): to_compile = {k: v for k, v in contract_sources.items() if k in path_list} set_solc_version(version) input_json = generate_input_json( to_compile, language="Vyper" if version == "vyper" else "Solidity", remappings=remappings, ) input_json["settings"]["outputSelection"]["*"] = {"*": ["abi"]} output_json = compile_from_input_json(input_json, silent, allow_paths) output_json = {k: v for k, v in output_json["contracts"].items() if k in path_list} final_output.update( { name: { "abi": data["abi"], "contractName": name, "type": "interface", "sha1": sha1(contract_sources[path].encode()).hexdigest(), } for path, v in output_json.items() for name, data in v.items() } ) return final_output
bd9eb4959796d549950f8dd0372ee42c95cd0dd6
6,638
def predict(w, b, X):
    """
    Predict whether the label is 0 or 1 using the learned logistic regression parameters (w, b).

    Parameters:
        w - weights, an array of size (num_px * num_px * 3, 1)
        b - bias, a scalar
        X - data of size (num_px * num_px * 3, number of training examples)

    Returns:
        Y_prediction - a numpy array (vector) containing all predictions (0 | 1) for the pictures in X
    """
    m = X.shape[1]  # number of pictures
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    # compute the probability that a cat appears in the picture
    A = sigmoid(np.dot(w.T, X) + b)

    for i in range(A.shape[1]):
        # convert probability A[0, i] into the actual prediction p[0, i]
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0

    # sanity check with an assertion
    assert(Y_prediction.shape == (1, m))

    return Y_prediction
4e258d7de1788d6da5c8a832ff11a5e8718b5d84
6,639
def delta_C(parcels_old, parcels_new, normed=False):
    """
    Compute the number of vertices that change connected component
    from old parcellation to new parcellation.

    Parameters:
    - - - - -
    parcels_old : dictionary
        old connected component sample assignments
    parcels_new : dictionary
        new connected component sample assignments

    Returns:
    - - - -
    deltaC : int
        number of vertices that changed label
    """
    new = set(map(len, parcels_new.values()))
    old = set(map(len, parcels_old.values()))

    deltaC = np.int32(list(new.difference(old))).sum()

    if normed:
        deltaC = deltaC / np.sum(list(new))

    return deltaC
07f48d30fbaa4b0278871b199d7768c6f2d49508
6,640
def increment(number):
    """Increases a given number by 1"""
    return number + 1
ad10a887ee571182247e76fe41fddd6d53b2dc6a
6,641
def get_recent_added_companies(parser, token):
    """
    Gets any number of the recently added companies.

    Syntax::

        {% get_recent_added_companies [limit] as [var_name] %}

    """
    return base_tag(parser, token, RecentCreatedCompanies)
73c7c0f12951ba9d6b6a3220c2f88055ea027624
6,642
import glob def search_data(templates, pols, matched_pols=False, reverse_nesting=False, flatten=False): """ Glob-parse data templates to search for data files. Parameters ---------- templates : str or list A glob-parsable search string, or list of such strings, with a {pol} spot for string formatting. Ex. ["zen.even.{pol}.LST.*.HH.uv"] pols : str or list A polarization string, or list of polarization strings, to search for. Ex. ["xx", "yy"] matched_pols : boolean If True, only use datafiles that are present for all polarizations. reverse_nesting : boolean If True, flip the nesting of datafiles to be datafile-polarization. By default, the output is polarization-datafile. flatten : boolean If True, flatten the nested output datafiles to a single hierarchy. Returns ------- datafiles : list A nested list of paths to datafiles. By default, the structure is polarization-datafile nesting. If reverse_nesting, then the structure is flipped to datafile-polarization structure. datapols : list List of polarizations for each file in datafile """ # type check if isinstance(templates, (str, np.str)): templates = [templates] if isinstance(pols, (str, np.str, np.integer, int)): pols = [pols] # search for datafiles datafiles = [] datapols = [] for pol in pols: dps = [] dfs = [] for template in templates: _dfs = glob.glob(template.format(pol=pol)) if len(_dfs) > 0: dfs.extend(_dfs) dps.extend([pol for df in _dfs]) if len(dfs) > 0: datafiles.append(sorted(dfs)) datapols.append(dps) # get unique files allfiles = [item for sublist in datafiles for item in sublist] allpols = [item for sublist in datapols for item in sublist] unique_files = set() for _file in allfiles: for pol in pols: if ".{pol}.".format(pol=pol) in _file: unique_files.update(set([_file.replace(".{pol}.".format(pol=pol), ".{pol}.")])) break unique_files = sorted(unique_files) # check for unique files with all pols if matched_pols: Npols = len(pols) _templates = [] for _file in unique_files: goodfile = True for pol in pols: if _file.format(pol=pol) not in allfiles: goodfile = False if goodfile: _templates.append(_file) # achieve goal by calling search_data with new _templates that are polarization matched datafiles, datapols = search_data(_templates, pols, matched_pols=False, reverse_nesting=False) # reverse nesting if desired if reverse_nesting: datafiles = [] datapols = [] for _file in unique_files: dfs = [] dps = [] for pol in pols: df = _file.format(pol=pol) if df in allfiles: dfs.append(df) dps.append(pol) datafiles.append(dfs) datapols.append(dps) # flatten if flatten: datafiles = [item for sublist in datafiles for item in sublist] datapols = [item for sublist in datapols for item in sublist] return datafiles, datapols
9f8018de15db0659928e28779ebf4acda0ddba74
6,643
import re


def normalize_word(word):
    """
    :type word: str
    :rtype: str
    """
    acronym_pattern = r'^(?:[A-Z]\.)+$'
    if re.match(pattern=acronym_pattern, string=word):
        word = word.replace('.', '')

    if word.lower() in _REPLACE_WORDS:
        replacement = _REPLACE_WORDS[word.lower()]
        if word.islower():
            return replacement.lower()
        elif word.isupper():
            return replacement.upper()
        elif word[0].isupper() and word[1:].islower():
            return replacement.capitalize()
        else:
            return replacement
    else:
        return word
e2c96d456cc8b555b68f2c7498a6d2898ce5990e
6,644
def _ggm_qsize_prob_gt_0_whitt_5_2(arr_rate, svc_rate, c, ca2, cs2):
    """
    Return the approximate P(Q>0) in a G/G/m queue using Whitt's simple
    approximation involving rho and P(W>0).

    This approximation is exact for M/M/m and has strong theoretical support
    for GI/M/m. It's described by Whitt as "crude" but is "a useful quick
    approximation".

    See Section 5 of Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.
    In particular, this is Equation 5.2.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    ca2 : float
        squared coefficient of variation for interarrival time distribution
    cs2 : float
        squared coefficient of variation for service time distribution

    Returns
    -------
    float
        ~ P(Q > 0)
    """
    rho = arr_rate / (svc_rate * float(c))
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)

    prob_gt_0 = rho * pdelay

    return prob_gt_0
3eded5597dc199e61c4d79187369e1a84531ac3d
6,645
def pds3_label_gen_date(file):
    """Returns the creation date of a given PDS3 label.

    :param file: File path
    :type file: str
    :return: Creation date
    :rtype: str
    """
    generation_date = "N/A"

    with open(file, "r") as f:
        for line in f:
            if "PRODUCT_CREATION_TIME" in line:
                generation_date = line.split("=")[1].strip()

    return generation_date
c2877fa9246dd0c12c6ea47635ab248dc038b179
6,646
def harmony(*args):
    """
    Takes an arbitrary number of numbers and returns their harmonic mean.
    Calculation is done with the formula:
        number_of_args / (1/item1 + 1/item2 + ...)

    Args:
        *args (tuple): any number of arguments of type float or int

    Returns:
        float: harmonic mean
    """
    result = 0
    if 0 in args:
        return 0.0
    for item in args:
        result += 1 / item
    return len(args) / result
bc66276b3ef27ef0bfd059afa8ca7afd5d9cbb82
6,647
def node_gdf_from_graph(G, crs = 'epsg:4326', attr_list = None, geometry_tag = 'geometry', xCol='x', yCol='y'): """ Function for generating GeoDataFrame from Graph :param G: a graph object G :param crs: projection of format {'init' :'epsg:4326'}. Defaults to WGS84. note: here we are defining the crs of the input geometry - we do NOT reproject to this crs. To reproject, consider using geopandas' to_crs method on the returned gdf. :param attr_list: list of the keys which you want to be moved over to the GeoDataFrame, if not all. Defaults to None, which will move all. :param geometry_tag: specify geometry attribute of graph, default 'geometry' :param xCol: if no shapely geometry but Longitude present, assign here :param yCol: if no shapely geometry but Latitude present, assign here :returns: a geodataframe of the node objects in the graph """ nodes = [] keys = [] # finds all of the attributes if attr_list is None: for u, data in G.nodes(data = True): keys.append(list(data.keys())) flatten = lambda l: [item for sublist in l for item in sublist] attr_list = list(set(flatten(keys))) if geometry_tag in attr_list: non_geom_attr_list = attr_list non_geom_attr_list.remove(geometry_tag) else: non_geom_attr_list = attr_list if 'node_ID' in attr_list: non_geom_attr_list = attr_list non_geom_attr_list.remove('node_ID') z = 0 for u, data in G.nodes(data = True): if geometry_tag not in attr_list and xCol in attr_list and yCol in attr_list : try: new_column_info = { 'node_ID': u, 'geometry': Point(data[xCol], data[yCol]), 'x': data[xCol], 'y': data[yCol]} except: print('Skipped due to missing geometry data:',(u, data)) else: try: new_column_info = { 'node_ID': u, 'geometry': data[geometry_tag], 'x':data[geometry_tag].x, 'y':data[geometry_tag].y} except: print((u, data)) for i in non_geom_attr_list: try: new_column_info[i] = data[i] except: pass nodes.append(new_column_info) z += 1 nodes_df = pd.DataFrame(nodes) nodes_df = nodes_df[['node_ID', *non_geom_attr_list, geometry_tag]] nodes_df = nodes_df.drop_duplicates(subset = ['node_ID'], keep = 'first') nodes_gdf = gpd.GeoDataFrame(nodes_df, geometry = nodes_df.geometry, crs = crs) return nodes_gdf
cf5849c672877010aae7b1fb841a6993a53d232f
6,648
def views():
    """ Used for the creation of Orientation objects with
    `Orientations.from_view_up`
    """
    return [[1, 0, 0], [2, 0, 0], [-1, 0, 0]]
21ffce8e8a56cf31e2d03a6384d584bcb4bfb2c8
6,649
def check_closed(f):
    """Decorator that checks if connection/cursor is closed."""

    def g(self, *args, **kwargs):
        if self.closed:
            raise exceptions.Error(f'{self.__class__.__name__} already closed')
        return f(self, *args, **kwargs)

    return g
4772de94c28022266ee01f0c900e8937859cc58c
6,651
def get_diff_comparison_score(git_commit, review_url, git_checkout_path, cc): # pragma: no cover """Reads the diff for a specified commit Args: git_commit(str): a commit hash review_url(str): a rietveld review url git_checkout_path(str): path to a local git checkout cc: a cursor for the Cloud SQL connection Return: score(float): a score in [0,1] where 0 is no similarity and 1 is a perfect match """ git_diff = get_git_diff(git_commit, git_checkout_path) comparable_git_diff = [x for x in git_diff if x.startswith('+') \ or x.startswith('-')] rietveld_diff = get_rietveld_diff(review_url, cc, git_checkout_path) comparable_rietveld_diff = [x for x in rietveld_diff if x.startswith('+') \ or x.startswith('-')] matching = list(set(comparable_git_diff) - set(comparable_rietveld_diff)) total = max(len(comparable_git_diff), len(comparable_rietveld_diff)) score = 1 - float(len(matching)) / total if total != 0 else 0 return score
b68904a62d1e42b8e147705984c9455ff0f5d6fc
6,653
def pack(pieces=()):
    """
    Join a sequence of strings together.

    :param list pieces: list of strings
    :rtype: bytes
    """
    return b''.join(pieces)
ffd0852a16c6292f921e5cf205301171e3a96fd3
6,654
def compte_var(f, var):
    """Count the number of occurrences of the variable var in f."""
    n = f.nb_operandes()
    if n == 0:
        v = f.get_val()
        if v == var:
            return 1
        else:
            return 0
    elif n == 1:
        f2 = (f.decompose())[0]
        return compte_var(f2, var)
    else:
        [f2, f3] = f.decompose()
        return compte_var(f2, var) + compte_var(f3, var)
002051e3bf723cfcc1a2cb3d094b58980591adc5
6,656
from watchlist.models import User


def inject_vars():  # the function name can be anything
    """Template context processor."""
    user = User.query.first()  # user object
    if not user:
        user = User()
        user.name = 'BL00D'
    return locals()
edf9126fb919cb825acac3f951f481575fe2ef57
6,657
def try_parse_section(text: str, section_name: str) -> str:
    """
    Parse a section. Return an empty string if section not found.

    Args:
        text (str): text
        section_name (str): section's name

    Returns:
        (str): section
    """
    try:
        return parse_section(text, section_name)
    except Exception:
        return ""
26c8d6d3f8475954fcf742e662981ad5f1223e53
6,658
from bs4 import BeautifulSoup


def get_location_based_lifers(web_page):
    """
    A method that takes in a web page and returns the location frequency
    for lifers along with the lifer details.
    """
    bs4_object = BeautifulSoup(web_page, html_parser)
    table_list = bs4_object.find_all('li', class_=myebird_species_li_class)

    lifer_data_list = []
    for item in table_list:
        bird_name = item.find_all('div')[1].find('a').find_all('span')[0].contents[0].strip()
        location = item.find_all('div')[2].find_all('div')[1].find_all('a')[0].contents[0].strip()
        date = item.find_all('div')[2].find_all('div')[0].find('a').contents[0].strip()
        lifer_data_list.append([bird_name, location, date])

    location_frequency = dict()
    for item in range(len(lifer_data_list)):
        if lifer_data_list[item][1] in location_frequency.keys():
            location_frequency[lifer_data_list[item][1]] += 1
        else:
            location_frequency[lifer_data_list[item][1]] = 1

    sorted_location_frequency = sorted(location_frequency.items(), key=lambda x: x[1], reverse=True)
    return sorted_location_frequency, lifer_data_list
1c6b85962f6c142ab816255a1fe5c98f272dfebb
6,659
def parse_show_qos_queue_profile(raw_result): """ Parse the show command raw output. :param str raw_result: vtysh raw result string. :rtype: dict :return: The parsed result of the 'show qos queue-profile' command in a \ dictionary: for 'show qos queue-profile': :: { 'default': {'profile_name': 'default', 'profile_status': 'applied'}, 'factory-default': {'profile_name': 'factory-default', 'profile_status': 'complete'} } for 'show qos queue-profile <name>': :: { '0': {'queue_num': '0', 'local_priorities': '0', 'name': 'Scavenger_and_backup_data'}, '1': {'queue_num': '1', 'local_priorities': '1', 'name': ''}, ... } """ hyphen_line = raw_result.splitlines()[1] columns = [pos for pos, char in enumerate(hyphen_line) if char == ' '] result = {} if len(columns) + 1 == 2: # All profiles. # Skip the first two banner lines. for line in raw_result.splitlines()[2:]: profile_name = line[columns[0]:len(line)].strip() result[profile_name] = {} result[profile_name]['profile_status'] = \ line[0:columns[0]].strip() result[profile_name]['profile_name'] = \ line[columns[0]:len(line)].strip() elif len(columns) + 1 == 3: # Single profile. # Skip the first two banner lines. for line in raw_result.splitlines()[2:]: queue_num = line[0:columns[0]].strip() result[queue_num] = {} result[queue_num]['queue_num'] = \ line[0:columns[0]].strip() result[queue_num]['local_priorities'] = \ line[columns[0]:columns[1]].strip() result[queue_num]['name'] = \ line[columns[1]:len(line)].strip() else: # Error. raise ValueError("Unexpected number of columns.") return result
2a883a50663607356e0edadeb2d4cf17d34ab028
6,660
import random
import io


def plot_png(num_x_points=50):
    """ renders the plot on the fly. """
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    x_points = range(num_x_points)
    axis.plot(x_points, [random.randint(1, 30) for x in x_points])

    output = io.BytesIO()
    FigureCanvasAgg(fig).print_png(output)
    return Response(output.getvalue(), mimetype="image/png")
bac5e9146bf0b60d943e5d58376a84eddebd21ec
6,661
def render_message(session, window, msg, x, y):
    """Render a message glyph.

    Clears the area beneath the message first and assumes the display
    will be paused afterwards.
    """
    # create message box
    msg = GlyphCoordinate(session, msg, x, y)

    # clear existing glyphs which intersect
    for gly in (
        session.query(GlyphCoordinate)
        .join(GlyphCoordinate.glyph)
        .filter(GlyphCoordinate.intersects(msg))
    ):
        gly.blank(window)

    # render
    msg.render(window, {})
    window.refresh()
    return msg
2f0362dfa1f884571339456b0a610e0f6cdd75a6
6,662
from pathlib import Path def part_one(filename='input.txt', target=2020): """Satisfies part one of day one by first sorting the input rows so we can avoid the worst case O(n**2). We incur O(n log n) to do the sort followed by a brute force search with short circuiting if the sum exceeds our target. This is possible since we know in sorted order, only larger values will follow. Note, we assume only one valid solution in the given file. If more than one, there is no guarantee which will be returned. Parameters ---------- filename : str, optional The file to parse as input will contain one integer per line, by default 'input.txt' target : int, optional The target sum we want to reach, by default 2020 Returns ------- int The product of the two integers that sum to the target value Raises ------ Exception Probably overkill, but I wanted to know if my code was failing to find a solution. Also, I could have looked for a more appropriate exception than the base one. """ items = sorted(map(int, Path(filename).read_text().split())) count = len(items) for i in range(count): for j in range(i+1, count): summand = items[i] + items[j] if summand > target: break elif summand == target: return items[i]*items[j] raise Exception('No solution!')
d9c6790f9c5b5de7fbd9555a8483f2cb0e156b3b
6,663
def get_urls(name, version=None, platform=None):
    """
    Return a mapping of standard URLs
    """
    dnlu = rubygems_download_url(name, version, platform)
    return dict(
        repository_homepage_url=rubygems_homepage_url(name, version),
        repository_download_url=dnlu,
        api_data_url=rubygems_api_url(name, version),
        download_url=dnlu,
    )
d11666a72771187166a6b9f620237639fd8422f3
6,664
def getEHfields(m1d, sigma, freq, zd, scaleUD=True, scaleValue=1): """Analytic solution for MT 1D layered earth. Returns E and H fields. :param discretize.base.BaseMesh, object m1d: Mesh object with the 1D spatial information. :param numpy.ndarray, vector sigma: Physical property of conductivity corresponding with the mesh. :param float, freq: Frequency to calculate data at. :param numpy.ndarray, vector zd: location to calculate EH fields at :param bool, scaleUD: scales the output to be scaleValue at the top, increases numerical stability. Assumes a halfspace with the same conductive as the deepest cell. """ # Note add an error check for the mesh and sigma are the same size. # Constants: Assume constant mu = mu_0 * np.ones((m1d.nC + 1)) eps = eps_0 * np.ones((m1d.nC + 1)) # Angular freq w = 2 * np.pi * freq # Add the halfspace value to the property sig = np.concatenate((np.array([sigma[0]]), sigma)) # Calculate the wave number k = np.sqrt(eps * mu * w ** 2 - 1j * mu * sig * w) # Initiate the propagation matrix, in the order down up. UDp = np.zeros((2, m1d.nC + 1), dtype=complex) UDp[ 1, 0 ] = scaleValue # Set the wave amplitude as 1 into the half-space at the bottom of the mesh # Loop over all the layers, starting at the bottom layer for lnr, h in enumerate(m1d.hx): # lnr-number of layer, h-thickness of the layer # Calculate yp1 = k[lnr] / (w * mu[lnr]) # Admittance of the layer below the current layer zp = (w * mu[lnr + 1]) / k[lnr + 1] # Impedance in the current layer # Build the propagation matrix # Convert fields to down/up going components in layer below current layer Pj1 = np.array([[1, 1], [yp1, -yp1]], dtype=complex) # Convert fields to down/up going components in current layer Pjinv = 1.0 / 2 * np.array([[1, zp], [1, -zp]], dtype=complex) # Propagate down and up components through the current layer elamh = np.array( [[np.exp(-1j * k[lnr + 1] * h), 0], [0, np.exp(1j * k[lnr + 1] * h)]] ) # The down and up component in current layer. UDp[:, lnr + 1] = elamh.dot(Pjinv.dot(Pj1)).dot(UDp[:, lnr]) if scaleUD: # Scale the values such that 1 at the top scaleVal = UDp[:, lnr + 1 :: -1] / UDp[1, lnr + 1] if np.any(~np.isfinite(scaleVal)): # If there is a nan (thickness very great), rebuild the move up cell scaleVal = np.zeros_like(UDp[:, lnr + 1 :: -1], dtype=complex) scaleVal[1, 0] = scaleValue UDp[:, lnr + 1 :: -1] = scaleVal # Calculate the fields Ed = np.empty((zd.size,), dtype=complex) Eu = np.empty((zd.size,), dtype=complex) Hd = np.empty((zd.size,), dtype=complex) Hu = np.empty((zd.size,), dtype=complex) # Loop over the layers and calculate the fields # In the halfspace below the mesh dup = m1d.vectorNx[0] dind = dup >= zd Ed[dind] = UDp[1, 0] * np.exp(-1j * k[0] * (dup - zd[dind])) Eu[dind] = UDp[0, 0] * np.exp(1j * k[0] * (dup - zd[dind])) Hd[dind] = (k[0] / (w * mu[0])) * UDp[1, 0] * np.exp(-1j * k[0] * (dup - zd[dind])) Hu[dind] = -(k[0] / (w * mu[0])) * UDp[0, 0] * np.exp(1j * k[0] * (dup - zd[dind])) for ki, mui, epsi, dlow, dup, Up, Dp in zip( k[1::], mu[1::], eps[1::], m1d.vectorNx[:-1], m1d.vectorNx[1::], UDp[0, 1::], UDp[1, 1::], ): dind = np.logical_and(dup >= zd, zd > dlow) Ed[dind] = Dp * np.exp(-1j * ki * (dup - zd[dind])) Eu[dind] = Up * np.exp(1j * ki * (dup - zd[dind])) Hd[dind] = (ki / (w * mui)) * Dp * np.exp(-1j * ki * (dup - zd[dind])) Hu[dind] = -(ki / (w * mui)) * Up * np.exp(1j * ki * (dup - zd[dind])) # Return return the fields return Ed, Eu, Hd, Hu
e850762955ff513adef7099b61eb285d059c1ffe
6,665
def repeated(f, n): """Returns a function that takes in an integer and computes the nth application of f on that integer. Implement using recursion! >>> add_three = repeated(lambda x: x + 1, 3) >>> add_three(5) 8 >>> square = lambda x: x ** 2 >>> repeated(square, 2)(5) # square(square(5)) 625 >>> repeated(square, 4)(5) # square(square(square(square(5)))) 152587890625 >>> repeated(square, 0)(5) 5 >>> from construct_check import check >>> # ban iteration >>> check(HW_SOURCE_FILE, 'repeated', ... ['For', 'While']) True """ if n == 0: return identity else: return compose1(f, repeated(f, n - 1))
dd2024ffa7c5abbcfb43b1a6a8d6ea00c3fb42c4
6,666
def method_functions():
    """ Returns a dictionary containing the valid method keys and their
    corresponding dispersion measure functions.
    """
    return _available
6d9ea23e4c0449b4b2d0a27b5117be30400a7d43
6,667
def generate_auth_token():
    """Generate a token using jwt.

    Returns:
        token.
    """
    key = PRIVATE_KEY
    data = {'appId': APPLICATION_ID}
    token = jwt.encode(data, key, algorithm='RS256')
    return token
a2e9307f392a8a6c0d83e9f1064475c11fc4eeec
6,669
from typing import Dict
from typing import Any
from typing import List


def dict_expand(d: Dict[Any, Any]) -> List[Dict[Any, Any]]:
    """Converts a dictionary of lists to a list of dictionaries.

    The resulting list will be of the same length as the longest dictionary
    value. If any values are not lists then they will be repeated to the
    required length.

    Args:
        d: The dictionary of arrays to expand.

    Returns:
        The resulting list of dictionaries.
    """
    size = max([_len_arg(arg) for arg in d.values()])
    d = {k: _expand_arg(v, size) for k, v in d.items()}
    return [{k: v[i] for k, v in d.items()} for i in range(size)]
6ca2c25318a3b6bc0b2a45bf3aeec7187ad78e5c
6,670
def get_asexual_lineage_num_discrete_state_changes(lineage, attribute_list): """Get the number of discrete state changes from an asexual lineage. State is described by the aggregation of all attributes give by attribute list. Args: lineage (networkx.DiGraph): an asexual lineage attribute_list (list): list of attributes (strings) to use when defining a state Returns: Returns the number of discrete states along the lineage. """ # Check that lineage is an asexual lineage. if not utils.is_asexual_lineage(lineage): raise Exception("the given lineage is not an asexual lineage") # Check that all nodes have all given attributes in the attribute list if not utils.all_taxa_have_attributes(lineage, attribute_list): raise Exception("given attributes are not universal among all taxa along the lineage") # get the first state (root node) lineage_id = utils.get_root_ids(lineage)[0] num_states = 1 cur_state = [lineage.nodes[lineage_id][attr] for attr in attribute_list] # count the number of state changes moving down the lineage while True: successor_ids = list(lineage.successors(lineage_id)) if len(successor_ids) == 0: break # We've hit the last thing! lineage_id = successor_ids[0] state = [lineage.nodes[lineage_id][attr] for attr in attribute_list] if cur_state != state: cur_state = state num_states += 1 return num_states
9c0d4badc7b4fea70c56ce69727e48eb991a96e1
6,671
def check_downloaded(dataset: str, directory: str = None) -> bool:
    """
    Check whether dataset is downloaded

    Args:
        dataset (str): String of dataset's name, e.g. ml-100k, bx
        directory (str, optional): String of directory of downloaded data. Defaults to None.

    Returns:
        bool: Boolean flag to show if the dataset is downloaded, i.e. name of dataset
            is in the list of subdirectories in the input directory.
    """
    return True if dataset in get_downloaded_data(directory=directory) else False
bf3342e7da11b34918bc2cb9939c95145d2f4feb
6,672
from pathlib import Path


def enterprise_1_9_installer() -> Path:
    """
    Return the path to an installer for DC/OS Enterprise 1.9.
    """
    return Path('/tmp/dcos_generate_config_1_9.ee.sh')
857b5d339e05cbb225189d7ee47d0415fc539c54
6,673
def Laplacian(n):
    """
    Create Laplacian on 2-dimensional grid with n*n nodes
    """
    B = forward_diff_matrix(n)
    D = -B.T @ B

    Dx = sparse.kron(sparse.eye(n), D).tocsr()
    Dy = sparse.kron(D, sparse.eye(n)).tocsr()
    return Dx + Dy
47d70e635dc8e7d722e069435d17214a6ea3c6de
6,674
import re


def LookupGitSVNRevision(directory, depth):
    """
    Fetch the Git-SVN identifier for the local tree.
    Parses first |depth| commit messages.

    Errors are swallowed.
    """
    if not IsGitSVN(directory):
        return None
    git_re = re.compile(r'^\s*git-svn-id:\s+(\S+)@(\d+)')
    proc = RunGitCommand(directory, ['log', '-' + str(depth)])
    if proc:
        for line in proc.stdout:
            match = git_re.match(line)
            if match:
                id = match.group(2)
                if id:
                    proc.stdout.close()  # Cut pipe for fast exit.
                    return id
    return None
664da44ee6057a62eb8ece161ade5cabac15bc7b
6,675
def collect_jars(
        dep_targets,
        dependency_analyzer_is_off = True,
        unused_dependency_checker_is_off = True,
        plus_one_deps_is_off = True):
    """Compute the runtime and compile-time dependencies from the given targets"""  # noqa

    if dependency_analyzer_is_off:
        return _collect_jars_when_dependency_analyzer_is_off(
            dep_targets,
            unused_dependency_checker_is_off,
            plus_one_deps_is_off,
        )
    else:
        return _collect_jars_when_dependency_analyzer_is_on(dep_targets)
10203c31bb2d1b5df9336d606355b497f7dd755a
6,676
def _cred1_adapter(user=None, password=None):
    """Just a sample adapter from one user/pw type to another"""
    return dict(user=user + "_1", password=password + "_2")
9e7c218d2dc01793cba232ba1f6d69a54bf21fee
6,677
def acc_metric(y_true, y_pred):
    """ Accuracy """
    diff = K.abs(y_pred - y_true) * 5000
    return K.mean(diff, axis=-1)
0722791db5546f16648f74b8927590de8696e3d5
6,678
async def confirm(message: discord.Message, fallback: str = None) -> bool: """ Helper function to send a checkmark reaction on a message. This would be used for responding to a user that an action completed successfully, without sending a whole other message. If a checkmark reaction cannot be added, the optional `fallback` message will be sent instead. :param discord.Message message: The message to add the reaction to. :param str fallback: The fallback message to be sent to the channel, if the reaction could not be added. :return: Whether confirming the message succeeded. """ try: await message.add_reaction("☑") except: pass else: return True if fallback is None: return False # now still executing only if the above failed try: await message.channel.send(fallback) except: return False # we weren't able to send any feedback to the user at all else: return True
2567957d4239605072bd4f707c12e2b265b8cfbe
6,679
def get_layer_version(
    lambda_client: BaseClient,
    layer_name: str,
    version: int,
) -> "definitions.LambdaLayer":
    """Retrieve the configuration for the specified lambda layer."""
    return definitions.LambdaLayer(
        lambda_client.get_layer_version(
            LayerName=layer_name,
            VersionNumber=version,
        )
    )
cfa2121ac757ae24b67bb25f7fd3046f017df85d
6,680
import requests


def get_detail_msg(detail_url):
    """
    2. Fetch the detailed data for a single job posting.
    :param detail_url: URL of the job detail page
    :return: job data
    """
    # print('Requesting detail URL: ' + detail_url)
    response = requests.get(detail_url, headers=HEADERS)
    html_element = etree.HTML(response.text)
    position = {}

    # [data] job title
    title = html_element.xpath('//tr[@class="h"]/td/text()')[0]
    position['title'] = title

    # [data] work location / job category
    top_infos = html_element.xpath('//tr[@class="c bottomline"]//text()')
    position['location'] = top_infos[top_infos.index('工作地点:') + 1]
    position['category'] = top_infos[top_infos.index('职位类别:') + 1]

    content_infos = html_element.xpath('//ul[@class="squareli"]')
    # [data] job duties
    work_do_info = content_infos[0]
    position['duty'] = work_do_info.xpath("./li/text()")
    # [data] job requirements
    work_ask_info = content_infos[1]
    position['ask'] = work_ask_info.xpath('./li/text()')

    return position
2fc5b316abed9eb9aeff99ae87cdd8e5e59a5e70
6,681
def wizard_process_received_form(form):
    """
    Processing of form received during the time measure
    Expected result example: {1: '00:43.42', 2: '00:41.35', 3: '00:39.14', 4: '00:27.54'}
    """
    lines = {key.split('_')[1]: value.split('_')[1]
             for key, value in form.items() if key.startswith("line")}
    # print(lines)
    times = {key.split('_')[1]: value
             for key, value in form.items() if key.startswith("time")}
    # print(times)
    return {int(value): times[key] for key, value in lines.items()}
54b10589cab7ce689b64f5373d2f0a998044db82
6,682
import inspect def getsource(obj,is_binary=False): """Wrapper around inspect.getsource. This can be modified by other projects to provide customized source extraction. Inputs: - obj: an object whose source code we will attempt to extract. Optional inputs: - is_binary: whether the object is known to come from a binary source. This implementation will skip returning any output for binary objects, but custom extractors may know how to meaningfully process them.""" if is_binary: return None else: return inspect.getsource(obj)
9e97a030c695b9ea50d27abc5253e47be7d4c06a
6,683
import re


def extract_sector_id(room):
    """Given a room identifier of the form:

        'aaa-bbb-cc-d-e-123[abcde]'

    Return the sector id: '123'
    """
    m = re.search(r'(?P<sector_id>\d+)', room)
    return m.group('sector_id') if m else None
f5bfb64d32769cd4b6c2b7309d41450fa807d7a2
6,684
def splitext_all(_filename):
    """split all extensions (after the first .) from the filename
    should work similar to os.path.splitext (but that splits only the last extension)
    """
    _name, _extensions = _filename.split('.')[0], '.'.join(_filename.split('.')[1:])
    return (_name, "." + _extensions)
bf9e4ee06eb30dfeb7898ce6e34607bef20b290b
6,685
def tag_in_tags(entity, attribute, value):
    """
    Return true if the provided entity has
    a tag of value in its tag list.
    """
    return value in entity.tags
ad88be5f8848b387f2a261ce5506dffde285a1d8
6,687
def generate_finding_title(title):
    """
    Generate a consistent title for a finding in AWS Security Hub
    * Setup as a function for consistency
    """
    return "Trend Micro: {}".format(title)
0cf390c2579e06c2166b086332035b864d3db1e3
6,688
def makeHexagon(x, y, w, h):
    """Return hexagonal QPolygon. (x, y) is the top left corner"""
    points = []
    cos = [1., 0.5, -0.5, -1, -0.5, 0.5]
    sin = [0, 0.866025, 0.866025, 0, -0.866025, -0.866025]
    for i in range(len(cos)):
        points.append(QPoint(x + w * cos[i], y + h * sin[i]))
    return QPolygonF(points)
7310e0313130f54b125c81f332a541b2b2b9b9a9
6,689
def save_conv_output(activations, name):
    """
    Saves layer output in activations dict with name key
    """
    def get_activation(m, i, o):
        activations[name] = F.relu(o).data.cpu().numpy()
    return get_activation
13034128234ea6a9633ae144ac02788f2d49986a
6,690
async def get_profile_xp(user_id: int):
    """
    Get a user's profile xp.

    :param user_id: Discord User ID
    """
    return (await self.conn.fetchrow("SELECT profilexp FROM currency.levels WHERE userid = $1", user_id))[0]
d029bb335442aa3ba5ef02b143351e8ccc6b6434
6,691
from typing import Optional


def validate_raw_data(data: Optional[UserPackage]) -> bool:
    """Returns False if invalid data"""
    # NOTE: add more validation as more fields are required
    if data is None or data.contribs is None:
        return False

    if (
        data.contribs.total_stats.commits_count > 0
        and len(data.contribs.total_stats.languages) == 0
    ):
        return False

    return True
22185bc2691b6a5fce98749c119ea14649c0d676
6,693
def extractTheSunIsColdTranslations(item):
    """
    Parser for 'The Sun Is Cold Translations'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    if '108 maidens' in item['tags']:
        return buildReleaseMessageWithType(item, '108 Maidens of Destiny', vol, chp, frag=frag, postfix=postfix)
    if 'Back to the Apocalypse' in item['tags']:
        return buildReleaseMessageWithType(item, 'Back to the Apocalypse', vol, chp, frag=frag, postfix=postfix)

    return False
94ef69f42dd183a2155a02c8035c12da30eb34e2
6,694
import torch


def to_device(x, device):
    """Cast a hierarchical object to pytorch device"""
    if isinstance(x, torch.Tensor):
        return x.to(device)
    elif isinstance(x, dict):
        for k in list(x.keys()):
            x[k] = to_device(x[k], device)
        return x
    elif isinstance(x, list) or isinstance(x, tuple):
        return type(x)(to_device(t, device) for t in x)
    else:
        raise ValueError('Wrong type !')
a315905fb0cf6d6720103c0d22440418ebd41bf1
6,695
def git_show_oneline(obj):
    """Returns: One-line description of a git object `obj`, which is typically a commit.
    https://git-scm.com/docs/git-show
    """
    return exec_headline(['git', 'show', '--oneline', '--quiet', obj])
77427786a8d1b9e3b01d5194387f047c1c9ce505
6,696
from operator import and_ import logging def like_post(): """ Like a post """ try: # This will prevent old code from adding invalid post_ids post_id = int(request.args.get('post_id', '-1')) if post_id < 0: return "No Post Found to like!" vote = (db_session.query(Vote) .filter(and_(Vote.object_id == post_id, Vote.user_id == current_user.id)) .first()) if not vote: vote = Vote(user_id=current_user.id, object_id=post_id) db_session.add(vote) db_session.commit() except Exception as e: logging.warning(f'ERROR processing request {e}') return ""
8cdde2ec6f71104178c49661a9e3fdf5c62bb67d
6,697
def login_post():
    """Handle the submitted login form"""
    # Username that the user entered in the form
    username = bottle.request.forms.user
    # Compute the MD5 hash of the password that we will store
    password = password_md5(bottle.request.forms.psw)
    # Check whether the user logged in correctly
    c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    c.execute("SELECT 1 FROM uporabnik WHERE username=%s AND geslo=%s", [username, password])
    if c.fetchone() is None:
        # Username and password do not match
        return bottle.template("login.html",
                               napaka="Nepravilna prijava",  # warning shown by the login template
                               username=username)  # keep the same username
    else:
        # Everything is fine; set the cookie and redirect to the main page
        bottle.response.set_cookie('username', username, path='/', secret=secret)
        bottle.redirect("/")
9b2243f7e618833d59a7d07454ed8fe86d4b18fc
6,698
def parse_symbol_file(filepath, fapi=None):
    """Read in stock symbol list from a text file.

    Args:
        filepath: Path to file containing stock symbols, one per line.
        fapi: If this is supplied, the symbols read will be conformed to
            a financial API; currently 'google' or 'yahoo'.

    Returns:
        List of stock symbols; list may be empty if file could not be parsed.
    """
    try:
        with open(filepath, 'r') as file_handle:
            symbols = [line.strip() for line in list(file_handle)
                       if '#' not in line]
        if fapi:
            symbols = conform_symbols(symbols, fapi)
    except IOError:
        symbols = []
    return symbols
af0f085fd5424045c71dee6a290a07645f242ff8
6,699
def trader_tactic_snapshot(symbol, strategy, end_dt=None, file_html=None, fq=True, max_count=1000):
    """Take a strategy snapshot of any instrument at any moment, using JoinQuant data.

    :param symbol: trading instrument
    :param strategy: timing trading strategy
    :param end_dt: end time, precise to the minute
    :param file_html: result file
    :param fq: whether to use adjusted prices
    :param max_count: maximum number of K-line bars
    :return: trader
    """
    tactic = strategy(symbol)
    base_freq, freqs = tactic['base_freq'], tactic['freqs']
    bg, data = get_init_bg(symbol, end_dt, base_freq=base_freq, freqs=freqs, max_count=max_count, fq=fq)
    trader = create_advanced_trader(bg, data, strategy)
    if file_html:
        trader.take_snapshot(file_html)
        print(f'saved into {file_html}')
    else:
        trader.open_in_browser()
    return trader
0b979008f3c950ed41aa85eff2031e6cb34b8685
6,700
def preprocess(batch):
    """ Add zero-padding to a batch. """
    tags = [example.tag for example in batch]

    # add zero-padding to make all sequences equally long
    seqs = [example.words for example in batch]
    max_length = max(map(len, seqs))
    seqs = [seq + [PAD] * (max_length - len(seq)) for seq in seqs]

    return seqs, tags
832b6453714e7b7eb23271b771bbc156a09d3784
6,701
def append_include_as(include_match): """Convert ``#include x`` to ``#include x as y``, where appropriate; also, convert incorrect "as" statements. See INCLUDE_AS dict for mapping from resource to its "as" target. Parameters ---------- include_match : re._pattern_type Match produced by INCLUDE_RE.match(string) Returns ------- repl : string Replacement text for whatever comes after the "#include " """ include_text = include_match.groups()[0] include_as_match = PISAConfigParser.INCLUDE_AS_RE.match(include_text) as_section = None if include_as_match: gd = include_as_match.groupdict() resource = gd['file'] as_section = gd['as'] else: resource = include_text if resource in INCLUDE_AS.keys(): as_section = INCLUDE_AS[resource] if as_section is None: repl = '#include ' + resource else: repl = '#include %s as %s' % (resource, as_section) return repl
0eaaa356efa33f0e64db6b3a843236fd15ebb66d
6,702
def get_user_profiles(page=1, limit=10): """Retrieves a list of user profiles. :param page: Page number :type page: int :param limit: Maximum number of results to show :type limit: int :returns: JSON string of list of user profiles; status code :rtype: (str, int) """ # initialize query query = Query.make( UserProfile, UserProfile.id.asc(), { 'id.asc': UserProfile.id.asc(), 'id.desc': UserProfile.id.desc(), 'user_id.asc': UserProfile.user_id.asc(), 'user_id.desc': UserProfile.user_id.desc(), 'joined_at.asc': UserProfile.joined_at.asc(), 'joined_at.desc': UserProfile.joined_at.desc(), }, request.args, Query.STATUS_FILTER_ADMIN) # retrieve and return results results = list(query.limit(limit).offset((page - 1) * limit)) if len(results) > 0: # prep initial output output = { 'user_profiles': UserProfileAdminSchema(many=True).dump(results), 'page': page, 'limit': limit, 'total': query.count() } # add pagination URIs and return output.update( Pager.get_uris('admin_user_profiles.get_user_profiles', page, limit, output['total'], request.args)) return jsonify(output), 200 return '', 204
7bcfe33925e49a90ac593fad53e4e169b107886e
6,703
import sqlite3


def _repository():
    """Helper dependency injection"""
    db = sqlite3.connect('covid_database.db', isolation_level=None)
    return CovidTestDataRepository(db)
e30d30f3b4f9673df4863d261655aa6f38a527d7
6,704
def f(x):
    """Cubic function."""
    return x**3
13832221de3490dbd92f4f1a26854baec7010023
6,705
from outliers import smirnov_grubbs as grubbs def correct_anomalies(peaks, alpha=0.05, save_name=""): """ Outlier peak detection (Grubb's test) and removal. Parameters ---------- peaks : array vector of peak locations alpha : real significance level for Grubb's test save_name : str filename to save peaks as to, empty does not save Results ------- corrected_peaks2 : array vector of corrected peak locations max_indices : array indices of original peaks marked as too slow min_indices : array indices of original peaks marked as too fast """ peak_diffs = abs(np.diff(peaks)) max_indices = grubbs.max_test_indices(peak_diffs, alpha=alpha) min_indices = grubbs.min_test_indices(peak_diffs, alpha=alpha) grubb_idxs = max_indices + min_indices # Compute representative difference based on its distribution mean_rr = np.mean( peak_diffs[[ii for ii in range(len(peak_diffs)) if ii not in grubb_idxs]] ) mean_rr = int(np.round(mean_rr)) corrected_peaks = peaks.copy() for ix in max_indices: n = int(np.round((peaks[ix + 1] - peaks[ix]) / mean_rr)) if n == 1: continue new_peaks = np.linspace(peaks[ix], peaks[ix + 1], n, dtype=int, endpoint=False)[1:] corrected_peaks = np.append(corrected_peaks, new_peaks) corrected_peaks = np.sort(corrected_peaks) corrected_peak_diffs = abs(np.diff(corrected_peaks)) min_indices = grubbs.min_test_indices(corrected_peak_diffs, alpha=alpha) # deleting peak such that resultant RR interval is furthest from mean RR # (i.e. gives longer RR interval) too_fast = np.array(min_indices) # index of peaks to delete (and then reinsert) peaks_to_replace = np.zeros_like(too_fast) new_peaks2 = np.zeros_like(too_fast, dtype=float) for index, i in enumerate(too_fast): # print(index, i) if i == (corrected_peak_diffs.size - 1): # if last RR interval (edge case) peaks_to_replace[index] = i # replace first peak # compute new diff_peak new_diff = (corrected_peaks[i + 1] - corrected_peaks[i - 1])/2 new_peaks2[index] = corrected_peaks[i - 1] + new_diff else: # replace first peak new_diff1 = corrected_peaks[i + 1] - corrected_peaks[i - 1] # replace second peak new_diff2 = corrected_peaks[i + 2] - corrected_peaks[i] if new_diff1 > new_diff2: # replacing first peak results in new RR interval # furthest from mean RR interval peaks_to_replace[index] = i # compute new diff_peak new_diff = (corrected_peaks[i + 1] - corrected_peaks[i - 1])/2 new_peaks2[index] = corrected_peaks[i - 1] + new_diff else: # replacing second peak results in new RR interval # furthest from mean RR interval peaks_to_replace[index] = i + 1 # compute new diff_peak new_diff = (corrected_peaks[i + 2] - corrected_peaks[i])/2 new_peaks2[index] = corrected_peaks[i] + new_diff corrected_peaks2 = corrected_peaks.copy() np.put(corrected_peaks2, peaks_to_replace.astype(int), new_peaks2) # save peaks if save_name != "": np.savetxt(save_name, corrected_peaks2, delimiter=",") return corrected_peaks2, max_indices, min_indices
cf33123e963b245007c8be4777cecd1224d4e3fa
6,707
def svn_wc_walk_entries(*args): """ svn_wc_walk_entries(char path, svn_wc_adm_access_t adm_access, svn_wc_entry_callbacks_t walk_callbacks, void walk_baton, svn_boolean_t show_hidden, apr_pool_t pool) -> svn_error_t """ return apply(_wc.svn_wc_walk_entries, args)
791e0f635aa56329f78a1ed0f171217518f9be05
6,708
def dlp_to_datacatalog_builder( taskgroup: TaskGroup, datastore: str, project_id: str, table_id: str, dataset_id: str, table_dlp_config: DlpTableConfig, next_task: BaseOperator, dag, ) -> TaskGroup: """ Method for returning a Task Group for scannign a table with DLP, and creating BigQuery policy tags based on the results 1) Scan table with DLP and write results to BigQuery 2) Schedule future DLP 3) Read results of DLP scan from BigQuery 4) Update Policy Tags in BQ Returns the first task """ assert table_dlp_config.source_config is not None # setup tables vars dlp_results_dataset_id = table_dlp_config.source_config.results_dataset_id table_ref = TableReference(DatasetReference(project_id, dataset_id), table_id) dlp_results_table_ref = TableReference( DatasetReference(project_id, dlp_results_dataset_id), f"{table_id}_dlp_results" ) dlp_results_table = f"{dlp_results_table_ref.project}.{dlp_results_table_ref.dataset_id}.{dlp_results_table_ref.table_id}" # setup DLP scan vars dlp_template_name = table_dlp_config.get_template_name() rows_limit_percent = table_dlp_config.get_rows_limit_percent() inspect_job = build_inspect_job_config( dlp_template_name, table_ref, rows_limit_percent, dlp_results_table_ref ) # 1 First delete the results table delete_dlp_results = BigQueryDeleteTableOperator( task_id=f"delete_old_dlp_results_{datastore}", deletion_dataset_table=dlp_results_table, ignore_if_missing=True, task_group=taskgroup, dag=dag, ) # 2 Scan table scan_task = CloudDLPCreateDLPJobOperator( task_id=f"scan_table_{datastore}", project_id=project_id, inspect_job=inspect_job, wait_until_finished=True, task_group=taskgroup, dag=dag, ) # 4. Read results read_results_task = DlpBQInspectionResultsOperator( task_id=f"read_dlp_results_{datastore}", project_id=dlp_results_table_ref.project, dataset_id=dlp_results_table_ref.dataset_id, table_id=dlp_results_table_ref.table_id, do_xcom_push=True, min_match_count=table_dlp_config.get_min_match_count(), task_group=taskgroup, dag=dag, ) # 5. Update policy tags update_tags_task = PythonOperator( task_id=f"update_bq_policy_tags_{datastore}", python_callable=update_bq_policy_tags, # <--- PYTHON LIBRARY THAT COPIES FILES FROM SRC TO DEST task_group=taskgroup, dag=dag, templates_dict={ "dlp_results": f"{{{{ti.xcom_pull(task_ids='{read_results_task.task_id}')}}}}", # "dlp_results": "{{ti.xcom_pull(task_ids='dlp_policy_tags.read_dlp_results_test')}}", }, op_kwargs={ "project_id": project_id, "dataset_id": table_ref.dataset_id, "table_id": table_ref.table_id, "policy_tag_config": table_dlp_config.source_config.policy_tag_config, "task_ids": read_results_task.task_id, }, provide_context=True, ) delete_dlp_results >> scan_task >> read_results_task >> update_tags_task >> next_task return delete_dlp_results
53950a99dddc4f2a61dca12908c3b2a17a3765c4
6,709
import textwrap


def dedent(ind, text):
    """
    Dedent text to the specific indentation level.

    :param ind: common indentation level for the resulting text (number of spaces to
                append to every line)
    :param text: text that should be transformed.

    :return: ``text`` with all common indentation removed, and then the specified
             amount of indentation added.
    """
    text2 = textwrap.dedent(text)
    if ind == 0:
        return text2
    indent_str = " " * ind
    return "\n".join(indent_str + line for line in text2.split("\n"))
271b9fd270d78c4bc952af31d3d9be0ff6bdab73
6,711
def get_vendor(request): """ Returns the ``JSON`` serialized data of the requested vendor on ``GET`` request. .. http:get:: /get_vendor/ Gets the JSON serialized data of the requested vendor. **Example request**: .. sourcecode:: http GET /get_vendor/ HTTP/1.1 Host: localhost:8000 Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9 :param vendor_id: Vendor primary key. **Example response**: .. sourcecode:: http HTTP/1.1 200 OK Vary: Accept Content-Type: application/json; charset=utf-8 [ { "name": "Lug Vendor", "identifier": "TBPN-02692", "gstin": "89AAC4683897343", "address": { "name": "Kuame Burns", "address": "Nonummy Avenue", "city": "Chung Cheong", "phone": "679 166-3127", "state": "Guanacaste", "country": "tellusidnunc.net", "post": "8949" } } ] :resheader Content-Type: application/json :statuscode 200: List of vendors received successfully. :statuscode 400: Bad request version :statuscode 500: Vendor matching query does not exist. """ if request.method == 'GET': vendor_id = request.GET.get('vendor_id') vendor = VendorSerializer(Vendor.objects.get(id=vendor_id)) return JsonResponse(vendor.data)
e142854426ef406bdfe6e34d0629e80d49493c91
6,713
def ensure_bin_str(s):
    """assert type of s is basestring and convert s to byte string"""
    assert isinstance(s, basestring), 's should be string'
    if isinstance(s, unicode):
        s = s.encode('utf-8')
    return s
3ce171f02a371073c5474596da9c963b0a77a415
6,714
def _word_accuracy(pred_data, ref_data):
    """compute word-level accuracy"""
    pred_size = len(pred_data)
    ref_size = len(ref_data)

    if pred_size <= 0 or ref_size <= 0:
        raise ValueError("size of predict or reference data is less than or equal to 0")
    if pred_size != ref_size:
        raise ValueError("size of predict and reference data don't match")

    total_count = 0
    total_accuracy = 0.0
    for i in range(pred_size):
        pred_word = pred_data[i].strip().split(" ")
        ref_word = ref_data[i].strip().split(" ")
        pred_len = len(pred_word)
        ref_len = len(ref_word)
        match_count = 0
        for j in range(min(pred_len, ref_len)):
            predict_word = pred_word[j]
            reference_word = ref_word[j]
            if predict_word == reference_word:
                match_count += 1
        total_accuracy += 100.0 * match_count / max(pred_len, ref_len)
        total_count += 1

    word_accuracy = total_accuracy / total_count

    return word_accuracy
c4abfcc439fca5d14b5edc8289ef9ee2d46807fe
6,715
import requests


def api_retrieve_part(pt_id):
    """
    Allows the client to call "retrieve" method on the server side to
    retrieve the part from the ledger.

    Args:
        pt_id (str): The uuid of the part

    Returns:
        type: str
        String representing JSON object which allows the client to know that
        the call was either a success or a failure.
    """
    response = requests.get(
        "http://127.0.0.1:852/tp/part/{}".format(pt_id)
    )
    output = response.content.decode("utf-8").strip()
    return output
5043415dcdb95e59ec87271bf62d1f04f818af9b
6,717
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup=('lin', 0)):
    """
    Complete linear SMO algorithm.

    Parameters:
        dataMatIn - data matrix
        classLabels - class labels
        C - slack variable
        toler - error tolerance
        maxIter - maximum number of iterations
        kTup - tuple holding kernel function information
    Returns:
        oS.b - b computed by the SMO algorithm
        oS.alphas - alphas computed by the SMO algorithm
    """
    oS = optStruct(np.mat(dataMatIn), np.mat(classLabels).transpose(), C, toler, kTup)  # initialize the data structure
    iter = 0  # current iteration count
    entireSet = True
    alphaPairsChanged = 0
    # exit the loop when no alpha was updated over the whole data set or the maximum number of iterations is exceeded
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:  # pass over the entire data set
            for i in range(oS.m):
                alphaPairsChanged += innerL(i, oS)  # use the optimized SMO routine
                print("full pass: iteration %d, sample %d, alphas changed %d times" % (iter, i, alphaPairsChanged))
            iter += 1
        else:  # pass over the non-bound values
            nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]  # alphas not at the bounds 0 and C
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i, oS)
                print("non-bound pass: iteration %d, sample %d, alphas changed %d times" % (iter, i, alphaPairsChanged))
            iter += 1
        if entireSet:  # after a full pass, switch to the non-bound pass
            entireSet = False
        elif (alphaPairsChanged == 0):  # if no alphas were updated, go back to a full pass
            entireSet = True
        print("iteration count: %d" % iter)
    return oS.b, oS.alphas  # return b and alphas computed by SMO
fb38ef33ab624a74f3da320c5e90a48aa307d588
6,718
def _get_output_data(output_list, heat, stack_id):
    """
    Collect the output data of a stack.
    """
    response = {
        'code': 200,
        'msg': 'ok',
        'status': utils.INSTANTIATED,
        'data': []
    }
    for item in output_list['outputs']:
        output = heat.stacks.output_show(stack_id, item['output_key'])
        output_value = output['output']['output_value']
        item = {
            'vmId': output_value['vmId'],
            'vncUrl': output_value['vncUrl'],
            'networks': []
        }
        if 'networks' in output_value and output_value['networks'] is not None:
            for net_name, ip_data in output_value['networks'].items():
                if utils.validate_uuid(net_name):
                    continue
                network = {
                    'name': net_name,
                    'ip': ip_data[0]['addr']
                }
                item['networks'].append(network)
        response['data'].append(item)
    return response
aca183c1b158e6e7b9e414151e7f5d5505de1188
6,719
def _ensure_str(s):
    """convert bytestrings and numpy strings to python strings"""
    return s.decode() if isinstance(s, bytes) else str(s)
05f549166cc459371b380f62393bbc835aa7ff48
6,720
def get_polarimeter_index(pol_name):
    """Return the progressive number of the polarimeter within the board (0…7)

    Args:
        pol_name (str): Name of the polarimeter, like ``R0`` or ``W3``.

    Returns:
        An integer from 0 to 7.
    """
    if pol_name[0] == "W":
        return 7
    else:
        return int(pol_name[1])
0068931868e214896f6263e58fc09215352d502c
6,721
def merge_sort(collection):
    """Sort a mutable ordered collection of heterogeneous comparable items.

    :parameter collection: some mutable ordered collection with heterogeneous
        comparable items inside
    :return: a collection ordered by ascending

    Examples:
    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-45, -5, -2])
    [-45, -5, -2]
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
b704792ef49629e7e9e04c22ffe03f08b3ef76fa
6,722
def sma_centroids(dataframe, column, short_window, long_window, min_width=None, **kwargs): """Identify centermost point between two SMA interception points Define regions as being bounded by two consecutive interceptions of SMAs with different window widths, then choose the centermost data point within that region. Useful for defining regions that capture the crossover of SMAs. Essentially a wrapper around `sma_to_centroids`. Args: dataframe (pandas.DataFrame): dataframe from which SMAs should be calculated and regions defined column (str): name of column over in dataframe from which sliding-window slopes should be calculated short_window (int): number of consecutive dataframe rows to include in the short window long_window (int): number of consecutive dataframe rows to include in the long window min_width: minimum width, expressed in units of `x_column`, below which an intercept should be disregarded as a valid end of a window kwargs: arguments to be passed to calculate_sma() Returns: DataFrame with indices corresponding to dataframe """ x_column = '_datetime_start' sma_short = calculate_sma(dataframe, x_column, column, window=short_window, **kwargs) sma_long = calculate_sma(dataframe, x_column, column, window=long_window, **kwargs) intercepts = find_sma_intercepts(sma_short, sma_long, dataframe[x_column]) return find_sma_centroids(dataframe=dataframe, sma_short=sma_short, sma_long=sma_long, intercepts=intercepts, x_column=x_column, min_width=min_width)
1157318d90ce514a3a85461851c158a0df9d2a3e
6,723
def delMsg(msgNum): """Deletes a specified message from the inbox""" global usrPrompt try: inboxMessages = json.loads(api.getAllInboxMessages()) # gets the message ID via the message index number msgId = inboxMessages['inboxMessages'][int(msgNum)]['msgid'] msgAck = api.trashMessage(msgId) except: print '\n Connection Error\n' usrPrompt = 0 main() return msgAck
b3cc7a4568ca6eae3267cd5247abe88c5ccb8bec
6,724
def capitalize_first(str):
    """Capitalizes only the first letter of the given string.

    :param str: string to capitalize
    :return: str with only the first letter capitalized
    """
    if str == "":
        return ""
    return str[0].upper() + str[1:]
ed6dfdfd9709de1682c29ed152131b9da732441b
6,725
def min_cost_edge(G, T):
    """Returns the edge with the lowest cost/weight.

    Parameters
    ----------
    G : NetworkX graph

    T : Prim's Algorithm

    Returns
    -------
    The edge with the lowest cost/weight.
    """
    edge_list = possible_edges(G, T)
    edge_list.sort(key=lambda e: cost(G, e))
    return edge_list[0]
3720bb59cddf0b29beb9f9162941ddf7f86dd429
6,726
import io
import base64


def get_image_html_tag(fig, format="svg"):
    """
    Returns an HTML tag with embedded image data in the given format.

    :param fig: a matplotlib figure instance
    :param format: output image format (passed to fig.savefig)
    """
    stream = io.BytesIO()
    # bbox_inches: expand the canvas to include the legend that was put outside the plot
    # see https://stackoverflow.com/a/43439132
    fig.savefig(stream, format=format, bbox_inches="tight")
    data = stream.getvalue()

    if format == "svg":
        return data.decode("utf-8")

    data = base64.b64encode(data).decode("utf-8")
    return f"<img src=\"data:image/{format};base64,{data}\">"
f5c59a6f4f70fb6616cec4619d8cbf9ca2e28529
6,727
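A usage sketch, assuming matplotlib is installed and `get_image_html_tag` above is importable; the Agg backend keeps rendering off-screen.

import matplotlib
matplotlib.use("Agg")  # render off-screen, no display needed
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [3, 1, 4], label="demo")
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))  # legend placed outside the axes

svg_markup = get_image_html_tag(fig)             # inline SVG markup as a string
img_tag = get_image_html_tag(fig, format="png")  # <img src="data:image/png;base64,...">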
def reformat_language_tuple(langval): """Produce standardly-formatted language specification string using given language tuple. :param langval: `tuple` in form ('<language>', '<language variant>'). Example: ('en', 'US') :return: `string` formatted in form '<language>-<language-variant>' """ if langval: langval_base, langval_variant = langval if langval_variant: langval_base = '{0}-{1}'.format(langval_base, langval_variant) return langval_base else: return None
63c479d7dd273f31b9bdcc6c0ce81d4267a43714
6,728
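A usage sketch, assuming `reformat_language_tuple` above is in scope.

print(reformat_language_tuple(('en', 'US')))  # 'en-US'
print(reformat_language_tuple(('de', None)))  # 'de'
print(reformat_language_tuple(None))          # None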
def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a GhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 16, 16, 0, 1]], # stage2 [[3, 48, 24, 0, 2]], [[3, 72, 24, 0, 1]], # stage3 [[5, 72, 40, 0.25, 2]], [[5, 120, 40, 0.25, 1]], # stage4 [[3, 240, 80, 0, 2]], [[3, 200, 80, 0, 1], [3, 184, 80, 0, 1], [3, 184, 80, 0, 1], [3, 480, 112, 0.25, 1], [3, 672, 112, 0.25, 1] ], # stage5 [[5, 672, 160, 0.25, 2]], [[5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1], [5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return build_model_with_cfg( GhostNet, variant, pretrained, default_cfg=default_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **model_kwargs)
8b0bca5e4d711dce5150d8c3cdb187c9b1a23ec3
6,729
def re_suffix(string): """ Remove any “os.extsep” prefixing a string, and ensure that it ends with a “$” – to indicate a regular expression suffix. """ if not string: return None return rf"{string.casefold().lstrip(QUALIFIER).rstrip(DOLLA)}{DOLLA}"
fd1767f0d539e284f56c32f5ed4a8789a6638fca
6,730
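A usage sketch for `re_suffix` above; `QUALIFIER` and `DOLLA` are module-level constants defined elsewhere in the source, assumed here to be `os.extsep` (".") and `"$"` as the docstring implies.

import os
import re

QUALIFIER = os.extsep  # "."
DOLLA = "$"

print(re_suffix(".JPG"))  # 'jpg$'
print(re_suffix("png"))   # 'png$'
print(re_suffix(""))      # None

# e.g. match filenames by suffix:
print(bool(re.search(re_suffix("JPG"), "holiday.jpg")))  # True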
def _alternate_dataclass_repr(object) -> str:
    """
    Overrides the default dataclass repr by not printing fields that are set to None.
    i.e. Only prints fields which have values. This is for ease of reading.

    Returns the repr string rather than printing it.
    """
    populated_fields = {
        field.name: getattr(object, f"{field.name}") for field in fields(object) if getattr(object, f"{field.name}") is not None
    }
    class_name = object.__class__.__name__
    repr_string = f"{class_name}(" + ", ".join([f"{field}={value}" for field, value in populated_fields.items()]) + ")"
    return repr_string
c9c07508a39c0732698c1ed6803ef00c4b2f65d6
6,731
def which_coords_in_bounds(coords, map_shape):
    """
    Checks the coordinates given to see if they are in bounds
    :param coords Union[array(2)[int], array(N,2)[int]]: [int, int] or [[int, int], ...], Nx2 ndarray
    :param map_shape Tuple[int]: shape of the map to check bounds
    :return Union[bool, array(N)[bool]]: corresponding to whether the coord is in bounds
                                         (if an array is given, then it will be an array of bool)
    """
    # np.int was just an alias for the builtin int (and was removed in NumPy 1.24+),
    # so compare dtypes against the builtin directly
    assert isinstance(coords, np.ndarray) and coords.dtype == int
    assert np.array(map_shape).dtype == int
    if len(coords.shape) == 1:
        return coords[0] >= 0 and coords[0] < map_shape[0] and coords[1] >= 0 and coords[1] < map_shape[1]
    else:
        return np.logical_and(np.logical_and(coords[:, 0] >= 0, coords[:, 0] < map_shape[0]),
                              np.logical_and(coords[:, 1] >= 0, coords[:, 1] < map_shape[1]))
5606c24430e9967cade8bdeb789f10bed1248eb1
6,732
def get_activation_function(activation_function_name: str):
    """
    Given the name of an activation function, retrieve the corresponding function and its derivative

    :param activation_function_name: the name of the activation function
    :return: the corresponding activation function and its derivative
    """
    try:
        return activation_functions[activation_function_name]
    except KeyError:
        raise UnknownActivationFunctionName(activation_function_name)
f2a830c15cb93bd9fce1b66c2b5ca14530005cd5
6,733
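A hypothetical sketch of the registry the lookup above relies on; the real `activation_functions` mapping and `UnknownActivationFunctionName` exception are defined elsewhere in the source, so the entries here are illustrative only (each name maps to an (activation, derivative) pair, per the docstring).

import numpy as np

def _sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def _sigmoid_prime(x):
    s = _sigmoid(x)
    return s * (1.0 - s)

# Illustrative registry: name -> (activation, derivative)
activation_functions = {
    "sigmoid": (_sigmoid, _sigmoid_prime),
    "tanh": (np.tanh, lambda x: 1.0 - np.tanh(x) ** 2),
}

f, f_prime = get_activation_function("tanh")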
def url_split(url, uses_hostname=True, split_filename=False):
    """Split the URL into its components.

    uses_hostname defines whether the protocol uses a hostname or just a path (for
    "file://relative/directory"-style URLs) or not. split_filename defines whether the
    filename will be split off into its own attribute or whether it will be part of the path.
    """
    # urlparse.urlparse() is a bit deficient for our needs.
    try:
        if uses_hostname:
            match = URL_RE_HOSTNAME.match(url).groupdict()
        else:
            match = URL_RE_PLAIN.match(url).groupdict()
    except AttributeError:
        raise AttributeError("Invalid URL.")
    for key, item in match.items():
        if item is None:
            if key == "port":
                # Use "0" as a placeholder so the int() conversion below succeeds.
                match[key] = "0"
            else:
                match[key] = ""
    if uses_hostname:
        match["port"] = int(match["port"])
    if not split_filename:
        match["path"] = match["path"] + match["file"]
        match["file"] = ""

    return URLSplitResult(match)
5c76eb58c520043ab922c941806f24c60f9ee721
6,734
def memdiff_search(bytes1, bytes2): """ Use binary searching to find the offset of the first difference between two strings. :param bytes1: The original sequence of bytes :param bytes2: A sequence of bytes to compare with bytes1 :type bytes1: str :type bytes2: str :rtype: int offset of the first location a and b differ, None if strings match """ # Prevent infinite recursion on inputs with length of one half = (len(bytes1) // 2) or 1 # Compare first half of the string if bytes1[:half] != bytes2[:half]: # Have we found the first diff? if bytes1[0] != bytes2[0]: return 0 return memdiff_search(bytes1[:half], bytes2[:half]) # Compare second half of the string if bytes1[half:] != bytes2[half:]: return memdiff_search(bytes1[half:], bytes2[half:]) + half
fbcb221c77730c45be4c81a6ae7515e602468af5
6,735
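A usage sketch, assuming `memdiff_search` above is in scope; it works on str or bytes of equal length.

a = b"The quick brown fox"
b = b"The quick brown cat"
print(memdiff_search(a, b))  # 16 -- offset of the first differing byte
print(memdiff_search(a, a))  # None -- inputs are identical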
def decomposeJonesMatrix(Jmat): """ Decompose 2x2 Jones matrix to retardance and diattenuation vectors """ Jmat = Jmat / cp.sqrt(cp.linalg.det(Jmat)) q = cp.array([Jmat[0, 0] - Jmat[1, 1], Jmat[1, 0] + Jmat[0, 1], -1j * Jmat[1, 0] + 1j * Jmat[0, 1]]) / 2 tr = cp.trace(Jmat) / 2 c = cp.arccosh(tr) csin = c / cp.sinh(c) if c == 0: csin = 1 f = 2 * q * csin rotVector = -cp.imag(f) diatVector = cp.real(f) return rotVector, diatVector
151320a0f77f2fb3a77d8e06b1e623c0fed6c673
6,736
def format_utc(time): """Format a time in UTC.""" return as_utc(time).strftime('%Y-%m-%d %H:%M:%S.%f')
88624b8e166aa07172abd14c391945e33c77332f
6,737
def _expand_sources(sources): """ Expands a user-provided specification of source files into a list of paths. """ if sources is None: return [] if isinstance(sources, str): sources = [x.strip() for x in sources.split(",")] elif isinstance(sources, (float, int)): sources = [str(sources)] return [path for source in sources for path in _glob(source)]
6e16eaae5edb68a5be7e0af4be777fc76b70d22a
6,740
from typing import Optional def get_stream(stream_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult: """ This data source provides details about a specific Stream resource in Oracle Cloud Infrastructure Streaming service. Gets detailed information about a stream, including the number of partitions. ## Example Usage ```python import pulumi import pulumi_oci as oci test_stream = oci.streaming.get_stream(stream_id=oci_streaming_stream["test_stream"]["id"]) ``` :param str stream_id: The OCID of the stream. """ __args__ = dict() __args__['streamId'] = stream_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:streaming/getStream:getStream', __args__, opts=opts, typ=GetStreamResult).value return AwaitableGetStreamResult( compartment_id=__ret__.compartment_id, defined_tags=__ret__.defined_tags, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_state_details=__ret__.lifecycle_state_details, messages_endpoint=__ret__.messages_endpoint, name=__ret__.name, partitions=__ret__.partitions, retention_in_hours=__ret__.retention_in_hours, state=__ret__.state, stream_id=__ret__.stream_id, stream_pool_id=__ret__.stream_pool_id, time_created=__ret__.time_created)
fd7eb6675f5d232e90a94e18e4c68e6d538ca7e4
6,741
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): """ Fusion method. """ n_channels_int = n_channels in_act = input_a + input_b t_act = ops.Tanh()(in_act[:, :n_channels_int, :]) s_act = ops.Sigmoid()(in_act[:, n_channels_int:, :]) acts = t_act * s_act return acts
9412a3b20960b0a280a06569846df744f14a9d63
6,743
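For illustration, the same gated activation (a tanh gate multiplied by a sigmoid gate over a channel split) expressed in NumPy; the snippet above relies on MindSpore `ops`, so this re-implementation is only a readability sketch, not part of the original code.

import numpy as np

def gated_activation_numpy(input_a, input_b, n_channels):
    # Same math as above: add the two inputs, split the channels, and gate
    # the tanh half with the sigmoid half.
    in_act = input_a + input_b
    t_act = np.tanh(in_act[:, :n_channels, :])
    s_act = 1.0 / (1.0 + np.exp(-in_act[:, n_channels:, :]))
    return t_act * s_act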
import json def _classes_dict(filename): """ Open JSON file and read the data for the Classes (and Origins). filename - the file name as a string. Runtime: O(n) """ class_dict = {} # {'robot': ['blitzcrank']} class_bonus_dict = {} dict = { 1: {}, 2: {}, 3: {}, 4 : {}, 6 : {}} # { 1 : { 'robot' : set['blitzcrank'], 'exile' : set['yasuo'] }, 2 : ... } with open(filename) as json_file: data = json.load(json_file) for class_obj in data.items(): # O(n) key = class_obj[1]['key'] # String name = class_obj[1]['name'] # String description = class_obj[1]['description'] # String accentChampionImage = class_obj[1]['accentChampionImage'] # URL as String bonuses = class_obj[1]['bonuses'] # Array [{'needed': int, 'effect': string}] needed = bonuses[-1]['needed'] # Check the highest number for needed. (In this case it's the last item in the array) class_dict[key] = [] class_bonus_dict[key] = needed dict[needed].update({class_obj[0]: []}) return dict
44fa2acec6c7235995bfdabaab149b4cba2cb7cc
6,744
def set_incident_seen(incident, user=None): """ Updates the incident to be seen """ is_org_member = incident.organization.has_access(user) if is_org_member: is_project_member = False for incident_project in IncidentProject.objects.filter(incident=incident).select_related( "project" ): if incident_project.project.member_set.filter(user=user).exists(): is_project_member = True break if is_project_member: incident_seen, created = IncidentSeen.objects.create_or_update( incident=incident, user=user, values={"last_seen": timezone.now()} ) return incident_seen return False
8b970ec492bdb72b6e05c053f7a5b9bf919b15e7
6,745
def single_parity_check( llr: np.array, mask_steps: int = 0, last_chunk_type: int = 0, ) -> np.array: """Compute beta value for Single parity node.""" all_sign = np.sign(np.prod(llr)) abs_alpha = np.fabs(llr) first_min_idx, second_min_idx = np.argsort(abs_alpha)[:2] result = np.sign(llr) * all_sign for i in range(result.size): if i == first_min_idx: result[i] *= abs_alpha[second_min_idx] else: result[i] *= abs_alpha[first_min_idx] return result
5cc9984bb86fdfd777b2a968fb887388a5422e4f
6,746
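A worked example for the check-node update above (assumes NumPy and `single_parity_check` are in scope): each output's sign is its own sign times the overall sign product (i.e. the product of the other inputs' signs), and its magnitude is the smallest magnitude among the other inputs, which is why the position holding the minimum receives the second-smallest magnitude.

import numpy as np

llr = np.array([2.0, -0.5, 1.5])
print(single_parity_check(llr))  # [-0.5  1.5 -0.5]
# overall sign product is negative; min |llr| = 0.5, second-smallest = 1.5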
def _deserialize_job_result(user_input: JSON) -> JobResult: """Deserialize a JobResult from JSON.""" job = _deserialize_job(user_input['job']) plan = _deserialize_plan(user_input['plan']) is_done = user_input['is_done'] outputs = dict() # type: Dict[str, Asset] for name, asset in user_input['outputs'].items(): outputs[name] = _deserialize_asset(asset) return JobResult(job, plan, is_done, outputs)
883629fa2650fc043124c6ccf721ed38093daa19
6,747
def _brute_force_knn(X, centers, k, return_distance=True): """ :param X: array of shape=(n_samples, n_features) :param centers: array of shape=(n_centers, n_features) :param k: int, only looking for the nearest k points to each center. :param return_distance: bool, if True the return the distance along with the points :return: """ if k == 1: nearest, dists = pairwise_distances_argmin_min(centers, X) return (dists, nearest) if return_distance else nearest else: dists = pairwise_distances(centers, X) nearest = np.argsort(dists, axis=1)[:, :k] return (np.vstack([dists[i, nearest[i]] for i in range(dists.shape[0])]), nearest) if return_distance else nearest
b185a8d9e901a12a1385ed5ffc3183a5cc51c1b5
6,748
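A usage sketch, assuming `_brute_force_knn` above plus its scikit-learn imports (`pairwise_distances`, `pairwise_distances_argmin_min`) are available.

import numpy as np

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [5.0, 5.0]])
centers = np.array([[0.1, 0.1], [4.0, 4.0]])

# k == 1 returns one nearest index (and its distance) per center
dists1, nearest1 = _brute_force_knn(X, centers, k=1)

# k > 1 returns, for each center, the k nearest indices sorted by distance
dists2, nearest2 = _brute_force_knn(X, centers, k=2)
print(nearest2[0][0])  # 0 -- the point [0, 0] is closest to the first center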